From 9fcce26ff9f8f25981a274f6acb452eb4f3c53e0 Mon Sep 17 00:00:00 2001
From: Chuan Long
Date: Wed, 9 Sep 2020 15:59:41 -0500
Subject: [PATCH 1/2] Add back the Dockerfile for the cloud deployment, and
 make sync_period configurable for the cloud deployment

---
 Dockerfile.cloud-deployment | 6 ++++++
 Dockerfile => Dockerfile.docker-compose | 0
 startup-kube.sh | 5 +++--
 version/version.go | 2 +-
 4 files changed, 10 insertions(+), 3 deletions(-)
 create mode 100644 Dockerfile.cloud-deployment
 rename Dockerfile => Dockerfile.docker-compose (100%)

diff --git a/Dockerfile.cloud-deployment b/Dockerfile.cloud-deployment
new file mode 100644
index 0000000..25d55df
--- /dev/null
+++ b/Dockerfile.cloud-deployment
@@ -0,0 +1,6 @@
+FROM golang
+ADD . /go/src/devicedb
+RUN go install devicedb
+ENTRYPOINT [ "/bin/sh", "/go/src/devicedb/startup-kube.sh" ]
+EXPOSE 8080
+EXPOSE 9090
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile.docker-compose
similarity index 100%
rename from Dockerfile
rename to Dockerfile.docker-compose
diff --git a/startup-kube.sh b/startup-kube.sh
index 25e17c8..a47bf2f 100644
--- a/startup-kube.sh
+++ b/startup-kube.sh
@@ -34,8 +34,9 @@
 # HOST
 # SEED_NODE_ADDRESS
 # LOG_LEVEL
+# SYNC_PERIOD
 if [ $NODE_NAME = $SEED_NODE_NAME ]; then
-    /go/bin/devicedb cluster start -store $DATA_STORAGE_PATH -snapshot_store $SNAPSHOT_STORAGE_PATH -replication_factor $REPLICATION_FACTOR -port $PORT -host $HOST -log_level $LOG_LEVEL;
+    /go/bin/devicedb cluster start -store $DATA_STORAGE_PATH -snapshot_store $SNAPSHOT_STORAGE_PATH -replication_factor $REPLICATION_FACTOR -port $PORT -host $HOST -log_level $LOG_LEVEL -sync_period $SYNC_PERIOD;
 else
-    /go/bin/devicedb cluster start -store $DATA_STORAGE_PATH -snapshot_store $SNAPSHOT_STORAGE_PATH -replication_factor $REPLICATION_FACTOR -port $PORT -host $HOST -log_level $LOG_LEVEL -join $SEED_NODE_ADDRESS;
+    /go/bin/devicedb cluster start -store $DATA_STORAGE_PATH -snapshot_store $SNAPSHOT_STORAGE_PATH -replication_factor $REPLICATION_FACTOR -port $PORT -host $HOST -log_level $LOG_LEVEL -sync_period $SYNC_PERIOD -join $SEED_NODE_ADDRESS;
 fi;
\ No newline at end of file
diff --git a/version/version.go b/version/version.go
index a88bb48..d860f12 100644
--- a/version/version.go
+++ b/version/version.go
@@ -29,4 +29,4 @@ package version
 // Increment x when you make incompatible API changes
 // Increment y when you add functionality in a backwards-compatible manner
 // Increment z when you make backwards-compatible bug fixes
-var DEVICEDB_VERSION = "1.9.4"
+var DEVICEDB_VERSION = "1.9.5"

From 8c8760b5e80c078f0ca76cd45403ac9e7e176212 Mon Sep 17 00:00:00 2001
From: Chuan Long
Date: Wed, 9 Sep 2020 16:15:44 -0500
Subject: [PATCH 2/2] Update vendor files

---
 go.mod | 33 +
 go.sum | 52 +
 .../github.com/op/go-logging/.travis.yml | 6 -
 .../github.com/op/go-logging/CHANGELOG.md | 19 -
 .../github.com/op/go-logging/CONTRIBUTORS | 5 -
 .../vendor/github.com/op/go-logging/LICENSE | 27 -
 .../vendor/github.com/op/go-logging/README.md | 93 -
 .../github.com/op/go-logging/backend.go | 39 -
 .../vendor/github.com/op/go-logging/format.go | 414 ---
 .../vendor/github.com/op/go-logging/level.go | 128 -
 .../github.com/op/go-logging/log_nix.go | 109 -
 .../github.com/op/go-logging/log_windows.go | 107 -
 .../vendor/github.com/op/go-logging/logger.go | 259 --
 .../vendor/github.com/op/go-logging/memory.go | 237 --
 .../vendor/github.com/op/go-logging/multi.go | 65 -
 .../vendor/github.com/op/go-logging/syslog.go | 53 -
 .../op/go-logging/syslog_fallback.go | 28 -
.../github.com/armPelionEdge/devicedb/LICENSE | 21 + .../armPelionEdge/devicedb/alerts/alert.go | 33 + .../devicedb/alerts/alert_map.go | 93 + .../devicedb/alerts/alert_store.go | 85 + .../devicedb/benchmarks/many_relays.go | 191 ++ .../armPelionEdge/devicedb/bucket/bucket.go | 59 + .../devicedb/bucket/bucket_list.go | 93 + .../devicedb/bucket/builtin/cloud.go | 76 + .../devicedb/bucket/builtin/default.go | 67 + .../devicedb/bucket/builtin/local.go | 67 + .../devicedb/bucket/builtin/lww.go | 67 + .../armPelionEdge/devicedb/bucket/monitor.go | 184 ++ .../armPelionEdge/devicedb/bucket/store.go | 1462 +++++++++++ .../devicedb/client/api_client.go | 396 +++ .../armPelionEdge/devicedb/client/batch.go | 76 + .../armPelionEdge/devicedb/client/client.go | 238 ++ .../armPelionEdge/devicedb/client/entry.go | 30 + .../devicedb/client/entry_iterator.go | 68 + .../devicedb/client_relay/client.go | 342 +++ .../devicedb/client_relay/entry_iterator.go | 156 ++ .../devicedb/client_relay/update.go | 37 + .../devicedb/client_relay/update_iterator.go | 139 + .../armPelionEdge/devicedb/cluster/command.go | 387 +++ .../devicedb/cluster/config_controller.go | 548 ++++ .../cluster/config_controller_builder.go | 100 + .../devicedb/cluster/controller.go | 889 +++++++ .../armPelionEdge/devicedb/cluster/delta.go | 212 ++ .../devicedb/cluster/partitioner.go | 438 ++++ .../armPelionEdge/devicedb/cluster/state.go | 405 +++ .../armPelionEdge/devicedb/clusterio/agent.go | 602 +++++ .../devicedb/clusterio/clusterio.go | 73 + .../devicedb/clusterio/read_merger.go | 126 + .../devicedb/clusterio/read_repairer.go | 133 + .../clusterio/sibling_set_merge_iterator.go | 140 + .../devicedb/compatibility/compatibility.go | 322 +++ .../armPelionEdge/devicedb/data/dvv.go | 170 ++ .../armPelionEdge/devicedb/data/hash.go | 82 + .../armPelionEdge/devicedb/data/row.go | 65 + .../armPelionEdge/devicedb/data/sibling.go | 116 + .../devicedb/data/sibling_set.go | 369 +++ .../devicedb/data/sibling_set_iterator.go | 35 + .../armPelionEdge/devicedb/data/update.go | 75 + .../armPelionEdge/devicedb/error/errors.go | 120 + .../devicedb/historian/historian.go | 638 +++++ .../armPelionEdge/devicedb/logging/logging.go | 32 + .../armPelionEdge/devicedb/merkle/merkle.go | 415 +++ .../devicedb/node/cluster_node.go | 1235 +++++++++ .../devicedb/node/clusterio_node_client.go | 457 ++++ .../node/clusterio_partition_resolver.go | 47 + .../armPelionEdge/devicedb/node/errors.go | 33 + .../armPelionEdge/devicedb/node/node.go | 62 + .../devicedb/node/node_facade.go | 191 ++ .../devicedb/node/node_state_coordinator.go | 200 ++ .../armPelionEdge/devicedb/node/options.go | 69 + .../devicedb/node/snapshotter.go | 208 ++ .../node_facade/node_coordinator_facade.go | 83 + .../devicedb/partition/partition.go | 75 + .../devicedb/partition/partition_factory.go | 44 + .../devicedb/partition/partition_iterator.go | 44 + .../devicedb/partition/partition_pool.go | 67 + .../devicedb/raft/memory_storage.go | 136 + .../armPelionEdge/devicedb/raft/node.go | 393 +++ .../armPelionEdge/devicedb/raft/storage.go | 696 +++++ .../armPelionEdge/devicedb/raft/transport.go | 285 ++ .../devicedb/resolver/conflict_resolver.go | 33 + .../resolver/strategies/last_writer_wins.go | 48 + .../resolver/strategies/multi_value.go | 37 + .../armPelionEdge/devicedb/rest/merkle.go | 46 + .../armPelionEdge/devicedb/routes/cluster.go | 250 ++ .../devicedb/routes/cluster_facade.go | 70 + .../devicedb/routes/kubernetes.go | 40 + .../armPelionEdge/devicedb/routes/log_dump.go | 70 + 
.../armPelionEdge/devicedb/routes/models.go | 115 + .../devicedb/routes/partitions.go | 371 +++ .../armPelionEdge/devicedb/routes/profile.go | 40 + .../devicedb/routes/prometheus.go | 37 + .../armPelionEdge/devicedb/routes/relays.go | 184 ++ .../armPelionEdge/devicedb/routes/sites.go | 406 +++ .../armPelionEdge/devicedb/routes/snapshot.go | 163 ++ .../armPelionEdge/devicedb/routes/sync.go | 52 + .../devicedb/server/cloud_server.go | 184 ++ .../armPelionEdge/devicedb/server/event.go | 58 + .../armPelionEdge/devicedb/server/peer.go | 1568 +++++++++++ .../armPelionEdge/devicedb/server/server.go | 1299 ++++++++++ .../armPelionEdge/devicedb/server/sync.go | 881 +++++++ .../armPelionEdge/devicedb/shared/config.go | 267 ++ .../armPelionEdge/devicedb/shared/gc.go | 69 + .../armPelionEdge/devicedb/site/site.go | 124 + .../devicedb/site/site_factory.go | 113 + .../devicedb/site/site_iterator.go | 159 ++ .../armPelionEdge/devicedb/site/site_pool.go | 200 ++ .../devicedb/site/site_pool_iterator.go | 168 ++ .../armPelionEdge/devicedb/storage/metrics.go | 50 + .../devicedb/storage/storageEngine.go | 808 ++++++ .../devicedb/sync/bucket_proxy.go | 362 +++ .../devicedb/sync/bucket_sync_http.go | 278 ++ .../devicedb/sync/merkle_proxy.go | 175 ++ .../devicedb/sync/sync_scheduler.go | 305 +++ .../devicedb/transfer/canceler.go | 29 + .../armPelionEdge/devicedb/transfer/chunk.go | 48 + .../devicedb/transfer/downloader.go | 380 +++ .../devicedb/transfer/transfer.go | 206 ++ .../devicedb/transfer/transfer_agent.go | 270 ++ .../devicedb/transfer/transfer_encoding.go | 130 + .../devicedb/transfer/transfer_factory.go | 47 + .../transfer/transfer_partner_strategy.go | 62 + .../devicedb/transfer/transfer_proposer.go | 160 ++ .../devicedb/transfer/transfer_transport.go | 107 + .../devicedb/transport/transport.go | 223 ++ .../armPelionEdge/devicedb/util/multilock.go | 88 + .../devicedb/util/new_storage_driver.go | 33 + .../devicedb/util/random_string.go | 41 + .../armPelionEdge/devicedb/util/request.go | 67 + .../devicedb/util/rw_try_lock.go | 72 + .../armPelionEdge/devicedb/util/uuid.go | 48 + .../armPelionEdge/devicedb/version/version.go | 32 + .../edge-go-logger/logging/env_watcher.go | 65 - .../edge-go-logger/logging/logging.go | 95 - .../github.com/op/go-logging/.travis.yml | 6 - .../github.com/op/go-logging/CHANGELOG.md | 19 - .../github.com/op/go-logging/CONTRIBUTORS | 5 - .../vendor/github.com/op/go-logging/README.md | 93 - .../github.com/op/go-logging/backend.go | 39 - .../vendor/github.com/op/go-logging/format.go | 414 --- .../vendor/github.com/op/go-logging/level.go | 128 - .../github.com/op/go-logging/log_nix.go | 109 - .../github.com/op/go-logging/log_windows.go | 107 - .../vendor/github.com/op/go-logging/logger.go | 259 -- .../vendor/github.com/op/go-logging/memory.go | 237 -- .../vendor/github.com/op/go-logging/multi.go | 65 - .../vendor/github.com/op/go-logging/syslog.go | 53 - .../op/go-logging/syslog_fallback.go | 28 - vendor/github.com/gogo/protobuf/AUTHORS | 15 - vendor/github.com/gogo/protobuf/CONTRIBUTORS | 23 - vendor/github.com/gogo/protobuf/LICENSE | 36 - .../gogo/protobuf/gogoproto/Makefile | 37 - .../github.com/gogo/protobuf/gogoproto/doc.go | 169 -- .../gogo/protobuf/gogoproto/gogo.pb.go | 825 ------ .../gogo/protobuf/gogoproto/gogo.pb.golden | 45 - .../gogo/protobuf/gogoproto/gogo.proto | 136 - .../gogo/protobuf/gogoproto/helper.go | 361 --- .../github.com/gogo/protobuf/proto/Makefile | 43 - .../github.com/gogo/protobuf/proto/clone.go | 234 -- .../github.com/gogo/protobuf/proto/decode.go | 
978 ------- .../gogo/protobuf/proto/decode_gogo.go | 172 -- .../github.com/gogo/protobuf/proto/discard.go | 151 -- .../gogo/protobuf/proto/duration.go | 100 - .../gogo/protobuf/proto/duration_gogo.go | 203 -- .../github.com/gogo/protobuf/proto/encode.go | 1362 ---------- .../gogo/protobuf/proto/encode_gogo.go | 350 --- .../github.com/gogo/protobuf/proto/equal.go | 300 --- .../gogo/protobuf/proto/extensions.go | 693 ----- .../gogo/protobuf/proto/extensions_gogo.go | 294 --- vendor/github.com/gogo/protobuf/proto/lib.go | 897 ------- .../gogo/protobuf/proto/lib_gogo.go | 42 - .../gogo/protobuf/proto/message_set.go | 311 --- .../gogo/protobuf/proto/pointer_reflect.go | 484 ---- .../protobuf/proto/pointer_reflect_gogo.go | 85 - .../gogo/protobuf/proto/pointer_unsafe.go | 270 -- .../protobuf/proto/pointer_unsafe_gogo.go | 128 - .../gogo/protobuf/proto/properties.go | 971 ------- .../gogo/protobuf/proto/properties_gogo.go | 111 - .../gogo/protobuf/proto/skip_gogo.go | 119 - vendor/github.com/gogo/protobuf/proto/text.go | 939 ------- .../gogo/protobuf/proto/text_gogo.go | 57 - .../gogo/protobuf/proto/text_parser.go | 1013 -------- .../gogo/protobuf/proto/timestamp.go | 113 - .../gogo/protobuf/proto/timestamp_gogo.go | 229 -- .../protoc-gen-gogo/descriptor/Makefile | 36 - .../protoc-gen-gogo/descriptor/descriptor.go | 118 - .../descriptor/descriptor.pb.go | 2281 ----------------- .../descriptor/descriptor_gostring.gen.go | 772 ------ .../protoc-gen-gogo/descriptor/helper.go | 390 --- vendor/github.com/golang/protobuf/AUTHORS | 3 + .../github.com/golang/protobuf/CONTRIBUTORS | 3 + .../github.com/golang/snappy/encode_amd64.go | 2 +- vendor/github.com/golang/snappy/snappy.go | 2 +- vendor/github.com/gorilla/context/.travis.yml | 19 + .../op/go-logging => gorilla/context}/LICENSE | 8 +- vendor/github.com/gorilla/context/README.md | 10 + vendor/github.com/gorilla/context/context.go | 143 ++ vendor/github.com/gorilla/context/doc.go | 88 + vendor/github.com/onsi/ginkgo/before_pr.sh | 0 vendor/github.com/prometheus/procfs/ttar | 0 vendor/golang.org/x/net/context/context.go | 2 +- vendor/golang.org/x/net/html/atom/atom.go | 2 +- .../golang.org/x/net/html/charset/charset.go | 2 +- vendor/golang.org/x/net/html/doc.go | 2 +- vendor/golang.org/x/sys/unix/mkall.sh | 0 vendor/golang.org/x/sys/unix/mkerrors.sh | 0 vendor/golang.org/x/sys/unix/mksyscall.pl | 0 .../x/sys/unix/mksyscall_solaris.pl | 0 .../golang.org/x/sys/unix/mksysctl_openbsd.pl | 0 .../golang.org/x/sys/unix/mksysnum_darwin.pl | 0 .../x/sys/unix/mksysnum_dragonfly.pl | 0 .../golang.org/x/sys/unix/mksysnum_freebsd.pl | 0 .../golang.org/x/sys/unix/mksysnum_netbsd.pl | 0 .../golang.org/x/sys/unix/mksysnum_openbsd.pl | 0 vendor/golang.org/x/sys/unix/syscall.go | 2 +- .../x/text/encoding/charmap/charmap.go | 2 +- vendor/golang.org/x/text/encoding/encoding.go | 2 +- .../x/text/encoding/japanese/tables.go | 2 +- .../x/text/encoding/korean/tables.go | 2 +- .../text/encoding/simplifiedchinese/tables.go | 2 +- .../encoding/traditionalchinese/tables.go | 2 +- .../x/text/encoding/unicode/unicode.go | 2 +- vendor/golang.org/x/text/internal/tag/tag.go | 2 +- vendor/golang.org/x/text/language/doc.go | 2 +- vendor/golang.org/x/text/runes/runes.go | 2 +- .../golang.org/x/text/transform/transform.go | 2 +- vendor/modules.txt | 144 ++ 228 files changed, 26387 insertions(+), 19226 deletions(-) create mode 100644 go.mod create mode 100644 go.sum delete mode 100644 vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/.travis.yml delete mode 100644 
vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/CHANGELOG.md delete mode 100644 vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/CONTRIBUTORS delete mode 100644 vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/LICENSE delete mode 100644 vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/README.md delete mode 100644 vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/backend.go delete mode 100644 vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/format.go delete mode 100644 vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/level.go delete mode 100644 vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/log_nix.go delete mode 100644 vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/log_windows.go delete mode 100644 vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/logger.go delete mode 100644 vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/memory.go delete mode 100644 vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/multi.go delete mode 100644 vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/syslog.go delete mode 100644 vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/syslog_fallback.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/LICENSE create mode 100644 vendor/github.com/armPelionEdge/devicedb/alerts/alert.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/alerts/alert_map.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/alerts/alert_store.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/benchmarks/many_relays.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/bucket/bucket.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/bucket/bucket_list.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/bucket/builtin/cloud.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/bucket/builtin/default.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/bucket/builtin/local.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/bucket/builtin/lww.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/bucket/monitor.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/bucket/store.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/client/api_client.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/client/batch.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/client/client.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/client/entry.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/client/entry_iterator.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/client_relay/client.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/client_relay/entry_iterator.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/client_relay/update.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/client_relay/update_iterator.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/cluster/command.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/cluster/config_controller.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/cluster/config_controller_builder.go create mode 100644 
vendor/github.com/armPelionEdge/devicedb/cluster/controller.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/cluster/delta.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/cluster/partitioner.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/cluster/state.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/clusterio/agent.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/clusterio/clusterio.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/clusterio/read_merger.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/clusterio/read_repairer.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/clusterio/sibling_set_merge_iterator.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/compatibility/compatibility.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/data/dvv.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/data/hash.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/data/row.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/data/sibling.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/data/sibling_set.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/data/sibling_set_iterator.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/data/update.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/error/errors.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/historian/historian.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/logging/logging.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/merkle/merkle.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/node/cluster_node.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/node/clusterio_node_client.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/node/clusterio_partition_resolver.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/node/errors.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/node/node.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/node/node_facade.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/node/node_state_coordinator.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/node/options.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/node/snapshotter.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/node_facade/node_coordinator_facade.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/partition/partition.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/partition/partition_factory.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/partition/partition_iterator.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/partition/partition_pool.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/raft/memory_storage.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/raft/node.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/raft/storage.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/raft/transport.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/resolver/conflict_resolver.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/resolver/strategies/last_writer_wins.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/resolver/strategies/multi_value.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/rest/merkle.go create mode 
100644 vendor/github.com/armPelionEdge/devicedb/routes/cluster.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/routes/cluster_facade.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/routes/kubernetes.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/routes/log_dump.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/routes/models.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/routes/partitions.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/routes/profile.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/routes/prometheus.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/routes/relays.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/routes/sites.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/routes/snapshot.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/routes/sync.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/server/cloud_server.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/server/event.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/server/peer.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/server/server.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/server/sync.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/shared/config.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/shared/gc.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/site/site.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/site/site_factory.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/site/site_iterator.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/site/site_pool.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/site/site_pool_iterator.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/storage/metrics.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/storage/storageEngine.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/sync/bucket_proxy.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/sync/bucket_sync_http.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/sync/merkle_proxy.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/sync/sync_scheduler.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/transfer/canceler.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/transfer/chunk.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/transfer/downloader.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/transfer/transfer.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/transfer/transfer_agent.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/transfer/transfer_encoding.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/transfer/transfer_factory.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/transfer/transfer_partner_strategy.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/transfer/transfer_proposer.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/transfer/transfer_transport.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/transport/transport.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/util/multilock.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/util/new_storage_driver.go create mode 100644 
vendor/github.com/armPelionEdge/devicedb/util/random_string.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/util/request.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/util/rw_try_lock.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/util/uuid.go create mode 100644 vendor/github.com/armPelionEdge/devicedb/version/version.go delete mode 100644 vendor/github.com/armpelionedge/edge-go-logger/logging/env_watcher.go delete mode 100644 vendor/github.com/armpelionedge/edge-go-logger/logging/logging.go delete mode 100644 vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/.travis.yml delete mode 100644 vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/CHANGELOG.md delete mode 100644 vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/CONTRIBUTORS delete mode 100644 vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/README.md delete mode 100644 vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/backend.go delete mode 100644 vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/format.go delete mode 100644 vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/level.go delete mode 100644 vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/log_nix.go delete mode 100644 vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/log_windows.go delete mode 100644 vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/logger.go delete mode 100644 vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/memory.go delete mode 100644 vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/multi.go delete mode 100644 vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/syslog.go delete mode 100644 vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/syslog_fallback.go delete mode 100644 vendor/github.com/gogo/protobuf/AUTHORS delete mode 100644 vendor/github.com/gogo/protobuf/CONTRIBUTORS delete mode 100644 vendor/github.com/gogo/protobuf/LICENSE delete mode 100644 vendor/github.com/gogo/protobuf/gogoproto/Makefile delete mode 100644 vendor/github.com/gogo/protobuf/gogoproto/doc.go delete mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go delete mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden delete mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.proto delete mode 100644 vendor/github.com/gogo/protobuf/gogoproto/helper.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/Makefile delete mode 100644 vendor/github.com/gogo/protobuf/proto/clone.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/decode.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/decode_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/discard.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/duration.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/duration_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/encode.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/encode_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/equal.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/extensions.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/extensions_gogo.go delete mode 100644 
vendor/github.com/gogo/protobuf/proto/lib.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/lib_gogo.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/message_set.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_reflect.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/properties.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/properties_gogo.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/skip_gogo.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/text.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/text_gogo.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/text_parser.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/timestamp.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
 delete mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile
 delete mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go
 delete mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go
 delete mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go
 delete mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go
 create mode 100644 vendor/github.com/golang/protobuf/AUTHORS
 create mode 100644 vendor/github.com/golang/protobuf/CONTRIBUTORS
 create mode 100644 vendor/github.com/gorilla/context/.travis.yml
 rename vendor/github.com/{armpelionedge/edge-go-logger/vendor/github.com/op/go-logging => gorilla/context}/LICENSE (83%)
 create mode 100644 vendor/github.com/gorilla/context/README.md
 create mode 100644 vendor/github.com/gorilla/context/context.go
 create mode 100644 vendor/github.com/gorilla/context/doc.go
 mode change 100755 => 100644 vendor/github.com/onsi/ginkgo/before_pr.sh
 mode change 100755 => 100644 vendor/github.com/prometheus/procfs/ttar
 mode change 100755 => 100644 vendor/golang.org/x/sys/unix/mkall.sh
 mode change 100755 => 100644 vendor/golang.org/x/sys/unix/mkerrors.sh
 mode change 100755 => 100644 vendor/golang.org/x/sys/unix/mksyscall.pl
 mode change 100755 => 100644 vendor/golang.org/x/sys/unix/mksyscall_solaris.pl
 mode change 100755 => 100644 vendor/golang.org/x/sys/unix/mksysctl_openbsd.pl
 mode change 100755 => 100644 vendor/golang.org/x/sys/unix/mksysnum_darwin.pl
 mode change 100755 => 100644 vendor/golang.org/x/sys/unix/mksysnum_dragonfly.pl
 mode change 100755 => 100644 vendor/golang.org/x/sys/unix/mksysnum_freebsd.pl
 mode change 100755 => 100644 vendor/golang.org/x/sys/unix/mksysnum_netbsd.pl
 mode change 100755 => 100644 vendor/golang.org/x/sys/unix/mksysnum_openbsd.pl
 create mode 100644 vendor/modules.txt

diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..0afce37
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,33 @@
+module devicedb
+
+go 1.12
+
+require (
+	github.com/WigWagCo/wigwag-go-logger v0.0.0-20181019204055-004c5464cc0e
+	github.com/armPelionEdge/devicedb v0.0.0-20200515102353-d24df289ab24
+	github.com/armpelionedge/edge-go-logger v0.0.0-20190409215430-d3461e08601e // indirect
+	github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 // indirect
+	github.com/coreos/etcd v0.0.0-20170725052840-d2654f852232
+	github.com/gogo/protobuf v0.0.0-20180509162441-30cf7ac33676 // indirect
+	github.com/golang/protobuf v0.0.0-20170712042213-0a4f71a498b7 // indirect
+	github.com/golang/snappy v0.0.0-20160529050041-d9eb7a3d35ec // indirect
+	github.com/google/uuid v0.0.0-20171129191014-dec09d789f3d
+	github.com/gorilla/context v1.1.1 // indirect
+	github.com/gorilla/mux v0.0.0-20160902153343-0a192a193177
+	github.com/gorilla/websocket v0.0.0-20181012020131-76e4896901ef
+	github.com/mattn/go-runewidth v0.0.3 // indirect
+	github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
+	github.com/olekukonko/tablewriter v0.0.0-20180506121414-d4647c9c7a84
+	github.com/onsi/ginkgo v0.0.0-20171214073015-bc14b6691e7a
+	github.com/onsi/gomega v0.0.0-20171211090144-c1fb6682134d
+	github.com/op/go-logging v0.0.0-20160315200505-970db520ece7 // indirect
+	github.com/prometheus/client_golang v0.0.0-20180416233856-82f5ff156b29
+	github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5 // indirect
+	github.com/prometheus/common v0.0.0-20180426121432-d811d2e9bf89 // indirect
+	github.com/prometheus/procfs v0.0.0-20180408092902-8b1c2da0d56d // indirect
+	github.com/syndtr/goleveldb v0.0.0-20160629101233-ab8b5dcf1042
+	golang.org/x/net v0.0.0-20171212005608-d866cfc389ce
+	golang.org/x/sys v0.0.0-20180420145319-79b0c6888797 // indirect
+	golang.org/x/text v0.3.0 // indirect
+	gopkg.in/yaml.v2 v2.0.0-20160715033755-e4d366fc3c79
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..5444f91
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,52 @@
+github.com/WigWagCo/wigwag-go-logger v0.0.0-20181019204055-004c5464cc0e h1:Amr3mYmlmB/5YueqrWWG/7mo5Mv0EzQHhKtbgDptDUw=
+github.com/WigWagCo/wigwag-go-logger v0.0.0-20181019204055-004c5464cc0e/go.mod h1:PxuH3eap4v67O06/zC+Vkrbza/VvsVJW7rHPuIl6B08=
+github.com/armPelionEdge/devicedb v0.0.0-20200515102353-d24df289ab24 h1:j8ciCEom7nzhNQMfCWtLJ17OzMRVRizcZQ0fWOfep3I=
+github.com/armPelionEdge/devicedb v0.0.0-20200515102353-d24df289ab24/go.mod h1:zqZsTuNG59smzMpXRRocnwHtrj362SiYspY9paliKqA=
+github.com/armpelionedge/edge-go-logger v0.0.0-20190409215430-d3461e08601e/go.mod h1:1ItfoaDCSGAxd9XnSgYqa4UDisxBlohqm7Z3glZvlFo=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/coreos/etcd v0.0.0-20170725052840-d2654f852232 h1:PdSxpOyp4/9dbmVhwme7ntBClhuhzyCdg1F0W6Kz1h8=
+github.com/coreos/etcd v0.0.0-20170725052840-d2654f852232/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/gogo/protobuf v0.0.0-20180509162441-30cf7ac33676/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/golang/protobuf v0.0.0-20170712042213-0a4f71a498b7 h1:ulJ2cj/xlDlrwLCvWH4UeV9vJ/jXP6wEGgTSF7EOnmQ=
+github.com/golang/protobuf v0.0.0-20170712042213-0a4f71a498b7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/snappy v0.0.0-20160529050041-d9eb7a3d35ec h1:ZaSUjYC8aWT/om43c8YVz0SqjT8ABtqw7REbZGsCroE=
+github.com/golang/snappy v0.0.0-20160529050041-d9eb7a3d35ec/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/uuid v0.0.0-20171129191014-dec09d789f3d h1:rXQlD9GXkjA/PQZhmEaF/8Pj/sJfdZJK7GJG0gkS8I0=
+github.com/google/uuid v0.0.0-20171129191014-dec09d789f3d/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8=
+github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
+github.com/gorilla/mux v0.0.0-20160902153343-0a192a193177 h1:tpmaYHErQrYbUpVhYR+wWZ2osVDTDkNrZqb1QGxN6EA=
+github.com/gorilla/mux v0.0.0-20160902153343-0a192a193177/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/websocket v0.0.0-20181012020131-76e4896901ef h1:h/5b4X1nDmIAUyeLm6wSUOHnGP3Et2t2eKWb1DqT4jU=
+github.com/gorilla/websocket v0.0.0-20181012020131-76e4896901ef/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/mattn/go-runewidth v0.0.3 h1:a+kO+98RDGEfo6asOGMmpodZq4FNtnGP54yps8BzLR4=
+github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/olekukonko/tablewriter v0.0.0-20180506121414-d4647c9c7a84 h1:fiKJgB4JDUd43CApkmCeTSQlWjtTtABrU2qsgbuP0BI=
+github.com/olekukonko/tablewriter v0.0.0-20180506121414-d4647c9c7a84/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/onsi/ginkgo v0.0.0-20171214073015-bc14b6691e7a h1:XTJujLhqkWM1ZjzHwHoJOVIc3Md0LdwVkY+WCgSXrZw=
+github.com/onsi/ginkgo v0.0.0-20171214073015-bc14b6691e7a/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/gomega v0.0.0-20171211090144-c1fb6682134d h1:sWonj6zN4JQtFs9fhje3PtBzONOX3U9VBERNiUXGqdA=
+github.com/onsi/gomega v0.0.0-20171211090144-c1fb6682134d/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/op/go-logging v0.0.0-20160315200505-970db520ece7 h1:lDH9UUVJtmYCjyT0CI4q8xvlXPxeZ0gYCVvWbmPlp88=
+github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
+github.com/prometheus/client_golang v0.0.0-20180416233856-82f5ff156b29 h1:cQm+HhVskQGWd5IRnRXnQgQ1yJK98jyeEy2uBi3OFlM=
+github.com/prometheus/client_golang v0.0.0-20180416233856-82f5ff156b29/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5 h1:cLL6NowurKLMfCeQy4tIeph12XNQWgANCNvdyrOYKV4=
+github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/common v0.0.0-20180426121432-d811d2e9bf89 h1:7KBNF1zLlfEkG0K4axOT7B9GNzaIse6ELQL/w48P7KI=
+github.com/prometheus/common v0.0.0-20180426121432-d811d2e9bf89/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/procfs v0.0.0-20180408092902-8b1c2da0d56d h1:RCcsxyRr6+/pLg6wr0cUjPovhEhSNOtPh0SOz6u3hGU=
+github.com/prometheus/procfs v0.0.0-20180408092902-8b1c2da0d56d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/syndtr/goleveldb v0.0.0-20160629101233-ab8b5dcf1042 h1:pD5B8K/5y7ZHg87F+eVmCuVCWD8lcjM9yNRugsOj87U=
+github.com/syndtr/goleveldb v0.0.0-20160629101233-ab8b5dcf1042/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0=
+golang.org/x/net v0.0.0-20171212005608-d866cfc389ce h1:4g3VPcb++AP2cNa6CQ0iACUoH7J/3Jxojq0mmJun9A4=
+golang.org/x/net v0.0.0-20171212005608-d866cfc389ce/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/sys v0.0.0-20180420145319-79b0c6888797 h1:ux9vYny+vlzqIcwoO6gRu+voPvKJA10ZceuJwWf2J88=
+golang.org/x/sys v0.0.0-20180420145319-79b0c6888797/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+gopkg.in/yaml.v2 v2.0.0-20160715033755-e4d366fc3c79 h1:mENkfeXGmLV7lIyBeNdwYWdONek7pH9yHaHMgZyvIWE=
+gopkg.in/yaml.v2 v2.0.0-20160715033755-e4d366fc3c79/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
diff --git a/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/.travis.yml b/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/.travis.yml
deleted file mode 100644
index 70e012b..0000000
--- a/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/.travis.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-language: go
-
-go:
-  - 1.0
-  - 1.1
-  - tip
diff --git a/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/CHANGELOG.md b/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/CHANGELOG.md
deleted file mode 100644
index 4b7d233..0000000
--- a/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/CHANGELOG.md
+++ /dev/null
@@ -1,19 +0,0 @@
-# Changelog
-
-## 2.0.0-rc1 (2016-02-11)
-
-Time flies and it has been three years since this package was first released.
-There have been a couple of API changes I have wanted to do for some time but
-I've tried to maintain backwards compatibility. Some inconsistencies in the
-API have started to show, proper vendor support in Go out of the box and
-the fact that `go vet` will give warnings -- I have decided to bump the major
-version.
-
-* Make eg. `Info` and `Infof` do different things. You want to change all calls
-  to `Info` with a string format go to `Infof` etc. In many cases, `go vet` will
-  guide you.
-* `Id` in `Record` is now called `ID`
-
-## 1.0.0 (2013-02-21)
-
-Initial release
diff --git a/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/CONTRIBUTORS b/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/CONTRIBUTORS
deleted file mode 100644
index 958416e..0000000
--- a/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/CONTRIBUTORS
+++ /dev/null
@@ -1,5 +0,0 @@
-Alec Thomas
-Guilhem Lettron
-Ivan Daniluk
-Nimi Wariboko Jr
-Róbert Selvek
diff --git a/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/LICENSE b/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/LICENSE
deleted file mode 100644
index f1f6cfc..0000000
--- a/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2013 Örjan Persson. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/README.md b/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/README.md deleted file mode 100644 index 0a7326b..0000000 --- a/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/README.md +++ /dev/null @@ -1,93 +0,0 @@ -## Golang logging library - -[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/op/go-logging) [![build](https://img.shields.io/travis/op/go-logging.svg?style=flat)](https://travis-ci.org/op/go-logging) - -Package logging implements a logging infrastructure for Go. Its output format -is customizable and supports different logging backends like syslog, file and -memory. Multiple backends can be utilized with different log levels per backend -and logger. - -**_NOTE:_** backwards compatibility promise have been dropped for master. Please -vendor this package or use `gopkg.in/op/go-logging.v1` for previous version. See -[changelog](CHANGELOG.md) for details. - -## Example - -Let's have a look at an [example](examples/example.go) which demonstrates most -of the features found in this library. - -[![Example Output](examples/example.png)](examples/example.go) - -```go -package main - -import ( - "os" - - "github.com/op/go-logging" -) - -var log = logging.MustGetLogger("example") - -// Example format string. Everything except the message has a custom color -// which is dependent on the log level. Many fields have a custom output -// formatting too, eg. the time returns the hour down to the milli second. -var format = logging.MustStringFormatter( - `%{color}%{time:15:04:05.000} %{shortfunc} ▶ %{level:.4s} %{id:03x}%{color:reset} %{message}`, -) - -// Password is just an example type implementing the Redactor interface. Any -// time this is logged, the Redacted() function will be called. -type Password string - -func (p Password) Redacted() interface{} { - return logging.Redact(string(p)) -} - -func main() { - // For demo purposes, create two backend for os.Stderr. - backend1 := logging.NewLogBackend(os.Stderr, "", 0) - backend2 := logging.NewLogBackend(os.Stderr, "", 0) - - // For messages written to backend2 we want to add some additional - // information to the output, including the used log level and the name of - // the function. - backend2Formatter := logging.NewBackendFormatter(backend2, format) - - // Only errors and more severe messages should be sent to backend1 - backend1Leveled := logging.AddModuleLevel(backend1) - backend1Leveled.SetLevel(logging.ERROR, "") - - // Set the backends to be used. - logging.SetBackend(backend1Leveled, backend2Formatter) - - log.Debugf("debug %s", Password("secret")) - log.Info("info") - log.Notice("notice") - log.Warning("warning") - log.Error("err") - log.Critical("crit") -} -``` - -## Installing - -### Using *go get* - - $ go get github.com/op/go-logging - -After this command *go-logging* is ready to use. 
Its source will be in: - - $GOPATH/src/pkg/github.com/op/go-logging - -You can use `go get -u` to update the package. - -## Documentation - -For docs, see http://godoc.org/github.com/op/go-logging or run: - - $ godoc github.com/op/go-logging - -## Additional resources - -* [wslog](https://godoc.org/github.com/cryptix/exp/wslog) -- exposes log messages through a WebSocket. diff --git a/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/backend.go b/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/backend.go deleted file mode 100644 index 74d9201..0000000 --- a/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/backend.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2013, Örjan Persson. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package logging - -// defaultBackend is the backend used for all logging calls. -var defaultBackend LeveledBackend - -// Backend is the interface which a log backend need to implement to be able to -// be used as a logging backend. -type Backend interface { - Log(Level, int, *Record) error -} - -// SetBackend replaces the backend currently set with the given new logging -// backend. -func SetBackend(backends ...Backend) LeveledBackend { - var backend Backend - if len(backends) == 1 { - backend = backends[0] - } else { - backend = MultiLogger(backends...) - } - - defaultBackend = AddModuleLevel(backend) - return defaultBackend -} - -// SetLevel sets the logging level for the specified module. The module -// corresponds to the string specified in GetLogger. -func SetLevel(level Level, module string) { - defaultBackend.SetLevel(level, module) -} - -// GetLevel returns the logging level for the specified module. -func GetLevel(module string) Level { - return defaultBackend.GetLevel(module) -} diff --git a/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/format.go b/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/format.go deleted file mode 100644 index 7160674..0000000 --- a/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/format.go +++ /dev/null @@ -1,414 +0,0 @@ -// Copyright 2013, Örjan Persson. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package logging - -import ( - "bytes" - "errors" - "fmt" - "io" - "os" - "path" - "path/filepath" - "regexp" - "runtime" - "strconv" - "strings" - "sync" - "time" -) - -// TODO see Formatter interface in fmt/print.go -// TODO try text/template, maybe it have enough performance -// TODO other template systems? -// TODO make it possible to specify formats per backend? -type fmtVerb int - -const ( - fmtVerbTime fmtVerb = iota - fmtVerbLevel - fmtVerbID - fmtVerbPid - fmtVerbProgram - fmtVerbModule - fmtVerbMessage - fmtVerbLongfile - fmtVerbShortfile - fmtVerbLongpkg - fmtVerbShortpkg - fmtVerbLongfunc - fmtVerbShortfunc - fmtVerbCallpath - fmtVerbLevelColor - - // Keep last, there are no match for these below. 
- fmtVerbUnknown - fmtVerbStatic -) - -var fmtVerbs = []string{ - "time", - "level", - "id", - "pid", - "program", - "module", - "message", - "longfile", - "shortfile", - "longpkg", - "shortpkg", - "longfunc", - "shortfunc", - "callpath", - "color", -} - -const rfc3339Milli = "2006-01-02T15:04:05.999Z07:00" - -var defaultVerbsLayout = []string{ - rfc3339Milli, - "s", - "d", - "d", - "s", - "s", - "s", - "s", - "s", - "s", - "s", - "s", - "s", - "0", - "", -} - -var ( - pid = os.Getpid() - program = filepath.Base(os.Args[0]) -) - -func getFmtVerbByName(name string) fmtVerb { - for i, verb := range fmtVerbs { - if name == verb { - return fmtVerb(i) - } - } - return fmtVerbUnknown -} - -// Formatter is the required interface for a custom log record formatter. -type Formatter interface { - Format(calldepth int, r *Record, w io.Writer) error -} - -// formatter is used by all backends unless otherwise overriden. -var formatter struct { - sync.RWMutex - def Formatter -} - -func getFormatter() Formatter { - formatter.RLock() - defer formatter.RUnlock() - return formatter.def -} - -var ( - // DefaultFormatter is the default formatter used and is only the message. - DefaultFormatter = MustStringFormatter("%{message}") - - // GlogFormatter mimics the glog format - GlogFormatter = MustStringFormatter("%{level:.1s}%{time:0102 15:04:05.999999} %{pid} %{shortfile}] %{message}") -) - -// SetFormatter sets the default formatter for all new backends. A backend will -// fetch this value once it is needed to format a record. Note that backends -// will cache the formatter after the first point. For now, make sure to set -// the formatter before logging. -func SetFormatter(f Formatter) { - formatter.Lock() - defer formatter.Unlock() - formatter.def = f -} - -var formatRe = regexp.MustCompile(`%{([a-z]+)(?::(.*?[^\\]))?}`) - -type part struct { - verb fmtVerb - layout string -} - -// stringFormatter contains a list of parts which explains how to build the -// formatted string passed on to the logging backend. -type stringFormatter struct { - parts []part -} - -// NewStringFormatter returns a new Formatter which outputs the log record as a -// string based on the 'verbs' specified in the format string. -// -// The verbs: -// -// General: -// %{id} Sequence number for log message (uint64). -// %{pid} Process id (int) -// %{time} Time when log occurred (time.Time) -// %{level} Log level (Level) -// %{module} Module (string) -// %{program} Basename of os.Args[0] (string) -// %{message} Message (string) -// %{longfile} Full file name and line number: /a/b/c/d.go:23 -// %{shortfile} Final file name element and line number: d.go:23 -// %{callpath} Callpath like main.a.b.c...c "..." meaning recursive call ~. meaning truncated path -// %{color} ANSI color based on log level -// -// For normal types, the output can be customized by using the 'verbs' defined -// in the fmt package, eg. '%{id:04d}' to make the id output be '%04d' as the -// format string. -// -// For time.Time, use the same layout as time.Format to change the time format -// when output, eg "2006-01-02T15:04:05.999Z-07:00". -// -// For the 'color' verb, the output can be adjusted to either use bold colors, -// i.e., '%{color:bold}' or to reset the ANSI attributes, i.e., -// '%{color:reset}' Note that if you use the color verb explicitly, be sure to -// reset it or else the color state will persist past your log message. 
e.g., -// "%{color:bold}%{time:15:04:05} %{level:-8s}%{color:reset} %{message}" will -// just colorize the time and level, leaving the message uncolored. -// -// For the 'callpath' verb, the output can be adjusted to limit the printing -// the stack depth. i.e. '%{callpath:3}' will print '~.a.b.c' -// -// Colors on Windows is unfortunately not supported right now and is currently -// a no-op. -// -// There's also a couple of experimental 'verbs'. These are exposed to get -// feedback and needs a bit of tinkering. Hence, they might change in the -// future. -// -// Experimental: -// %{longpkg} Full package path, eg. github.com/go-logging -// %{shortpkg} Base package path, eg. go-logging -// %{longfunc} Full function name, eg. littleEndian.PutUint32 -// %{shortfunc} Base function name, eg. PutUint32 -// %{callpath} Call function path, eg. main.a.b.c -func NewStringFormatter(format string) (Formatter, error) { - var fmter = &stringFormatter{} - - // Find the boundaries of all %{vars} - matches := formatRe.FindAllStringSubmatchIndex(format, -1) - if matches == nil { - return nil, errors.New("logger: invalid log format: " + format) - } - - // Collect all variables and static text for the format - prev := 0 - for _, m := range matches { - start, end := m[0], m[1] - if start > prev { - fmter.add(fmtVerbStatic, format[prev:start]) - } - - name := format[m[2]:m[3]] - verb := getFmtVerbByName(name) - if verb == fmtVerbUnknown { - return nil, errors.New("logger: unknown variable: " + name) - } - - // Handle layout customizations or use the default. If this is not for the - // time, color formatting or callpath, we need to prefix with %. - layout := defaultVerbsLayout[verb] - if m[4] != -1 { - layout = format[m[4]:m[5]] - } - if verb != fmtVerbTime && verb != fmtVerbLevelColor && verb != fmtVerbCallpath { - layout = "%" + layout - } - - fmter.add(verb, layout) - prev = end - } - end := format[prev:] - if end != "" { - fmter.add(fmtVerbStatic, end) - } - - // Make a test run to make sure we can format it correctly. - t, err := time.Parse(time.RFC3339, "2010-02-04T21:00:57-08:00") - if err != nil { - panic(err) - } - testFmt := "hello %s" - r := &Record{ - ID: 12345, - Time: t, - Module: "logger", - Args: []interface{}{"go"}, - fmt: &testFmt, - } - if err := fmter.Format(0, r, &bytes.Buffer{}); err != nil { - return nil, err - } - - return fmter, nil -} - -// MustStringFormatter is equivalent to NewStringFormatter with a call to panic -// on error. 
-func MustStringFormatter(format string) Formatter { - f, err := NewStringFormatter(format) - if err != nil { - panic("Failed to initialized string formatter: " + err.Error()) - } - return f -} - -func (f *stringFormatter) add(verb fmtVerb, layout string) { - f.parts = append(f.parts, part{verb, layout}) -} - -func (f *stringFormatter) Format(calldepth int, r *Record, output io.Writer) error { - for _, part := range f.parts { - if part.verb == fmtVerbStatic { - output.Write([]byte(part.layout)) - } else if part.verb == fmtVerbTime { - output.Write([]byte(r.Time.Format(part.layout))) - } else if part.verb == fmtVerbLevelColor { - doFmtVerbLevelColor(part.layout, r.Level, output) - } else if part.verb == fmtVerbCallpath { - depth, err := strconv.Atoi(part.layout) - if err != nil { - depth = 0 - } - output.Write([]byte(formatCallpath(calldepth+1, depth))) - } else { - var v interface{} - switch part.verb { - case fmtVerbLevel: - v = r.Level - break - case fmtVerbID: - v = r.ID - break - case fmtVerbPid: - v = pid - break - case fmtVerbProgram: - v = program - break - case fmtVerbModule: - v = r.Module - break - case fmtVerbMessage: - v = r.Message() - break - case fmtVerbLongfile, fmtVerbShortfile: - _, file, line, ok := runtime.Caller(calldepth + 1) - if !ok { - file = "???" - line = 0 - } else if part.verb == fmtVerbShortfile { - file = filepath.Base(file) - } - v = fmt.Sprintf("%s:%d", file, line) - case fmtVerbLongfunc, fmtVerbShortfunc, - fmtVerbLongpkg, fmtVerbShortpkg: - // TODO cache pc - v = "???" - if pc, _, _, ok := runtime.Caller(calldepth + 1); ok { - if f := runtime.FuncForPC(pc); f != nil { - v = formatFuncName(part.verb, f.Name()) - } - } - default: - panic("unhandled format part") - } - fmt.Fprintf(output, part.layout, v) - } - } - return nil -} - -// formatFuncName tries to extract certain part of the runtime formatted -// function name to some pre-defined variation. -// -// This function is known to not work properly if the package path or name -// contains a dot. -func formatFuncName(v fmtVerb, f string) string { - i := strings.LastIndex(f, "/") - j := strings.Index(f[i+1:], ".") - if j < 1 { - return "???" - } - pkg, fun := f[:i+j+1], f[i+j+2:] - switch v { - case fmtVerbLongpkg: - return pkg - case fmtVerbShortpkg: - return path.Base(pkg) - case fmtVerbLongfunc: - return fun - case fmtVerbShortfunc: - i = strings.LastIndex(fun, ".") - return fun[i+1:] - } - panic("unexpected func formatter") -} - -func formatCallpath(calldepth int, depth int) string { - v := "" - callers := make([]uintptr, 64) - n := runtime.Callers(calldepth+2, callers) - oldPc := callers[n-1] - - start := n - 3 - if depth > 0 && start >= depth { - start = depth - 1 - v += "~." - } - recursiveCall := false - for i := start; i >= 0; i-- { - pc := callers[i] - if oldPc == pc { - recursiveCall = true - continue - } - oldPc = pc - if recursiveCall { - recursiveCall = false - v += ".." - } - if i < start { - v += "." - } - if f := runtime.FuncForPC(pc); f != nil { - v += formatFuncName(fmtVerbShortfunc, f.Name()) - } - } - return v -} - -// backendFormatter combines a backend with a specific formatter making it -// possible to have different log formats for different backends. -type backendFormatter struct { - b Backend - f Formatter -} - -// NewBackendFormatter creates a new backend which makes all records that -// passes through it beeing formatted by the specific formatter. 
-func NewBackendFormatter(b Backend, f Formatter) Backend { - return &backendFormatter{b, f} -} - -// Log implements the Log function required by the Backend interface. -func (bf *backendFormatter) Log(level Level, calldepth int, r *Record) error { - // Make a shallow copy of the record and replace any formatter - r2 := *r - r2.formatter = bf.f - return bf.b.Log(level, calldepth+1, &r2) -} diff --git a/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/level.go b/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/level.go deleted file mode 100644 index 98dd191..0000000 --- a/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/level.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2013, Örjan Persson. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package logging - -import ( - "errors" - "strings" - "sync" -) - -// ErrInvalidLogLevel is used when an invalid log level has been used. -var ErrInvalidLogLevel = errors.New("logger: invalid log level") - -// Level defines all available log levels for log messages. -type Level int - -// Log levels. -const ( - CRITICAL Level = iota - ERROR - WARNING - NOTICE - INFO - DEBUG -) - -var levelNames = []string{ - "CRITICAL", - "ERROR", - "WARNING", - "NOTICE", - "INFO", - "DEBUG", -} - -// String returns the string representation of a logging level. -func (p Level) String() string { - return levelNames[p] -} - -// LogLevel returns the log level from a string representation. -func LogLevel(level string) (Level, error) { - for i, name := range levelNames { - if strings.EqualFold(name, level) { - return Level(i), nil - } - } - return ERROR, ErrInvalidLogLevel -} - -// Leveled interface is the interface required to be able to add leveled -// logging. -type Leveled interface { - GetLevel(string) Level - SetLevel(Level, string) - IsEnabledFor(Level, string) bool -} - -// LeveledBackend is a log backend with additional knobs for setting levels on -// individual modules to different levels. -type LeveledBackend interface { - Backend - Leveled -} - -type moduleLeveled struct { - levels map[string]Level - backend Backend - formatter Formatter - once sync.Once -} - -// AddModuleLevel wraps a log backend with knobs to have different log levels -// for different modules. -func AddModuleLevel(backend Backend) LeveledBackend { - var leveled LeveledBackend - var ok bool - if leveled, ok = backend.(LeveledBackend); !ok { - leveled = &moduleLeveled{ - levels: make(map[string]Level), - backend: backend, - } - } - return leveled -} - -// GetLevel returns the log level for the given module. -func (l *moduleLeveled) GetLevel(module string) Level { - level, exists := l.levels[module] - if exists == false { - level, exists = l.levels[""] - // no configuration exists, default to debug - if exists == false { - level = DEBUG - } - } - return level -} - -// SetLevel sets the log level for the given module. -func (l *moduleLeveled) SetLevel(level Level, module string) { - l.levels[module] = level -} - -// IsEnabledFor will return true if logging is enabled for the given module. -func (l *moduleLeveled) IsEnabledFor(level Level, module string) bool { - return level <= l.GetLevel(module) -} - -func (l *moduleLeveled) Log(level Level, calldepth int, rec *Record) (err error) { - if l.IsEnabledFor(level, rec.Module) { - // TODO get rid of traces of formatter here. BackendFormatter should be used. 
- rec.formatter = l.getFormatterAndCacheCurrent() - err = l.backend.Log(level, calldepth+1, rec) - } - return -} - -func (l *moduleLeveled) getFormatterAndCacheCurrent() Formatter { - l.once.Do(func() { - if l.formatter == nil { - l.formatter = getFormatter() - } - }) - return l.formatter -} diff --git a/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/log_nix.go b/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/log_nix.go deleted file mode 100644 index 4ff2ab1..0000000 --- a/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/log_nix.go +++ /dev/null @@ -1,109 +0,0 @@ -// +build !windows - -// Copyright 2013, Örjan Persson. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package logging - -import ( - "bytes" - "fmt" - "io" - "log" -) - -type color int - -const ( - ColorBlack = iota + 30 - ColorRed - ColorGreen - ColorYellow - ColorBlue - ColorMagenta - ColorCyan - ColorWhite -) - -var ( - colors = []string{ - CRITICAL: ColorSeq(ColorMagenta), - ERROR: ColorSeq(ColorRed), - WARNING: ColorSeq(ColorYellow), - NOTICE: ColorSeq(ColorGreen), - DEBUG: ColorSeq(ColorCyan), - } - boldcolors = []string{ - CRITICAL: ColorSeqBold(ColorMagenta), - ERROR: ColorSeqBold(ColorRed), - WARNING: ColorSeqBold(ColorYellow), - NOTICE: ColorSeqBold(ColorGreen), - DEBUG: ColorSeqBold(ColorCyan), - } -) - -// LogBackend utilizes the standard log module. -type LogBackend struct { - Logger *log.Logger - Color bool - ColorConfig []string -} - -// NewLogBackend creates a new LogBackend. -func NewLogBackend(out io.Writer, prefix string, flag int) *LogBackend { - return &LogBackend{Logger: log.New(out, prefix, flag)} -} - -// Log implements the Backend interface. -func (b *LogBackend) Log(level Level, calldepth int, rec *Record) error { - if b.Color { - col := colors[level] - if len(b.ColorConfig) > int(level) && b.ColorConfig[level] != "" { - col = b.ColorConfig[level] - } - - buf := &bytes.Buffer{} - buf.Write([]byte(col)) - buf.Write([]byte(rec.Formatted(calldepth + 1))) - buf.Write([]byte("\033[0m")) - // For some reason, the Go logger arbitrarily decided "2" was the correct - // call depth... 
- return b.Logger.Output(calldepth+2, buf.String()) - } - - return b.Logger.Output(calldepth+2, rec.Formatted(calldepth+1)) -} - -// ConvertColors takes a list of ints representing colors for log levels and -// converts them into strings for ANSI color formatting -func ConvertColors(colors []int, bold bool) []string { - converted := []string{} - for _, i := range colors { - if bold { - converted = append(converted, ColorSeqBold(color(i))) - } else { - converted = append(converted, ColorSeq(color(i))) - } - } - - return converted -} - -func ColorSeq(color color) string { - return fmt.Sprintf("\033[%dm", int(color)) -} - -func ColorSeqBold(color color) string { - return fmt.Sprintf("\033[%d;1m", int(color)) -} - -func doFmtVerbLevelColor(layout string, level Level, output io.Writer) { - if layout == "bold" { - output.Write([]byte(boldcolors[level])) - } else if layout == "reset" { - output.Write([]byte("\033[0m")) - } else { - output.Write([]byte(colors[level])) - } -} diff --git a/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/log_windows.go b/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/log_windows.go deleted file mode 100644 index b8dc92c..0000000 --- a/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/log_windows.go +++ /dev/null @@ -1,107 +0,0 @@ -// +build windows -// Copyright 2013, Örjan Persson. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package logging - -import ( - "bytes" - "io" - "log" - "syscall" -) - -var ( - kernel32DLL = syscall.NewLazyDLL("kernel32.dll") - setConsoleTextAttributeProc = kernel32DLL.NewProc("SetConsoleTextAttribute") -) - -// Character attributes -// Note: -// -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan). -// Clearing all foreground or background colors results in black; setting all creates white. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes. -const ( - fgBlack = 0x0000 - fgBlue = 0x0001 - fgGreen = 0x0002 - fgCyan = 0x0003 - fgRed = 0x0004 - fgMagenta = 0x0005 - fgYellow = 0x0006 - fgWhite = 0x0007 - fgIntensity = 0x0008 - fgMask = 0x000F -) - -var ( - colors = []uint16{ - INFO: fgWhite, - CRITICAL: fgMagenta, - ERROR: fgRed, - WARNING: fgYellow, - NOTICE: fgGreen, - DEBUG: fgCyan, - } - boldcolors = []uint16{ - INFO: fgWhite | fgIntensity, - CRITICAL: fgMagenta | fgIntensity, - ERROR: fgRed | fgIntensity, - WARNING: fgYellow | fgIntensity, - NOTICE: fgGreen | fgIntensity, - DEBUG: fgCyan | fgIntensity, - } -) - -type file interface { - Fd() uintptr -} - -// LogBackend utilizes the standard log module. -type LogBackend struct { - Logger *log.Logger - Color bool - - // f is set to a non-nil value if the underlying writer which logs writes to - // implements the file interface. This makes us able to colorise the output. - f file -} - -// NewLogBackend creates a new LogBackend. -func NewLogBackend(out io.Writer, prefix string, flag int) *LogBackend { - b := &LogBackend{Logger: log.New(out, prefix, flag)} - - // Unfortunately, the API used only takes an io.Writer where the Windows API - // need the actual fd to change colors. 
- if f, ok := out.(file); ok { - b.f = f - } - - return b -} - -func (b *LogBackend) Log(level Level, calldepth int, rec *Record) error { - if b.Color && b.f != nil { - buf := &bytes.Buffer{} - setConsoleTextAttribute(b.f, colors[level]) - buf.Write([]byte(rec.Formatted(calldepth + 1))) - err := b.Logger.Output(calldepth+2, buf.String()) - setConsoleTextAttribute(b.f, fgWhite) - return err - } - return b.Logger.Output(calldepth+2, rec.Formatted(calldepth+1)) -} - -// setConsoleTextAttribute sets the attributes of characters written to the -// console screen buffer by the WriteFile or WriteConsole function. -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx. -func setConsoleTextAttribute(f file, attribute uint16) bool { - ok, _, _ := setConsoleTextAttributeProc.Call(f.Fd(), uintptr(attribute), 0) - return ok != 0 -} - -func doFmtVerbLevelColor(layout string, level Level, output io.Writer) { - // TODO not supported on Windows since the io.Writer here is actually a - // bytes.Buffer. -} diff --git a/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/logger.go b/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/logger.go deleted file mode 100644 index 535ed9b..0000000 --- a/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/logger.go +++ /dev/null @@ -1,259 +0,0 @@ -// Copyright 2013, Örjan Persson. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package logging implements a logging infrastructure for Go. It supports -// different logging backends like syslog, file and memory. Multiple backends -// can be utilized with different log levels per backend and logger. -package logging - -import ( - "bytes" - "fmt" - "log" - "os" - "strings" - "sync/atomic" - "time" -) - -// Redactor is an interface for types that may contain sensitive information -// (like passwords), which shouldn't be printed to the log. The idea was found -// in relog as part of the vitness project. -type Redactor interface { - Redacted() interface{} -} - -// Redact returns a string of * having the same length as s. -func Redact(s string) string { - return strings.Repeat("*", len(s)) -} - -var ( - // Sequence number is incremented and utilized for all log records created. - sequenceNo uint64 - - // timeNow is a customizable for testing purposes. - timeNow = time.Now -) - -// Record represents a log record and contains the timestamp when the record -// was created, an increasing id, filename and line and finally the actual -// formatted log line. -type Record struct { - ID uint64 - Time time.Time - Module string - Level Level - Args []interface{} - - // message is kept as a pointer to have shallow copies update this once - // needed. - message *string - fmt *string - formatter Formatter - formatted string -} - -// Formatted returns the formatted log record string. -func (r *Record) Formatted(calldepth int) string { - if r.formatted == "" { - var buf bytes.Buffer - r.formatter.Format(calldepth+1, r, &buf) - r.formatted = buf.String() - } - return r.formatted -} - -// Message returns the log record message. -func (r *Record) Message() string { - if r.message == nil { - // Redact the arguments that implements the Redactor interface - for i, arg := range r.Args { - if redactor, ok := arg.(Redactor); ok == true { - r.Args[i] = redactor.Redacted() - } - } - var buf bytes.Buffer - if r.fmt != nil { - fmt.Fprintf(&buf, *r.fmt, r.Args...) 
- } else { - // use Fprintln to make sure we always get space between arguments - fmt.Fprintln(&buf, r.Args...) - buf.Truncate(buf.Len() - 1) // strip newline - } - msg := buf.String() - r.message = &msg - } - return *r.message -} - -// Logger is the actual logger which creates log records based on the functions -// called and passes them to the underlying logging backend. -type Logger struct { - Module string - backend LeveledBackend - haveBackend bool - - // ExtraCallDepth can be used to add additional call depth when getting the - // calling function. This is normally used when wrapping a logger. - ExtraCalldepth int -} - -// SetBackend overrides any previously defined backend for this logger. -func (l *Logger) SetBackend(backend LeveledBackend) { - l.backend = backend - l.haveBackend = true -} - -// TODO call NewLogger and remove MustGetLogger? - -// GetLogger creates and returns a Logger object based on the module name. -func GetLogger(module string) (*Logger, error) { - return &Logger{Module: module}, nil -} - -// MustGetLogger is like GetLogger but panics if the logger can't be created. -// It simplifies safe initialization of a global logger for eg. a package. -func MustGetLogger(module string) *Logger { - logger, err := GetLogger(module) - if err != nil { - panic("logger: " + module + ": " + err.Error()) - } - return logger -} - -// Reset restores the internal state of the logging library. -func Reset() { - // TODO make a global Init() method to be less magic? or make it such that - // if there's no backends at all configured, we could use some tricks to - // automatically setup backends based if we have a TTY or not. - sequenceNo = 0 - b := SetBackend(NewLogBackend(os.Stderr, "", log.LstdFlags)) - b.SetLevel(DEBUG, "") - SetFormatter(DefaultFormatter) - timeNow = time.Now -} - -// IsEnabledFor returns true if the logger is enabled for the given level. -func (l *Logger) IsEnabledFor(level Level) bool { - return defaultBackend.IsEnabledFor(level, l.Module) -} - -func (l *Logger) log(lvl Level, format *string, args ...interface{}) { - if !l.IsEnabledFor(lvl) { - return - } - - // Create the logging record and pass it in to the backend - record := &Record{ - ID: atomic.AddUint64(&sequenceNo, 1), - Time: timeNow(), - Module: l.Module, - Level: lvl, - fmt: format, - Args: args, - } - - // TODO use channels to fan out the records to all backends? - // TODO in case of errors, do something (tricky) - - // calldepth=2 brings the stack up to the caller of the level - // methods, Info(), Fatal(), etc. - // ExtraCallDepth allows this to be extended further up the stack in case we - // are wrapping these methods, eg. to expose them package level - if l.haveBackend { - l.backend.Log(lvl, 2+l.ExtraCalldepth, record) - return - } - - defaultBackend.Log(lvl, 2+l.ExtraCalldepth, record) -} - -// Fatal is equivalent to l.Critical(fmt.Sprint()) followed by a call to os.Exit(1). -func (l *Logger) Fatal(args ...interface{}) { - l.log(CRITICAL, nil, args...) - os.Exit(1) -} - -// Fatalf is equivalent to l.Critical followed by a call to os.Exit(1). -func (l *Logger) Fatalf(format string, args ...interface{}) { - l.log(CRITICAL, &format, args...) - os.Exit(1) -} - -// Panic is equivalent to l.Critical(fmt.Sprint()) followed by a call to panic(). -func (l *Logger) Panic(args ...interface{}) { - l.log(CRITICAL, nil, args...) - panic(fmt.Sprint(args...)) -} - -// Panicf is equivalent to l.Critical followed by a call to panic(). 
-func (l *Logger) Panicf(format string, args ...interface{}) {
- l.log(CRITICAL, &format, args...)
- panic(fmt.Sprintf(format, args...))
-}
-
-// Critical logs a message using CRITICAL as log level.
-func (l *Logger) Critical(args ...interface{}) {
- l.log(CRITICAL, nil, args...)
-}
-
-// Criticalf logs a message using CRITICAL as log level.
-func (l *Logger) Criticalf(format string, args ...interface{}) {
- l.log(CRITICAL, &format, args...)
-}
-
-// Error logs a message using ERROR as log level.
-func (l *Logger) Error(args ...interface{}) {
- l.log(ERROR, nil, args...)
-}
-
-// Errorf logs a message using ERROR as log level.
-func (l *Logger) Errorf(format string, args ...interface{}) {
- l.log(ERROR, &format, args...)
-}
-
-// Warning logs a message using WARNING as log level.
-func (l *Logger) Warning(args ...interface{}) {
- l.log(WARNING, nil, args...)
-}
-
-// Warningf logs a message using WARNING as log level.
-func (l *Logger) Warningf(format string, args ...interface{}) {
- l.log(WARNING, &format, args...)
-}
-
-// Notice logs a message using NOTICE as log level.
-func (l *Logger) Notice(args ...interface{}) {
- l.log(NOTICE, nil, args...)
-}
-
-// Noticef logs a message using NOTICE as log level.
-func (l *Logger) Noticef(format string, args ...interface{}) {
- l.log(NOTICE, &format, args...)
-}
-
-// Info logs a message using INFO as log level.
-func (l *Logger) Info(args ...interface{}) {
- l.log(INFO, nil, args...)
-}
-
-// Infof logs a message using INFO as log level.
-func (l *Logger) Infof(format string, args ...interface{}) {
- l.log(INFO, &format, args...)
-}
-
-// Debug logs a message using DEBUG as log level.
-func (l *Logger) Debug(args ...interface{}) {
- l.log(DEBUG, nil, args...)
-}
-
-// Debugf logs a message using DEBUG as log level.
-func (l *Logger) Debugf(format string, args ...interface{}) {
- l.log(DEBUG, &format, args...)
-}
-
-func init() {
- Reset()
-}
diff --git a/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/memory.go b/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/memory.go
deleted file mode 100644
index 8d5152c..0000000
--- a/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/memory.go
+++ /dev/null
@@ -1,237 +0,0 @@
-// Copyright 2013, Örjan Persson. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !appengine
-
-package logging
-
-import (
- "sync"
- "sync/atomic"
- "time"
- "unsafe"
-)
-
-// TODO pick one of the memory backends and stick with it or share interface.
-
-// InitForTesting is a convenient method when using logging in a test. Once
-// called, the time will be frozen to January 1, 1970 UTC.
-func InitForTesting(level Level) *MemoryBackend {
- Reset()
-
- memoryBackend := NewMemoryBackend(10240)
-
- leveledBackend := AddModuleLevel(memoryBackend)
- leveledBackend.SetLevel(level, "")
- SetBackend(leveledBackend)
-
- timeNow = func() time.Time {
- return time.Unix(0, 0).UTC()
- }
- return memoryBackend
-}
-
-// Node is a record node pointing to an optional next node.
-type node struct {
- next *node
- Record *Record
-}
-
-// Next returns the next record node. If there's no node available, it will
-// return nil.
-func (n *node) Next() *node {
- return n.next
-}
-
-// MemoryBackend is a simple memory based logging backend that will not produce
-// any output but merely keep records, up to the given size, in memory. 
-type MemoryBackend struct { - size int32 - maxSize int32 - head, tail unsafe.Pointer -} - -// NewMemoryBackend creates a simple in-memory logging backend. -func NewMemoryBackend(size int) *MemoryBackend { - return &MemoryBackend{maxSize: int32(size)} -} - -// Log implements the Log method required by Backend. -func (b *MemoryBackend) Log(level Level, calldepth int, rec *Record) error { - var size int32 - - n := &node{Record: rec} - np := unsafe.Pointer(n) - - // Add the record to the tail. If there's no records available, tail and - // head will both be nil. When we successfully set the tail and the previous - // value was nil, it's safe to set the head to the current value too. - for { - tailp := b.tail - swapped := atomic.CompareAndSwapPointer( - &b.tail, - tailp, - np, - ) - if swapped == true { - if tailp == nil { - b.head = np - } else { - (*node)(tailp).next = n - } - size = atomic.AddInt32(&b.size, 1) - break - } - } - - // Since one record was added, we might have overflowed the list. Remove - // a record if that is the case. The size will fluctate a bit, but - // eventual consistent. - if b.maxSize > 0 && size > b.maxSize { - for { - headp := b.head - head := (*node)(b.head) - if head.next == nil { - break - } - swapped := atomic.CompareAndSwapPointer( - &b.head, - headp, - unsafe.Pointer(head.next), - ) - if swapped == true { - atomic.AddInt32(&b.size, -1) - break - } - } - } - return nil -} - -// Head returns the oldest record node kept in memory. It can be used to -// iterate over records, one by one, up to the last record. -// -// Note: new records can get added while iterating. Hence the number of records -// iterated over might be larger than the maximum size. -func (b *MemoryBackend) Head() *node { - return (*node)(b.head) -} - -type event int - -const ( - eventFlush event = iota - eventStop -) - -// ChannelMemoryBackend is very similar to the MemoryBackend, except that it -// internally utilizes a channel. -type ChannelMemoryBackend struct { - maxSize int - size int - incoming chan *Record - events chan event - mu sync.Mutex - running bool - flushWg sync.WaitGroup - stopWg sync.WaitGroup - head, tail *node -} - -// NewChannelMemoryBackend creates a simple in-memory logging backend which -// utilizes a go channel for communication. -// -// Start will automatically be called by this function. -func NewChannelMemoryBackend(size int) *ChannelMemoryBackend { - backend := &ChannelMemoryBackend{ - maxSize: size, - incoming: make(chan *Record, 1024), - events: make(chan event), - } - backend.Start() - return backend -} - -// Start launches the internal goroutine which starts processing data from the -// input channel. -func (b *ChannelMemoryBackend) Start() { - b.mu.Lock() - defer b.mu.Unlock() - - // Launch the goroutine unless it's already running. 
- if b.running != true {
- b.running = true
- b.stopWg.Add(1)
- go b.process()
- }
-}
-
-func (b *ChannelMemoryBackend) process() {
- defer b.stopWg.Done()
- for {
- select {
- case rec := <-b.incoming:
- b.insertRecord(rec)
- case e := <-b.events:
- switch e {
- case eventStop:
- return
- case eventFlush:
- for len(b.incoming) > 0 {
- b.insertRecord(<-b.incoming)
- }
- b.flushWg.Done()
- }
- }
- }
-}
-
-func (b *ChannelMemoryBackend) insertRecord(rec *Record) {
- prev := b.tail
- b.tail = &node{Record: rec}
- if prev == nil {
- b.head = b.tail
- } else {
- prev.next = b.tail
- }
-
- if b.maxSize > 0 && b.size >= b.maxSize {
- b.head = b.head.next
- } else {
- b.size++
- }
-}
-
-// Flush waits until all records in the buffered channel have been processed.
-func (b *ChannelMemoryBackend) Flush() {
- b.flushWg.Add(1)
- b.events <- eventFlush
- b.flushWg.Wait()
-}
-
-// Stop signals the internal goroutine to exit and waits until it has.
-func (b *ChannelMemoryBackend) Stop() {
- b.mu.Lock()
- if b.running == true {
- b.running = false
- b.events <- eventStop
- }
- b.mu.Unlock()
- b.stopWg.Wait()
-}
-
-// Log implements the Log method required by Backend.
-func (b *ChannelMemoryBackend) Log(level Level, calldepth int, rec *Record) error {
- b.incoming <- rec
- return nil
-}
-
-// Head returns the oldest record node kept in memory. It can be used to
-// iterate over records, one by one, up to the last record.
-//
-// Note: new records can get added while iterating. Hence the number of records
-// iterated over might be larger than the maximum size.
-func (b *ChannelMemoryBackend) Head() *node {
- return b.head
-}
diff --git a/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/multi.go b/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/multi.go
deleted file mode 100644
index 3731653..0000000
--- a/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/multi.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2013, Örjan Persson. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package logging
-
-// TODO remove Level stuff from the multi logger. Do one thing.
-
-// multiLogger is a log multiplexer which can be used to utilize multiple log
-// backends at once.
-type multiLogger struct {
- backends []LeveledBackend
-}
-
-// MultiLogger creates a logger which contains multiple loggers.
-func MultiLogger(backends ...Backend) LeveledBackend {
- var leveledBackends []LeveledBackend
- for _, backend := range backends {
- leveledBackends = append(leveledBackends, AddModuleLevel(backend))
- }
- return &multiLogger{leveledBackends}
-}
-
-// Log passes the log record to all backends.
-func (b *multiLogger) Log(level Level, calldepth int, rec *Record) (err error) {
- for _, backend := range b.backends {
- if backend.IsEnabledFor(level, rec.Module) {
- // Shallow copy of the record for the formatted cache on Record and get the
- // record formatter from the backend.
- r2 := *rec
- if e := backend.Log(level, calldepth+1, &r2); e != nil {
- err = e
- }
- }
- }
- return
-}
-
-// GetLevel returns the highest level enabled by all backends.
-func (b *multiLogger) GetLevel(module string) Level {
- var level Level
- for _, backend := range b.backends {
- if backendLevel := backend.GetLevel(module); backendLevel > level {
- level = backendLevel
- }
- }
- return level
-}
-
-// SetLevel propagates the same level to all backends. 
-func (b *multiLogger) SetLevel(level Level, module string) { - for _, backend := range b.backends { - backend.SetLevel(level, module) - } -} - -// IsEnabledFor returns true if any of the backends are enabled for it. -func (b *multiLogger) IsEnabledFor(level Level, module string) bool { - for _, backend := range b.backends { - if backend.IsEnabledFor(level, module) { - return true - } - } - return false -} diff --git a/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/syslog.go b/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/syslog.go deleted file mode 100644 index 4faa531..0000000 --- a/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/syslog.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2013, Örjan Persson. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//+build !windows,!plan9 - -package logging - -import "log/syslog" - -// SyslogBackend is a simple logger to syslog backend. It automatically maps -// the internal log levels to appropriate syslog log levels. -type SyslogBackend struct { - Writer *syslog.Writer -} - -// NewSyslogBackend connects to the syslog daemon using UNIX sockets with the -// given prefix. If prefix is not given, the prefix will be derived from the -// launched command. -func NewSyslogBackend(prefix string) (b *SyslogBackend, err error) { - var w *syslog.Writer - w, err = syslog.New(syslog.LOG_CRIT, prefix) - return &SyslogBackend{w}, err -} - -// NewSyslogBackendPriority is the same as NewSyslogBackend, but with custom -// syslog priority, like syslog.LOG_LOCAL3|syslog.LOG_DEBUG etc. -func NewSyslogBackendPriority(prefix string, priority syslog.Priority) (b *SyslogBackend, err error) { - var w *syslog.Writer - w, err = syslog.New(priority, prefix) - return &SyslogBackend{w}, err -} - -// Log implements the Backend interface. -func (b *SyslogBackend) Log(level Level, calldepth int, rec *Record) error { - line := rec.Formatted(calldepth + 1) - switch level { - case CRITICAL: - return b.Writer.Crit(line) - case ERROR: - return b.Writer.Err(line) - case WARNING: - return b.Writer.Warning(line) - case NOTICE: - return b.Writer.Notice(line) - case INFO: - return b.Writer.Info(line) - case DEBUG: - return b.Writer.Debug(line) - default: - } - panic("unhandled log level") -} diff --git a/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/syslog_fallback.go b/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/syslog_fallback.go deleted file mode 100644 index 91bc18d..0000000 --- a/vendor/github.com/WigWagCo/wigwag-go-logger/vendor/github.com/op/go-logging/syslog_fallback.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2013, Örjan Persson. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//+build windows plan9 - -package logging - -import ( - "fmt" -) - -type Priority int - -type SyslogBackend struct { -} - -func NewSyslogBackend(prefix string) (b *SyslogBackend, err error) { - return nil, fmt.Errorf("Platform does not support syslog") -} - -func NewSyslogBackendPriority(prefix string, priority Priority) (b *SyslogBackend, err error) { - return nil, fmt.Errorf("Platform does not support syslog") -} - -func (b *SyslogBackend) Log(level Level, calldepth int, rec *Record) error { - return fmt.Errorf("Platform does not support syslog") -} diff --git a/vendor/github.com/armPelionEdge/devicedb/LICENSE b/vendor/github.com/armPelionEdge/devicedb/LICENSE new file mode 100644 index 0000000..c52d180 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 jrife + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/armPelionEdge/devicedb/alerts/alert.go b/vendor/github.com/armPelionEdge/devicedb/alerts/alert.go new file mode 100644 index 0000000..17efd90 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/alerts/alert.go @@ -0,0 +1,33 @@ +package alerts +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. 
+ // + + +type Alert struct { + Key string `json:"key"` + Level string `json:"level"` + Timestamp uint64 `json:"timestamp"` + Metadata interface{} `json:"metadata"` + Status bool `json:"status"` +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/alerts/alert_map.go b/vendor/github.com/armPelionEdge/devicedb/alerts/alert_map.go new file mode 100644 index 0000000..9504990 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/alerts/alert_map.go @@ -0,0 +1,93 @@ +package alerts +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "sync" +) + +type AlertStore interface { + Put(alert Alert) error + DeleteAll(alerts map[string]Alert) error + ForEach(func(alert Alert)) error +} + +type AlertMap struct { + mu sync.Mutex + alertStore AlertStore +} + +func NewAlertMap(alertStore AlertStore) *AlertMap { + return &AlertMap{ + alertStore: alertStore, + } +} + +func (alertMap *AlertMap) UpdateAlert(alert Alert) error { + alertMap.mu.Lock() + defer alertMap.mu.Unlock() + + return alertMap.alertStore.Put(alert) +} + +func (alertMap *AlertMap) GetAlerts() (map[string]Alert, error) { + var alerts map[string]Alert = make(map[string]Alert) + + err := alertMap.alertStore.ForEach(func(alert Alert) { + alerts[alert.Key] = alert + }) + + if err != nil { + return nil, err + } + + return alerts, nil +} + +// Blocks calls to UpdateAlert() +func (alertMap *AlertMap) ClearAlerts(alerts map[string]Alert) error { + alertMap.mu.Lock() + defer alertMap.mu.Unlock() + + var deleteAlerts map[string]Alert = make(map[string]Alert, len(alerts)) + + for _, a := range alerts { + deleteAlerts[a.Key] = a + } + + err := alertMap.alertStore.ForEach(func(alert Alert) { + if a, ok := alerts[alert.Key]; ok && alert.Timestamp != a.Timestamp { + // This shouldn't be deleted since its value was changed since + // reading. The new value will need to be forwarded later + delete(deleteAlerts, a.Key) + } + }) + + if err != nil { + return err + } + + return alertMap.alertStore.DeleteAll(deleteAlerts) +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/alerts/alert_store.go b/vendor/github.com/armPelionEdge/devicedb/alerts/alert_store.go new file mode 100644 index 0000000..f2edfd3 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/alerts/alert_store.go @@ -0,0 +1,85 @@ +package alerts +// + // Copyright (c) 2019 ARM Limited. 
+ // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "encoding/json" + "github.com/armPelionEdge/devicedb/storage" +) + +type AlertStoreImpl struct { + storageDriver storage.StorageDriver +} + +func NewAlertStore(storageDriver storage.StorageDriver) *AlertStoreImpl { + return &AlertStoreImpl{ + storageDriver: storageDriver, + } +} + +func (alertStore *AlertStoreImpl) Put(alert Alert) error { + encodedAlert, err := json.Marshal(alert) + + if err != nil { + return err + } + + batch := storage.NewBatch() + batch.Put([]byte(alert.Key), encodedAlert) + + return alertStore.storageDriver.Batch(batch) +} + +func (alertStore *AlertStoreImpl) DeleteAll(alerts map[string]Alert) error { + batch := storage.NewBatch() + + for _, alert := range alerts { + batch.Delete([]byte(alert.Key)) + } + + return alertStore.storageDriver.Batch(batch) +} + +func (alertStore *AlertStoreImpl) ForEach(cb func(alert Alert)) error { + iter, err := alertStore.storageDriver.GetMatches([][]byte{ []byte{ } }) + + if err != nil { + return err + } + + defer iter.Release() + + for iter.Next() { + var alert Alert + + if err := json.Unmarshal(iter.Value(), &alert); err != nil { + return err + } + + cb(alert) + } + + return iter.Error() +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/benchmarks/many_relays.go b/vendor/github.com/armPelionEdge/devicedb/benchmarks/many_relays.go new file mode 100644 index 0000000..b069aca --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/benchmarks/many_relays.go @@ -0,0 +1,191 @@ +package benchmarks +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. 
+ // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "context" + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "io/ioutil" + "math/rand" + "os" + "time" + + "github.com/armPelionEdge/devicedb/bucket" + "github.com/armPelionEdge/devicedb/client" + "github.com/armPelionEdge/devicedb/data" + "github.com/armPelionEdge/devicedb/server" + "github.com/armPelionEdge/devicedb/sync" + "github.com/armPelionEdge/devicedb/util" +) + +// Simulates many relays distributed across multiple sites submitting updates at a set rate +func BenchmarkManyRelays(cloudAddresses []string, internalAddresses []string, nSites int, nRelaysPerSite int, updatesPerSecond int, syncPeriodMS int) { + //var relays []server.Server = make([]server.Server, 0, nSites * nRelaysPerSite) + var nextPort = 10000 + + apiClient := client.New(client.APIClientConfig{ + Servers: internalAddresses, + }) + + // Initialize cluster state + for i := 0; i < nSites; i++ { + siteID := fmt.Sprintf("site-%d", i) + + fmt.Fprintf(os.Stdout, "Create site %s\n", siteID) + + if err := apiClient.AddSite(context.TODO(), siteID); err != nil { + fmt.Fprintf(os.Stderr, "Error: Unable to create site: %v. Aborting...\n", err) + + os.Exit(1) + } + + for j := 0; j < nRelaysPerSite; j++ { + relayID := fmt.Sprintf("relay-%s-%d", siteID, j) + + fmt.Fprintf(os.Stdout, "Create relay %s\n", relayID) + + if err := apiClient.AddRelay(context.TODO(), relayID); err != nil { + fmt.Fprintf(os.Stderr, "Error: Unable to add relay: %v. Aborting...\n", err) + + os.Exit(1) + } + + fmt.Fprintf(os.Stdout, "Moving relay %s to site %s\n", relayID, siteID) + + if err := apiClient.MoveRelay(context.TODO(), relayID, siteID); err != nil { + fmt.Fprintf(os.Stderr, "Error: Unable to move relay: %v. Aborting...\n", err) + + os.Exit(1) + } + + relay, syncController, hub := makeRelay(relayID, syncPeriodMS, updatesPerSecond, nextPort) + nextPort++ + + go func() { + err := relay.Start() + + fmt.Fprintf(os.Stderr, "Error: Relay start error: %v. Aborting...\n", err) + + os.Exit(1) + }() + + go func(relay *server.Server, hub *server.Hub) { + if updatesPerSecond == 0 { + return + } + + for { + <-time.After(time.Second / time.Duration(updatesPerSecond)) + updateBatch := bucket.NewUpdateBatch() + key := util.RandomString() + value := util.RandomString() + fmt.Fprintf(os.Stderr, "Writing %s to key %s\n", value, key) + updateBatch.Put([]byte(key), []byte(value), data.NewDVV(data.NewDot("", 0), map[string]uint64{ })) + updatedSiblingSets, err := relay.Buckets().Get("default").Batch(updateBatch) + hub.BroadcastUpdate("", "default", updatedSiblingSets, 64) + + if err != nil { + fmt.Fprintf(os.Stderr, "Error: Unable to write batch: %v. Aborting...\n", err) + + os.Exit(1) + } + } + }(relay, hub) + + syncController.Start() + + randomCloudAddress := cloudAddresses[int(rand.Uint32() % uint32(len(cloudAddresses)))] + + if err := hub.ConnectCloud("cloud", randomCloudAddress, "", "", "", "", true); err != nil { + fmt.Fprintf(os.Stderr, "Error: Unable to initiate cloud connection process: %v. 
Aborting...\n", err) + + os.Exit(1) + } + } + } + + <-time.After(time.Minute) +} + +func makeRelay(relayID string, syncPeriodMS int, updatesPerSecond int, port int) (*server.Server, *server.SyncController, *server.Hub) { + serverTLS, clientTLS, err := loadCerts("WWRL000000") + + if err != nil { + fmt.Fprintf(os.Stderr, "Error: Unable to load certificates: %v. Aborting...\n", err.Error()) + + os.Exit(1) + } + + syncController := server.NewSyncController(2, nil, sync.NewPeriodicSyncScheduler(time.Millisecond * time.Duration(syncPeriodMS)), 1000) + hub := server.NewHub(relayID, syncController, clientTLS) + relay, _ := server.NewServer(server.ServerConfig{ + DBFile: "/tmp/testdb-" + util.RandomString(), + Port: port, + ServerTLS: serverTLS, + Hub: hub, + MerkleDepth: 5, + }) + + return relay, syncController, hub +} + +func loadCerts(id string) (*tls.Config, *tls.Config, error) { + clientCertificate, err := tls.LoadX509KeyPair("../test_certs/" + id + ".client.cert.pem", "../test_certs/" + id + ".client.key.pem") + + if err != nil { + return nil, nil, err + } + + serverCertificate, err := tls.LoadX509KeyPair("../test_certs/" + id + ".server.cert.pem", "../test_certs/" + id + ".server.key.pem") + + if err != nil { + return nil, nil, err + } + + rootCAChain, err := ioutil.ReadFile("../test_certs/ca-chain.cert.pem") + + if err != nil { + return nil, nil, err + } + + rootCAs := x509.NewCertPool() + if !rootCAs.AppendCertsFromPEM(rootCAChain) { + return nil, nil, errors.New("Could not append certs to chain") + } + + var serverTLSConfig = &tls.Config{ + Certificates: []tls.Certificate{ serverCertificate }, + ClientCAs: rootCAs, + } + var clientTLSConfig = &tls.Config{ + Certificates: []tls.Certificate{ clientCertificate }, + RootCAs: rootCAs, + } + + return serverTLSConfig, clientTLSConfig, nil +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/bucket/bucket.go b/vendor/github.com/armPelionEdge/devicedb/bucket/bucket.go new file mode 100644 index 0000000..a5c2b03 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/bucket/bucket.go @@ -0,0 +1,59 @@ +package bucket +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "context" + "errors" + + . "github.com/armPelionEdge/devicedb/data" + . 
"github.com/armPelionEdge/devicedb/merkle" +) + +var ENoSuchBucket = errors.New("No such bucket") + +type Bucket interface { + Name() string + ShouldReplicateOutgoing(peerID string) bool + ShouldReplicateIncoming(peerID string) bool + ShouldAcceptWrites(clientID string) bool + ShouldAcceptReads(clientID string) bool + RecordMetadata() error + RebuildMerkleLeafs() error + MerkleTree() *MerkleTree + GarbageCollect(tombstonePurgeAge uint64) error + Get(keys [][]byte) ([]*SiblingSet, error) + GetMatches(keys [][]byte) (SiblingSetIterator, error) + GetSyncChildren(nodeID uint32) (SiblingSetIterator, error) + GetAll() (SiblingSetIterator, error) + Forget(keys [][]byte) error + Batch(batch *UpdateBatch) (map[string]*SiblingSet, error) + Merge(siblingSets map[string]*SiblingSet) error + Watch(ctx context.Context, keys [][]byte, prefixes [][]byte, localVersion uint64, ch chan Row) + LockWrites() + UnlockWrites() + LockReads() + UnlockReads() +} diff --git a/vendor/github.com/armPelionEdge/devicedb/bucket/bucket_list.go b/vendor/github.com/armPelionEdge/devicedb/bucket/bucket_list.go new file mode 100644 index 0000000..b556f51 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/bucket/bucket_list.go @@ -0,0 +1,93 @@ +package bucket +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +// each namespace in the database has two main factors that differentiate it from other namespaces or buckets +// 1) Replication strategy +// . some buckets are incoming only, we never send data from the bucket out to another node +// - but are only incoming from a specific other node, i.e. some master node +// . some buckets are both incoming and outgoing. Every database node shares with every other node +// . some buckets can be neither, storing data local to that node only +// 2) Conflict resolution strategy +// . whether or not to merge conflicting siblings +// . 
the way in which conflicting siblings are merged into one sibling
+
+type BucketList struct {
+ buckets map[string]Bucket
+}
+
+func NewBucketList() *BucketList {
+ return &BucketList{ make(map[string]Bucket) }
+}
+
+func (bucketList *BucketList) AddBucket(bucket Bucket) *BucketList {
+ bucketList.buckets[bucket.Name()] = bucket
+
+ return bucketList
+}
+
+func (bucketList *BucketList) Outgoing(peerID string) []Bucket {
+ buckets := make([]Bucket, 0, len(bucketList.buckets))
+
+ for _, bucket := range bucketList.buckets {
+ if bucket.ShouldReplicateOutgoing(peerID) {
+ buckets = append(buckets, bucket)
+ }
+ }
+
+ return buckets
+}
+
+func (bucketList *BucketList) Incoming(peerID string) []Bucket {
+ buckets := make([]Bucket, 0, len(bucketList.buckets))
+
+ for _, bucket := range bucketList.buckets {
+ if bucket.ShouldReplicateIncoming(peerID) {
+ buckets = append(buckets, bucket)
+ }
+ }
+
+ return buckets
+}
+
+func (bucketList *BucketList) All() []Bucket {
+ buckets := make([]Bucket, 0, len(bucketList.buckets))
+
+ for _, bucket := range bucketList.buckets {
+ buckets = append(buckets, bucket)
+ }
+
+ return buckets
+}
+
+func (bucketList *BucketList) HasBucket(bucketName string) bool {
+ _, ok := bucketList.buckets[bucketName]
+
+ return ok
+}
+
+func (bucketList *BucketList) Get(bucketName string) Bucket {
+ return bucketList.buckets[bucketName]
+}
\ No newline at end of file
diff --git a/vendor/github.com/armPelionEdge/devicedb/bucket/builtin/cloud.go b/vendor/github.com/armPelionEdge/devicedb/bucket/builtin/cloud.go
new file mode 100644
index 0000000..78b94da
--- /dev/null
+++ b/vendor/github.com/armPelionEdge/devicedb/bucket/builtin/cloud.go
@@ -0,0 +1,76 @@
+package builtin
+//
+ // Copyright (c) 2019 ARM Limited.
+ //
+ // SPDX-License-Identifier: MIT
+ //
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
+ // of this software and associated documentation files (the "Software"), to
+ // deal in the Software without restriction, including without limitation the
+ // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ // sell copies of the Software, and to permit persons to whom the Software is
+ // furnished to do so, subject to the following conditions:
+ //
+ // The above copyright notice and this permission notice shall be included in all
+ // copies or substantial portions of the Software.
+ //
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ // SOFTWARE.
+ //
+
+
+import (
+ . "github.com/armPelionEdge/devicedb/bucket"
+ . "github.com/armPelionEdge/devicedb/storage"
+ . 
"github.com/armPelionEdge/devicedb/resolver/strategies" +) + +const ( + CloudMode = iota + RelayMode = iota + CloudPeerID = "cloud" +) + +type CloudBucket struct { + Store + mode int +} + +func NewCloudBucket(nodeID string, storageDriver StorageDriver, merkleDepth uint8, mode int) (*CloudBucket, error) { + cloudBucket := &CloudBucket{ + mode: mode, + } + + err := cloudBucket.Initialize(nodeID, storageDriver, merkleDepth, &MultiValue{}) + + if err != nil { + return nil, err + } + + return cloudBucket, nil +} + +func (cloudBucket *CloudBucket) Name() string { + return "cloud" +} + +func (cloudBucket *CloudBucket) ShouldReplicateOutgoing(peerID string) bool { + return cloudBucket.mode == CloudMode +} + +func (cloudBucket *CloudBucket) ShouldReplicateIncoming(peerID string) bool { + return cloudBucket.mode == RelayMode && peerID == CloudPeerID +} + +func (cloudBucket *CloudBucket) ShouldAcceptWrites(clientID string) bool { + return cloudBucket.mode == CloudMode +} + +func (cloudBucket *CloudBucket) ShouldAcceptReads(clientID string) bool { + return true +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/bucket/builtin/default.go b/vendor/github.com/armPelionEdge/devicedb/bucket/builtin/default.go new file mode 100644 index 0000000..74b6e67 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/bucket/builtin/default.go @@ -0,0 +1,67 @@ +package builtin +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + . "github.com/armPelionEdge/devicedb/bucket" + . "github.com/armPelionEdge/devicedb/storage" + . 
"github.com/armPelionEdge/devicedb/resolver/strategies" +) + +type DefaultBucket struct { + Store +} + +func NewDefaultBucket(nodeID string, storageDriver StorageDriver, merkleDepth uint8) (*DefaultBucket, error) { + defaultBucket := &DefaultBucket{} + + err := defaultBucket.Initialize(nodeID, storageDriver, merkleDepth, &MultiValue{}) + + if err != nil { + return nil, err + } + + return defaultBucket, nil +} + +func (defaultBucket *DefaultBucket) Name() string { + return "default" +} + +func (defaultBucket *DefaultBucket) ShouldReplicateOutgoing(peerID string) bool { + return true +} + +func (defaultBucket *DefaultBucket) ShouldReplicateIncoming(peerID string) bool { + return true +} + +func (defaultBucket *DefaultBucket) ShouldAcceptWrites(clientID string) bool { + return true +} + +func (defaultBucket *DefaultBucket) ShouldAcceptReads(clientID string) bool { + return true +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/bucket/builtin/local.go b/vendor/github.com/armPelionEdge/devicedb/bucket/builtin/local.go new file mode 100644 index 0000000..459a63b --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/bucket/builtin/local.go @@ -0,0 +1,67 @@ +package builtin +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + . "github.com/armPelionEdge/devicedb/bucket" + . "github.com/armPelionEdge/devicedb/storage" + . 
"github.com/armPelionEdge/devicedb/resolver/strategies" +) + +type LocalBucket struct { + Store +} + +func NewLocalBucket(nodeID string, storageDriver StorageDriver, merkleDepth uint8) (*LocalBucket, error) { + localBucket := &LocalBucket{} + + err := localBucket.Initialize(nodeID, storageDriver, merkleDepth, &MultiValue{}) + + if err != nil { + return nil, err + } + + return localBucket, nil +} + +func (localBucket *LocalBucket) Name() string { + return "local" +} + +func (localBucket *LocalBucket) ShouldReplicateOutgoing(peerID string) bool { + return false +} + +func (localBucket *LocalBucket) ShouldReplicateIncoming(peerID string) bool { + return false +} + +func (localBucket *LocalBucket) ShouldAcceptWrites(clientID string) bool { + return true +} + +func (localBucket *LocalBucket) ShouldAcceptReads(clientID string) bool { + return true +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/bucket/builtin/lww.go b/vendor/github.com/armPelionEdge/devicedb/bucket/builtin/lww.go new file mode 100644 index 0000000..2ca83a3 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/bucket/builtin/lww.go @@ -0,0 +1,67 @@ +package builtin +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + . "github.com/armPelionEdge/devicedb/bucket" + . "github.com/armPelionEdge/devicedb/storage" + . 
"github.com/armPelionEdge/devicedb/resolver/strategies" +) + +type LWWBucket struct { + Store +} + +func NewLWWBucket(nodeID string, storageDriver StorageDriver, merkleDepth uint8) (*LWWBucket, error) { + lwwBucket := &LWWBucket{} + + err := lwwBucket.Initialize(nodeID, storageDriver, merkleDepth, &LastWriterWins{}) + + if err != nil { + return nil, err + } + + return lwwBucket, nil +} + +func (lwwBucket *LWWBucket) Name() string { + return "lww" +} + +func (lwwBucket *LWWBucket) ShouldReplicateOutgoing(peerID string) bool { + return true +} + +func (lwwBucket *LWWBucket) ShouldReplicateIncoming(peerID string) bool { + return true +} + +func (lwwBucket *LWWBucket) ShouldAcceptWrites(clientID string) bool { + return true +} + +func (lwwBucket *LWWBucket) ShouldAcceptReads(clientID string) bool { + return true +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/bucket/monitor.go b/vendor/github.com/armPelionEdge/devicedb/bucket/monitor.go new file mode 100644 index 0000000..14b2406 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/bucket/monitor.go @@ -0,0 +1,184 @@ +package bucket +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "container/heap" + "context" + "github.com/armPelionEdge/devicedb/data" + . 
"github.com/armPelionEdge/devicedb/logging" + "strings" + "sync" +) + +type UpdateHeap []*data.Row + +func (h UpdateHeap) Len() int { + return len(h) +} + +func (h UpdateHeap) Less(i, j int) bool { + return h[i].LocalVersion < h[j].LocalVersion +} + +func (h UpdateHeap) Swap(i, j int) { + h[i], h[j] = h[j], h[i] +} + +func (h *UpdateHeap) Push(x interface{}) { + *h = append(*h, x.(*data.Row)) +} + +func (h *UpdateHeap) Pop() interface{} { + old := *h + n := len(old) + x := old[n - 1] + *h = old[0 : n - 1] + + return x +} + +type listener struct { + keys [][]byte + prefixes [][]byte + ch chan data.Row +} + +func (l *listener) isListeningFor(update data.Row) bool { + for _, key := range l.keys { + if update.Key == string(key) { + return true + } + } + + for _, prefix := range l.prefixes { + if strings.HasPrefix(update.Key, string(prefix)) { + return true + } + } + + return false +} + +type Monitor struct { + listeners map[*listener]bool + mu sync.Mutex + previousVersion uint64 + updateHeap *UpdateHeap +} + +func NewMonitor(startVersion uint64) *Monitor { + updateHeap := &UpdateHeap{ } + heap.Init(updateHeap) + + return &Monitor{ + listeners: make(map[*listener]bool), + previousVersion: startVersion, + updateHeap: updateHeap, + } +} + +func (monitor *Monitor) AddListener(ctx context.Context, keys [][]byte, prefixes [][]byte, ch chan data.Row) { + monitor.mu.Lock() + defer monitor.mu.Unlock() + + Log.Infof("Add listener for keys %v and prefixes %v", keys, prefixes) + + var newListener listener + + newListener.keys = keys + newListener.prefixes = prefixes + newListener.ch = ch + monitor.listeners[&newListener] = true + + go func() { + <-ctx.Done() + monitor.mu.Lock() + defer monitor.mu.Unlock() + Log.Infof("Remove listener for keys %v and prefixes %v", keys, prefixes) + delete(monitor.listeners, &newListener) + close(ch) + }() +} + +func (monitor *Monitor) Notify(update data.Row) { + monitor.mu.Lock() + defer monitor.mu.Unlock() + + monitor.submitUpdate(update) +} + +// This should be called if an ID range was reserved for a series of updates but those +// updates failed to be submitted so the range needs to be discarded +func (monitor *Monitor) DiscardIDRange(low uint64, high uint64) { + monitor.mu.Lock() + defer monitor.mu.Unlock() + + for i := low; i <= high; i++ { + monitor.submitUpdate(data.Row{ Key: "", LocalVersion: i, Siblings: nil }) + } +} + +func (monitor *Monitor) submitUpdate(update data.Row) { + if update.LocalVersion < monitor.previousVersion || update.LocalVersion == monitor.previousVersion && update.LocalVersion != 0 { + Log.Criticalf("An update was submitted to the monitor with key %s and version %d but the lowest expected version is %d. This update will not be sent. This should not happen and represents a bug in the watcher system.", update.Key, update.LocalVersion, monitor.previousVersion) + + return + } + + heap.Push(monitor.updateHeap, &update) + + monitor.flushUpdates() +} + +func (monitor *Monitor) flushUpdates() { + h := *monitor.updateHeap + + // The reason this has to check if the localversion is 0 is in case it is the first update ever submitted. + for monitor.updateHeap.Len() > 0 && (h[0].LocalVersion == 0 || h[0].LocalVersion == monitor.previousVersion + 1) { + monitor.previousVersion = h[0].LocalVersion + h = *monitor.updateHeap + nextUpdate := heap.Pop(monitor.updateHeap).(*data.Row) + + if nextUpdate.Key == "" { + // indicates a discarded update index. 
should just skip this + continue + } + + monitor.sendUpdate(*nextUpdate) + } +} + +func (monitor *Monitor) sendUpdate(update data.Row) { + if len(monitor.listeners) > 0 { + Log.Debugf("Monitor notifying listeners of update %d to key %s", update.LocalVersion, update.Key) + } + + for l, _ := range monitor.listeners { + if l.isListeningFor(update) { + l.ch <- update + } + } +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/bucket/store.go b/vendor/github.com/armPelionEdge/devicedb/bucket/store.go new file mode 100644 index 0000000..ca45bb6 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/bucket/store.go @@ -0,0 +1,1462 @@ + +package bucket +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "context" + "io" + "encoding/json" + "encoding/binary" + "time" + "errors" + "sort" + "sync" + "sync/atomic" + + . "github.com/armPelionEdge/devicedb/storage" + . "github.com/armPelionEdge/devicedb/error" + . "github.com/armPelionEdge/devicedb/data" + . "github.com/armPelionEdge/devicedb/logging" + . "github.com/armPelionEdge/devicedb/merkle" + . "github.com/armPelionEdge/devicedb/util" + . "github.com/armPelionEdge/devicedb/resolver" + . "github.com/armPelionEdge/devicedb/resolver/strategies" +) + +const MAX_SORTING_KEY_LENGTH = 255 +const StorageFormatVersion = "1" +const UpgradeFormatBatchSize = 100 + +var MASTER_MERKLE_TREE_PREFIX = []byte{ 0 } +var PARTITION_MERKLE_LEAF_PREFIX = []byte{ 1 } +var PARTITION_DATA_PREFIX = []byte{ 2 } +var NODE_METADATA_PREFIX = []byte{ 3 } + +func NanoToMilli(v uint64) uint64 { + return v / 1000000 +} + +func nodeBytes(node uint32) []byte { + bytes := make([]byte, 4) + + binary.BigEndian.PutUint32(bytes, node) + + return bytes +} + +func encodeMerkleLeafKey(nodeID uint32) []byte { + nodeIDEncoding := nodeBytes(nodeID) + result := make([]byte, 0, len(MASTER_MERKLE_TREE_PREFIX) + len(nodeIDEncoding)) + + result = append(result, MASTER_MERKLE_TREE_PREFIX...) + result = append(result, nodeIDEncoding...) 
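+    // The resulting key is one prefix byte followed by the 4-byte
+    // big-endian node ID; for example, leaf node 5 encodes to
+    // []byte{0, 0, 0, 0, 5} under MASTER_MERKLE_TREE_PREFIX.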
+ + return result +} + +func decodeMerkleLeafKey(k []byte) (uint32, error) { + k = k[len(MASTER_MERKLE_TREE_PREFIX):] + + if len(k) != 4 { + return 0, errors.New("Invalid merkle leaf key") + } + + return binary.BigEndian.Uint32(k[:4]), nil +} + +func encodePartitionDataKey(k []byte) []byte { + result := make([]byte, 0, len(PARTITION_DATA_PREFIX) + len(k)) + + result = append(result, PARTITION_DATA_PREFIX...) + result = append(result, k...) + + return result +} + +func decodePartitionDataKey(k []byte) []byte { + return k[len(PARTITION_DATA_PREFIX):] +} + +func encodePartitionMerkleLeafKey(nodeID uint32, k []byte) []byte { + nodeIDEncoding := nodeBytes(nodeID) + result := make([]byte, 0, len(PARTITION_MERKLE_LEAF_PREFIX) + len(nodeIDEncoding) + len(k)) + + result = append(result, PARTITION_MERKLE_LEAF_PREFIX...) + result = append(result, nodeIDEncoding...) + result = append(result, k...) + + return result +} + +func decodePartitionMerkleLeafKey(k []byte) (uint32, []byte, error) { + if len(k) < len(PARTITION_MERKLE_LEAF_PREFIX) + 5 { + return 0, nil, errors.New("Invalid partition merkle leaf key") + } + + k = k[len(PARTITION_MERKLE_LEAF_PREFIX):] + + return binary.BigEndian.Uint32(k[:4]), k[4:], nil +} + +func encodeMetadataKey(k []byte) []byte { + result := make([]byte, 0, len(NODE_METADATA_PREFIX) + len(k)) + result = append(result, PARTITION_MERKLE_LEAF_PREFIX...) + result = append(result, k...) + + return result +} + +type Store struct { + nextRowID uint64 + nodeID string + storageDriver StorageDriver + merkleTree *MerkleTree + multiLock *MultiLock + writesTryLock RWTryLock + readsTryLock RWTryLock + merkleLock *MultiLock + conflictResolver ConflictResolver + storageFormatVersion string + monitor *Monitor + watcherLock sync.Mutex +} + +func (store *Store) Initialize(nodeID string, storageDriver StorageDriver, merkleDepth uint8, conflictResolver ConflictResolver) error { + if conflictResolver == nil { + conflictResolver = &MultiValue{} + } + + store.nodeID = nodeID + store.storageDriver = storageDriver + store.multiLock = NewMultiLock() + store.merkleLock = NewMultiLock() + store.conflictResolver = conflictResolver + store.merkleTree, _ = NewMerkleTree(merkleDepth) + + var err error + dbMerkleDepth, storageFormatVersion, err := store.getStoreMetadata() + + if err != nil { + Log.Errorf("Error retrieving database metadata for node %s: %v", nodeID, err) + + return err + } + + store.storageFormatVersion = storageFormatVersion + + if dbMerkleDepth != merkleDepth || storageFormatVersion != StorageFormatVersion { + if dbMerkleDepth != merkleDepth { + Log.Debugf("Initializing node %s rebuilding merkle leafs with depth %d", nodeID, merkleDepth) + + err = store.RebuildMerkleLeafs() + + if err != nil { + Log.Errorf("Error rebuilding merkle leafs for node %s: %v", nodeID, err) + + return err + } + } + + if storageFormatVersion != StorageFormatVersion { + Log.Debugf("Initializing node %s. It is using an older storage format. Its keys need to be updated to the new storage format...", nodeID) + + err = store.UpgradeStorageFormat() + + if err != nil { + Log.Errorf("Error while upgrading the storage format for node %s: %v", nodeID, err) + + return err + } + + Log.Debugf("Upgrade to the latest storage format was successful. 
Now at storage format %s", StorageFormatVersion) + } + + err = store.RecordMetadata() + + if err != nil { + Log.Errorf("Error recording merkle depth metadata for node %s: %v", nodeID, err) + + return err + } + } + + err = store.calculateNextRowID() + + if err != nil { + Log.Errorf("Error attempting to determine what the next row ID should be at node %s: %v", nodeID, err) + + return err + } + + err = store.initializeMerkleTree() + + if err != nil { + Log.Errorf("Error initializing node %s: %v", nodeID, err) + + return err + } + + if store.nextRowID == 0 { + store.monitor = NewMonitor(0) + } else { + store.monitor = NewMonitor(store.nextRowID - 1) + } + + return nil +} + +func (store *Store) initializeMerkleTree() error { + iter, err := store.storageDriver.GetMatches([][]byte{ MASTER_MERKLE_TREE_PREFIX }) + + if err != nil { + return err + } + + defer iter.Release() + + for iter.Next() { + key := iter.Key() + value := iter.Value() + nodeID, err := decodeMerkleLeafKey(key) + + if err != nil { + return err + } + + if !store.merkleTree.IsLeaf(nodeID) { + return errors.New("Invalid leaf node in master merkle keys") + } + + hash := Hash{ } + high := binary.BigEndian.Uint64(value[:8]) + low := binary.BigEndian.Uint64(value[8:]) + hash = hash.SetLow(low).SetHigh(high) + + store.merkleTree.UpdateLeafHash(nodeID, hash) + } + + if iter.Error() != nil { + return iter.Error() + } + + return nil +} + +func (store *Store) calculateNextRowID() error { + iter, err := store.GetAll() + + if err != nil { + return err + } + + store.nextRowID = 0 + defer iter.Release() + + for iter.Next() { + if iter.LocalVersion() >= store.nextRowID { + store.nextRowID = iter.LocalVersion() + 1 + } + } + + Log.Infof("Next row ID = %d", store.nextRowID) + + return iter.Error() +} + +func (store *Store) getStoreMetadata() (uint8, string, error) { + values, err := store.storageDriver.Get([][]byte{ encodeMetadataKey([]byte("merkleDepth")), encodeMetadataKey([]byte("storageFormatVersion")) }) + + if err != nil { + return 0, "", err + } + + var merkleDepth uint8 + var storageFormatVersion string = "0" + + if values[0] != nil { + merkleDepth = uint8(values[0][0]) + } + + if values[1] != nil { + storageFormatVersion = string(values[1]) + } + + return merkleDepth, storageFormatVersion, nil +} + +func (store *Store) RecordMetadata() error { + batch := NewBatch() + + batch.Put(encodeMetadataKey([]byte("merkleDepth")), []byte{ byte(store.merkleTree.Depth()) }) + batch.Put(encodeMetadataKey([]byte("storageFormatVersion")), []byte(StorageFormatVersion)) + + err := store.storageDriver.Batch(batch) + + if err != nil { + return err + } + + return nil +} + +func (store *Store) RebuildMerkleLeafs() error { + // Delete all keys starting with MASTER_MERKLE_TREE_PREFIX or PARTITION_MERKLE_LEAF_PREFIX + iter, err := store.storageDriver.GetMatches([][]byte{ MASTER_MERKLE_TREE_PREFIX, PARTITION_MERKLE_LEAF_PREFIX }) + + if err != nil { + return err + } + + defer iter.Release() + + for iter.Next() { + batch := NewBatch() + batch.Delete(iter.Key()) + err := store.storageDriver.Batch(batch) + + if err != nil { + return err + } + } + + if iter.Error() != nil { + return iter.Error() + } + + iter.Release() + + // Scan through all the keys in this node and rebuild the merkle tree + merkleTree, _ := NewMerkleTree(store.merkleTree.Depth()) + iter, err = store.storageDriver.GetMatches([][]byte{ PARTITION_DATA_PREFIX }) + + if err != nil { + return err + } + + siblingSetIterator := NewBasicSiblingSetIterator(iter, store.storageFormatVersion) + + defer 
siblingSetIterator.Release() + + for siblingSetIterator.Next() { + key := siblingSetIterator.Key() + siblingSet := siblingSetIterator.Value() + update := NewUpdate().AddDiff(string(key), nil, siblingSet) + + _, leafNodes := merkleTree.Update(update) + batch := NewBatch() + + for leafID, _ := range leafNodes { + for key, _:= range leafNodes[leafID] { + batch.Put(encodePartitionMerkleLeafKey(leafID, []byte(key)), []byte{ }) + } + } + + err := store.storageDriver.Batch(batch) + + if err != nil { + return err + } + } + + for leafID := uint32(1); leafID < merkleTree.NodeLimit(); leafID += 2 { + batch := NewBatch() + + if merkleTree.NodeHash(leafID).High() != 0 || merkleTree.NodeHash(leafID).Low() != 0 { + leafHash := merkleTree.NodeHash(leafID).Bytes() + batch.Put(encodeMerkleLeafKey(leafID), leafHash[:]) + + err := store.storageDriver.Batch(batch) + + if err != nil { + return err + } + } + } + + return siblingSetIterator.Error() +} + +func (store *Store) UpgradeStorageFormat() error { + iter, err := store.GetAll() + + if err != nil { + Log.Errorf("Unable to create iterator to upgrade storage format: %v", err.Error()) + + return err + } + + store.nextRowID = 0 + defer iter.Release() + + batchSize := 0 + batch := NewBatch() + + for iter.Next() { + key := iter.Key() + value := iter.Value() + row := &Row{ LocalVersion: store.nextRowID, Siblings: value } + + store.nextRowID++ + + batch.Put(encodePartitionDataKey(key), row.Encode()) + batchSize++ + + if batchSize == UpgradeFormatBatchSize { + if err := store.storageDriver.Batch(batch); err != nil { + return err + } + + batch = NewBatch() + batchSize = 0 + } + } + + if batchSize > 0 { + if err := store.storageDriver.Batch(batch); err != nil { + return err + } + } + + if iter.Error() != nil { + Log.Errorf("Unable to iterate through store to upgrade storage format: %v", err.Error()) + + return iter.Error() + } + + store.storageFormatVersion = StorageFormatVersion + + return nil +} + +func (store *Store) MerkleTree() *MerkleTree { + return store.merkleTree +} + +func (store *Store) GarbageCollect(tombstonePurgeAge uint64) error { + iter, err := store.storageDriver.GetMatches([][]byte{ PARTITION_DATA_PREFIX }) + + if err != nil { + Log.Errorf("Garbage collection error: %s", err.Error()) + + return EStorage + } + + now := NanoToMilli(uint64(time.Now().UnixNano())) + siblingSetIterator := NewBasicSiblingSetIterator(iter, store.storageFormatVersion) + defer siblingSetIterator.Release() + + if tombstonePurgeAge > now { + tombstonePurgeAge = now + } + + for siblingSetIterator.Next() { + var err error + key := siblingSetIterator.Key() + ssInitial := siblingSetIterator.Value() + + if !ssInitial.CanPurge(now - tombstonePurgeAge) { + continue + } + + store.lock([][]byte{ key }) + + func() { + // the key must be re-queried because at the time of iteration we did not have a lock + // on the key in order to update it + siblingSets, err := store.Get([][]byte{ key }) + + if err != nil { + return + } + + siblingSet := siblingSets[0] + + if siblingSet == nil { + return + } + + if !siblingSet.CanPurge(now - tombstonePurgeAge) { + return + } + + Log.Debugf("GC: Purge tombstone at key %s. 
It is older than %d milliseconds", string(key), tombstonePurgeAge) + leafID := store.merkleTree.LeafNode(key) + + batch := NewBatch() + batch.Delete(encodePartitionMerkleLeafKey(leafID, key)) + batch.Delete(encodePartitionDataKey(key)) + + err = store.storageDriver.Batch(batch) + }() + + store.unlock([][]byte{ key }, false) + + if err != nil { + Log.Errorf("Garbage collection error: %s", err.Error()) + + return EStorage + } + } + + if iter.Error() != nil { + Log.Errorf("Garbage collection error: %s", iter.Error().Error()) + + return EStorage + } + + return nil +} + +func (store *Store) Get(keys [][]byte) ([]*SiblingSet, error) { + if !store.readsTryLock.TryRLock() { + return nil, EOperationLocked + } + + defer store.readsTryLock.RUnlock() + + if len(keys) == 0 { + Log.Warningf("Passed empty keys parameter in Get(%v)", keys) + + return nil, EEmpty + } + + // Make a new array since we don't want to modify the input array + keysCopy := make([][]byte, len(keys)) + + for i := 0; i < len(keys); i += 1 { + if len(keys[i]) == 0 { + Log.Warningf("Passed empty key in Get(%v)", keys) + + return nil, EEmpty + } + + if len(keys[i]) > MAX_SORTING_KEY_LENGTH { + Log.Warningf("Key is too long %d > %d in Get(%v)", len(keys[i]), MAX_SORTING_KEY_LENGTH, keys) + + return nil, ELength + } + + keysCopy[i] = encodePartitionDataKey(keys[i]) + } + + // use storage driver + values, err := store.storageDriver.Get(keysCopy) + + if err != nil { + Log.Errorf("Storage driver error in Get(%v): %s", keys, err.Error()) + + return nil, EStorage + } + + siblingSetList := make([]*SiblingSet, len(keys)) + + for i := 0; i < len(keys); i += 1 { + if values[i] == nil { + siblingSetList[i] = nil + + continue + } + + var row Row + + err := row.Decode(values[i], store.storageFormatVersion) + + if err != nil { + Log.Errorf("Storage driver error in Get(%v): %s", keys, err.Error()) + + return nil, EStorage + } + + siblingSetList[i] = row.Siblings + } + + return siblingSetList, nil +} + +func (store *Store) GetMatches(keys [][]byte) (SiblingSetIterator, error) { + if !store.readsTryLock.TryRLock() { + return nil, EOperationLocked + } + + defer store.readsTryLock.RUnlock() + + if len(keys) == 0 { + Log.Warningf("Passed empty keys parameter in GetMatches(%v)", keys) + + return nil, EEmpty + } + + // Make a new array since we don't want to modify the input array + keysCopy := make([][]byte, len(keys)) + + for i := 0; i < len(keys); i += 1 { + if len(keys[i]) == 0 { + Log.Warningf("Passed empty key in GetMatches(%v)", keys) + + return nil, EEmpty + } + + if len(keys[i]) > MAX_SORTING_KEY_LENGTH { + Log.Warningf("Key is too long %d > %d in GetMatches(%v)", len(keys[i]), MAX_SORTING_KEY_LENGTH, keys) + + return nil, ELength + } + + keysCopy[i] = encodePartitionDataKey(keys[i]) + } + + iter, err := store.storageDriver.GetMatches(keysCopy) + + if err != nil { + Log.Errorf("Storage driver error in GetMatches(%v): %s", keys, err.Error()) + + return nil, EStorage + } + + return NewBasicSiblingSetIterator(iter, store.storageFormatVersion), nil +} + +func (store *Store) GetAll() (SiblingSetIterator, error) { + if !store.readsTryLock.TryRLock() { + return nil, EOperationLocked + } + + defer store.readsTryLock.RUnlock() + + iter, err := store.storageDriver.GetMatches([][]byte{ encodePartitionDataKey([]byte{ }) }) + + if err != nil { + Log.Errorf("Storage driver error in GetAll(): %s", err.Error()) + + return nil, EStorage + } + + return NewBasicSiblingSetIterator(iter, store.storageFormatVersion), nil +} + +func (store *Store) GetSyncChildren(nodeID 
uint32) (SiblingSetIterator, error) { + if !store.readsTryLock.TryRLock() { + return nil, EOperationLocked + } + + defer store.readsTryLock.RUnlock() + + if nodeID >= store.merkleTree.NodeLimit() { + return nil, EMerkleRange + } + + min := encodePartitionMerkleLeafKey(store.merkleTree.SubRangeMin(nodeID), []byte{}) + max := encodePartitionMerkleLeafKey(store.merkleTree.SubRangeMax(nodeID), []byte{}) + + iter, err := store.storageDriver.GetRange(min, max) + + if err != nil { + Log.Errorf("Storage driver error in GetSyncChildren(%v): %s", nodeID, err.Error()) + + return nil, EStorage + } + + return NewMerkleChildrenIterator(iter, store.storageDriver, store.storageFormatVersion), nil +} + +func (store *Store) Forget(keys [][]byte) error { + for _, key := range keys { + if key == nil { + continue + } + + store.lock([][]byte{ key }) + + siblingSets, err := store.Get([][]byte{ key }) + + if err != nil { + Log.Errorf("Unable to forget key %s due to storage error: %v", string(key), err) + + store.unlock([][]byte{ key }, false) + + return EStorage + } + + siblingSet := siblingSets[0] + + if siblingSet == nil { + store.unlock([][]byte{ key }, false) + + continue + } + + // Update merkle tree to reflect deletion + leafID := store.merkleTree.LeafNode(key) + newLeafHash := store.merkleTree.NodeHash(leafID).Xor(siblingSet.Hash(key)) + store.merkleTree.UpdateLeafHash(leafID, newLeafHash) + + batch := NewBatch() + batch.Delete(encodePartitionMerkleLeafKey(leafID, key)) + batch.Delete(encodePartitionDataKey(key)) + leafHashBytes := newLeafHash.Bytes() + batch.Put(encodeMerkleLeafKey(leafID), leafHashBytes[:]) + + err = store.storageDriver.Batch(batch) + + store.unlock([][]byte{ key }, false) + + if err != nil { + Log.Errorf("Unable to forget key %s due to storage error: %v", string(key), err.Error()) + + return EStorage + } + + Log.Debugf("Forgot key %s", string(key)) + } + + return nil +} + +func (store *Store) updateInit(keys [][]byte) (map[string]*SiblingSet, error) { + siblingSetMap := map[string]*SiblingSet{ } + + // db objects + for i := 0; i < len(keys); i += 1 { + keys[i] = encodePartitionDataKey(keys[i]) + } + + values, err := store.storageDriver.Get(keys) + + if err != nil { + Log.Errorf("Storage driver error in updateInit(%v): %s", keys, err.Error()) + + return nil, EStorage + } + + for i := 0; i < len(keys); i += 1 { + var row Row + key := decodePartitionDataKey(keys[i]) + siblingSetBytes := values[0] + + if siblingSetBytes == nil { + siblingSetMap[string(key)] = NewSiblingSet(map[*Sibling]bool{ }) + } else { + err := row.Decode(siblingSetBytes, store.storageFormatVersion) + + if err != nil { + Log.Warningf("Could not decode sibling set in updateInit(%v): %s", keys, err.Error()) + + return nil, EStorage + } + + siblingSetMap[string(key)] = row.Siblings + } + + values = values[1:] + } + + return siblingSetMap, nil +} + +func (store *Store) batch(update *Update, merkleTree *MerkleTree) (*Batch, []Row) { + _, leafNodes := merkleTree.Update(update) + batch := NewBatch() + updatedRows := make([]Row, 0, update.Size()) + + // WRITE PARTITION MERKLE LEAFS + for leafID, _ := range leafNodes { + leafHash := merkleTree.NodeHash(leafID).Bytes() + batch.Put(encodeMerkleLeafKey(leafID), leafHash[:]) + + for key, _ := range leafNodes[leafID] { + batch.Put(encodePartitionMerkleLeafKey(leafID, []byte(key)), []byte{ }) + } + } + + // WRITE PARTITION OBJECTS + // atomically increment nextRowID by the amount of new IDs we need (one per key) to allocate enough IDs + // for this batch update. 
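+    // atomic.AddUint64 returns the counter value *after* the addition,
+    // so subtracting update.Size() again yields the first ID in the
+    // block reserved for this batch. For example (values illustrative),
+    // if nextRowID was 10 and the update holds 3 keys, the counter
+    // advances to 13 and this batch uses IDs 10, 11 and 12.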
+ nextRowID := atomic.AddUint64(&store.nextRowID, uint64(update.Size())) - uint64(update.Size()) + + for diff := range update.Iter() { + key := []byte(diff.Key()) + siblingSet := diff.NewSiblingSet() + row := &Row{ Key: diff.Key(), LocalVersion: nextRowID, Siblings: siblingSet } + updatedRows = append(updatedRows, *row) + + nextRowID++ + + batch.Put(encodePartitionDataKey(key), row.Encode()) + } + + return batch, updatedRows +} + +func (store *Store) updateToSibling(o Op, c *DVV, oldestTombstone *Sibling) *Sibling { + if o.IsDelete() { + if oldestTombstone == nil { + return NewSibling(c, nil, NanoToMilli(uint64(time.Now().UnixNano()))) + } else { + return NewSibling(c, nil, oldestTombstone.Timestamp()) + } + } else { + return NewSibling(c, o.Value(), NanoToMilli(uint64(time.Now().UnixNano()))) + } +} + +func (store *Store) Batch(batch *UpdateBatch) (map[string]*SiblingSet, error) { + if !store.writesTryLock.TryRLock() { + return nil, EOperationLocked + } + + defer store.writesTryLock.RUnlock() + + if batch == nil { + Log.Warningf("Passed nil batch parameter in Batch(%v)", batch) + + return nil, EEmpty + } + + keys := make([][]byte, 0, len(batch.Batch().Ops())) + update := NewUpdate() + + for key, _ := range batch.Batch().Ops() { + keyBytes := []byte(key) + + keys = append(keys, keyBytes) + } + + store.lock(keys) + defer store.unlock(keys, true) + + merkleTree := store.merkleTree + siblingSets, err := store.updateInit(keys) + + //return nil, nil + if err != nil { + return nil, err + } + + for key, op := range batch.Batch().Ops() { + context := batch.Context()[key] + siblingSet := siblingSets[key] + + if siblingSet.Size() == 0 && op.IsDelete() { + delete(siblingSets, key) + + continue + } + + updateContext := context.Context() + + if len(updateContext) == 0 { + updateContext = siblingSet.Join() + } + + updateClock := siblingSet.Event(updateContext, store.nodeID) + var newSibling *Sibling + + if siblingSet.IsTombstoneSet() { + newSibling = store.updateToSibling(op, updateClock, siblingSet.GetOldestTombstone()) + } else { + newSibling = store.updateToSibling(op, updateClock, nil) + } + + updatedSiblingSet := siblingSet.Discard(updateClock).Sync(NewSiblingSet(map[*Sibling]bool{ newSibling: true })) + + siblingSets[key] = updatedSiblingSet + + updatedSiblingSet = store.conflictResolver.ResolveConflicts(updatedSiblingSet) + + update.AddDiff(key, siblingSet, updatedSiblingSet) + } + + storageBatch, updatedRows := store.batch(update, merkleTree) + err = store.storageDriver.Batch(storageBatch) + + if err != nil { + Log.Errorf("Storage driver error in Batch(%v): %s", batch, err.Error()) + + store.discardIDRange(updatedRows) + store.merkleTree.UndoUpdate(update) + + return nil, EStorage + } + + store.notifyWatchers(updatedRows) + + return siblingSets, nil +} + +func (store *Store) Merge(siblingSets map[string]*SiblingSet) error { + if !store.writesTryLock.TryRLock() { + return EOperationLocked + } + + defer store.writesTryLock.RUnlock() + + if siblingSets == nil { + Log.Warningf("Passed nil sibling sets in Merge(%v)", siblingSets) + + return EEmpty + } + + keys := make([][]byte, 0, len(siblingSets)) + + for key, _ := range siblingSets { + keys = append(keys, []byte(key)) + } + + store.lock(keys) + defer store.unlock(keys, true) + + merkleTree := store.merkleTree + mySiblingSets, err := store.updateInit(keys) + + if err != nil { + return err + } + + update := NewUpdate() + + for _, key := range keys { + key = decodePartitionDataKey(key) + siblingSet := siblingSets[string(key)] + mySiblingSet := 
mySiblingSets[string(key)] + + if siblingSet == nil { + continue + } + + if mySiblingSet == nil { + mySiblingSet = NewSiblingSet(map[*Sibling]bool{ }) + } + + updatedSiblingSet := mySiblingSet.MergeSync(siblingSet, store.nodeID) + + for sibling := range updatedSiblingSet.Iter() { + if !mySiblingSet.Has(sibling) { + updatedSiblingSet = store.conflictResolver.ResolveConflicts(updatedSiblingSet) + + update.AddDiff(string(key), mySiblingSet, updatedSiblingSet) + } + } + } + + if update.Size() != 0 { + batch, updatedRows := store.batch(update, merkleTree) + err := store.storageDriver.Batch(batch) + + if err != nil { + Log.Errorf("Storage driver error in Merge(%v): %s", siblingSets, err.Error()) + + store.discardIDRange(updatedRows) + store.merkleTree.UndoUpdate(update) + + return EStorage + } + + store.notifyWatchers(updatedRows) + } + + return nil +} + +func (store *Store) Watch(ctx context.Context, keys [][]byte, prefixes [][]byte, localVersion uint64, ch chan Row) { + store.addWatcher(ctx, keys, prefixes, localVersion, ch) +} + +func (store *Store) addWatcher(ctx context.Context, keys [][]byte, prefixes [][]byte, localVersion uint64, ch chan Row) error { + store.watcherLock.Lock() + defer store.watcherLock.Unlock() + + keysCopy := make([][]byte, len(keys)) + + for i := 0; i < len(keys); i += 1 { + keysCopy[i] = encodePartitionDataKey(keys[i]) + } + + // use storage driver + values, err := store.storageDriver.Get(keysCopy) + + if err != nil { + Log.Errorf("Storage driver error in addWatcher(): %s", err.Error()) + + close(ch) + + return EStorage + } + + for i := 0; i < len(keys); i += 1 { + if values[i] == nil { + continue + } + + var row Row + + err := row.Decode(values[i], store.storageFormatVersion) + + if err != nil { + Log.Errorf("Storage driver error in addWatcher(): %s", err.Error()) + + close(ch) + + return EStorage + } + + if row.LocalVersion > localVersion { + ch <- row + } + } + + prefixesCopy := make([][]byte, len(prefixes)) + + for i := 0; i < len(prefixes); i += 1 { + prefixesCopy[i] = encodePartitionDataKey(prefixes[i]) + } + + iter, err := store.storageDriver.GetMatches(prefixesCopy) + + if err != nil { + Log.Errorf("Storage driver error in addWatcher(): %s", err.Error()) + + close(ch) + + return EStorage + } + + ssIter := NewBasicSiblingSetIterator(iter, store.storageFormatVersion) + + for ssIter.Next() { + if ssIter.LocalVersion() <= localVersion { + continue + } + + var row Row + + row.Key = string(ssIter.Key()) + row.LocalVersion = ssIter.LocalVersion() + row.Siblings = ssIter.Value() + + ch <- row + } + + if ssIter.Error() != nil { + Log.Errorf("Storage driver error in addWatcher(): %s", err.Error()) + + close(ch) + + return EStorage + } + + ch <- Row{} + + store.monitor.AddListener(ctx, keys, prefixes, ch) + + return nil +} + +func (store *Store) notifyWatchers(updatedRows []Row) { + store.watcherLock.Lock() + defer store.watcherLock.Unlock() + + // submit update to watcher collection + for _, update := range updatedRows { + store.monitor.Notify(update) + } +} + +func (store *Store) discardIDRange(updatedRows []Row) { + store.watcherLock.Lock() + defer store.watcherLock.Unlock() + + if len(updatedRows) == 0 { + return + } + + store.monitor.DiscardIDRange(updatedRows[0].LocalVersion, updatedRows[len(updatedRows) - 1].LocalVersion) +} + +func (store *Store) sortedLockKeys(keys [][]byte) ([]string, []string) { + leafSet := make(map[string]bool, len(keys)) + keyStrings := make([]string, 0, len(keys)) + nodeStrings := make([]string, 0, len(keys)) + + for _, key := range keys { 
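+        // Collect both the raw key and the merkle leaf node covering it;
+        // both lists are sorted below so that lock() always acquires
+        // locks in a single global order, preventing deadlock between
+        // concurrent writers whose key sets overlap.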
+ keyStrings = append(keyStrings, string(key)) + leafSet[string(nodeBytes(store.merkleTree.LeafNode(key)))] = true + } + + for node, _ := range leafSet { + nodeStrings = append(nodeStrings, node) + } + + sort.Strings(keyStrings) + sort.Strings(nodeStrings) + + return keyStrings, nodeStrings +} + +func (store *Store) lock(keys [][]byte) { + keyStrings, nodeStrings := store.sortedLockKeys(keys) + + for _, key := range keyStrings { + store.multiLock.Lock([]byte(key)) + } + + for _, key := range nodeStrings { + store.merkleLock.Lock([]byte(key)) + } +} + +func (store *Store) unlock(keys [][]byte, keysArePrefixed bool) { + tKeys := make([][]byte, 0, len(keys)) + + for _, key := range keys { + if keysArePrefixed { + tKeys = append(tKeys, key[1:]) + } else { + tKeys = append(tKeys, key) + } + } + + keyStrings, nodeStrings := store.sortedLockKeys(tKeys) + + for _, key := range keyStrings { + store.multiLock.Unlock([]byte(key)) + } + + for _, key := range nodeStrings { + store.merkleLock.Unlock([]byte(key)) + } +} + +func (store *Store) LockWrites() { + store.writesTryLock.WLock() +} + +func (store *Store) UnlockWrites() { + store.writesTryLock.WUnlock() +} + +func (store *Store) LockReads() { + store.readsTryLock.WLock() +} + +func (store *Store) UnlockReads() { + store.readsTryLock.WUnlock() +} + +type UpdateBatch struct { + RawBatch *Batch `json:"batch"` + Contexts map[string]*DVV `json:"context"` +} + +func NewUpdateBatch() *UpdateBatch { + return &UpdateBatch{ NewBatch(), map[string]*DVV{ } } +} + +func (updateBatch *UpdateBatch) Batch() *Batch { + return updateBatch.RawBatch +} + +func (updateBatch *UpdateBatch) Context() map[string]*DVV { + return updateBatch.Contexts +} + +func (updateBatch *UpdateBatch) ToJSON() ([]byte, error) { + return json.Marshal(updateBatch) +} + +func (updateBatch *UpdateBatch) FromJSON(reader io.Reader) error { + var tempUpdateBatch UpdateBatch + + decoder := json.NewDecoder(reader) + err := decoder.Decode(&tempUpdateBatch) + + if err != nil { + return err + } + + updateBatch.Contexts = map[string]*DVV{ } + updateBatch.RawBatch = NewBatch() + + for k, op := range tempUpdateBatch.Batch().Ops() { + context, ok := tempUpdateBatch.Context()[k] + + if !ok || context == nil { + context = NewDVV(NewDot("", 0), map[string]uint64{ }) + } + + err = nil + + if op.IsDelete() { + _, err = updateBatch.Delete(op.Key(), context) + } else { + _, err = updateBatch.Put(op.Key(), op.Value(), context) + } + + if err != nil { + return err + } + } + + return nil +} + +func (updateBatch *UpdateBatch) Put(key []byte, value []byte, context *DVV) (*UpdateBatch, error) { + if len(key) == 0 { + Log.Warningf("Passed an empty key to Put(%v, %v, %v)", key, value, context) + + return nil, EEmpty + } + + if len(key) > MAX_SORTING_KEY_LENGTH { + Log.Warningf("Key is too long %d > %d in Put(%v, %v, %v)", len(key), MAX_SORTING_KEY_LENGTH, key, value, context) + + return nil, ELength + } + + if context == nil { + Log.Warningf("Passed a nil context to Put(%v, %v, %v)", key, value, context) + + return nil, EEmpty + } + + if value == nil { + value = []byte{ } + } + + updateBatch.Batch().Put(key, value) + updateBatch.Context()[string(key)] = context + + return updateBatch, nil +} + +func (updateBatch *UpdateBatch) Delete(key []byte, context *DVV) (*UpdateBatch, error) { + if len(key) == 0 { + Log.Warningf("Passed an empty key to Delete(%v, %v)", key, context) + + return nil, EEmpty + } + + if len(key) > MAX_SORTING_KEY_LENGTH { + Log.Warningf("Key is too long %d > %d in Delete(%v, %v)", len(key), 
MAX_SORTING_KEY_LENGTH, key, context) + + return nil, ELength + } + + if context == nil { + Log.Warningf("Passed a nil context to Delete(%v, %v)", key, context) + + return nil, EEmpty + } + + updateBatch.Batch().Delete(key) + updateBatch.Context()[string(key)] = context + + return updateBatch, nil +} + +type MerkleChildrenIterator struct { + dbIterator StorageIterator + storageDriver StorageDriver + parseError error + currentKey []byte + currentValue *SiblingSet + storageFormatVersion string + currentLocalVersion uint64 +} + +func NewMerkleChildrenIterator(iter StorageIterator, storageDriver StorageDriver, storageFormatVersion string) *MerkleChildrenIterator { + return &MerkleChildrenIterator{ iter, storageDriver, nil, nil, nil, storageFormatVersion, 0 } + // not actually the prefix for all keys in the range, but it will be a consistent length + // prefix := encodePartitionMerkleLeafKey(nodeID, []byte{ }) +} + +func (mIterator *MerkleChildrenIterator) Next() bool { + mIterator.currentKey = nil + mIterator.currentValue = nil + mIterator.currentLocalVersion = 0 + + if !mIterator.dbIterator.Next() { + if mIterator.dbIterator.Error() != nil { + Log.Errorf("Storage driver error in Next(): %s", mIterator.dbIterator.Error()) + } + + mIterator.Release() + + return false + } + + _, key, err := decodePartitionMerkleLeafKey(mIterator.dbIterator.Key()) + + if err != nil { + Log.Errorf("Corrupt partition merkle leaf key in Next(): %v", mIterator.dbIterator.Key()) + + mIterator.Release() + + return false + } + + values, err := mIterator.storageDriver.Get([][]byte{ encodePartitionDataKey(key) }) + + if err != nil { + Log.Errorf("Storage driver error in Next(): %s", err) + + mIterator.Release() + + return false + } + + value := values[0] + + var row Row + + mIterator.parseError = row.Decode(value, mIterator.storageFormatVersion) + + if mIterator.parseError != nil { + Log.Errorf("Storage driver error in Next() key = %v, value = %v: %s", key, value, mIterator.parseError.Error()) + + mIterator.Release() + + return false + } + + mIterator.currentKey = key + mIterator.currentValue = row.Siblings + mIterator.currentLocalVersion = row.LocalVersion + + return true +} + +func (mIterator *MerkleChildrenIterator) Prefix() []byte { + return nil +} + +func (mIterator *MerkleChildrenIterator) Key() []byte { + return mIterator.currentKey +} + +func (mIterator *MerkleChildrenIterator) Value() *SiblingSet { + return mIterator.currentValue +} + +func (mIterator *MerkleChildrenIterator) LocalVersion() uint64 { + return mIterator.currentLocalVersion +} + +func (mIterator *MerkleChildrenIterator) Release() { + mIterator.dbIterator.Release() +} + +func (mIterator *MerkleChildrenIterator) Error() error { + if mIterator.parseError != nil { + return EStorage + } + + if mIterator.dbIterator.Error() != nil { + return EStorage + } + + return nil +} + +type BasicSiblingSetIterator struct { + dbIterator StorageIterator + parseError error + currentKey []byte + currentValue *SiblingSet + storageFormatVersion string + currentLocalVersion uint64 +} + +func NewBasicSiblingSetIterator(dbIterator StorageIterator, storageFormatVersion string) *BasicSiblingSetIterator { + return &BasicSiblingSetIterator{ dbIterator, nil, nil, nil, storageFormatVersion, 0 } +} + +func (ssIterator *BasicSiblingSetIterator) Next() bool { + ssIterator.currentKey = nil + ssIterator.currentValue = nil + ssIterator.currentLocalVersion = 0 + + if !ssIterator.dbIterator.Next() { + if ssIterator.dbIterator.Error() != nil { + Log.Errorf("Storage driver error in Next(): 
%s", ssIterator.dbIterator.Error()) + } + + return false + } + + var row Row + + ssIterator.parseError = row.Decode(ssIterator.dbIterator.Value(), ssIterator.storageFormatVersion) + + if ssIterator.parseError != nil { + Log.Errorf("Storage driver error in Next() key = %v, value = %v: %s", ssIterator.dbIterator.Key(), ssIterator.dbIterator.Value(), ssIterator.parseError.Error()) + + ssIterator.Release() + + return false + } + + ssIterator.currentKey = ssIterator.dbIterator.Key() + ssIterator.currentValue = row.Siblings + ssIterator.currentLocalVersion = row.LocalVersion + + return true +} + +func (ssIterator *BasicSiblingSetIterator) Prefix() []byte { + return decodePartitionDataKey(ssIterator.dbIterator.Prefix()) +} + +func (ssIterator *BasicSiblingSetIterator) Key() []byte { + if ssIterator.currentKey == nil { + return nil + } + + return decodePartitionDataKey(ssIterator.currentKey) +} + +func (ssIterator *BasicSiblingSetIterator) Value() *SiblingSet { + return ssIterator.currentValue +} + +func (ssIterator *BasicSiblingSetIterator) LocalVersion() uint64 { + return ssIterator.currentLocalVersion +} + +func (ssIterator *BasicSiblingSetIterator) Release() { + ssIterator.dbIterator.Release() +} + +func (ssIterator *BasicSiblingSetIterator) Error() error { + if ssIterator.parseError != nil { + return EStorage + } + + if ssIterator.dbIterator.Error() != nil { + return EStorage + } + + return nil +} diff --git a/vendor/github.com/armPelionEdge/devicedb/client/api_client.go b/vendor/github.com/armPelionEdge/devicedb/client/api_client.go new file mode 100644 index 0000000..88825d5 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/client/api_client.go @@ -0,0 +1,396 @@ +package client +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + + "github.com/armPelionEdge/devicedb/routes" + . 
"github.com/armPelionEdge/devicedb/error" +) + +type APIClientConfig struct { + Servers []string +} + +type APIClient struct { + servers []string + nextServerIndex int + httpClient *http.Client +} + +func New(config APIClientConfig) *APIClient { + return &APIClient{ + servers: config.Servers, + nextServerIndex: 0, + httpClient: &http.Client{ }, + } +} + +func (client *APIClient) nextServer() (server string) { + if len(client.servers) == 0 { + return + } + + server = client.servers[client.nextServerIndex] + client.nextServerIndex = (client.nextServerIndex + 1) % len(client.servers) + + return +} + +func (client *APIClient) ClusterOverview(ctx context.Context) (routes.ClusterOverview, error) { + encodedOverview, err := client.sendRequest(ctx, "GET", "/cluster", nil) + + if err != nil { + return routes.ClusterOverview{}, err + } + + var clusterOverview routes.ClusterOverview + + err = json.Unmarshal(encodedOverview, &clusterOverview) + + if err != nil { + return routes.ClusterOverview{}, err + } + + return clusterOverview, nil +} + +func (client *APIClient) RelayStatus(ctx context.Context, relayID string) (routes.RelayStatus, error) { + encodedStatus, err := client.sendRequest(ctx, "GET", "/relays/" + relayID, nil) + + if err != nil { + return routes.RelayStatus{}, err + } + + var relayStatus routes.RelayStatus + + err = json.Unmarshal(encodedStatus, &relayStatus) + + if err != nil { + return routes.RelayStatus{}, err + } + + return relayStatus, nil +} + +func (client *APIClient) AddSite(ctx context.Context, siteID string) error { + _, err := client.sendRequest(ctx, "PUT", "/sites/" + siteID, nil) + + if err != nil { + return err + } + + return nil +} + +func (client *APIClient) RemoveSite(ctx context.Context, siteID string) error { + _, err := client.sendRequest(ctx, "DELETE", "/sites/" + siteID, nil) + + if err != nil { + return err + } + + return nil +} + +func (client *APIClient) AddRelay(ctx context.Context, relayID string) error { + _, err := client.sendRequest(ctx, "PUT", "/relays/" + relayID, nil) + + if err != nil { + return err + } + + return nil +} + +func (client *APIClient) MoveRelay(ctx context.Context, relayID string, siteID string) error { + var relaySettingsPatch routes.RelaySettingsPatch = routes.RelaySettingsPatch{ Site: siteID } + + body, err := json.Marshal(relaySettingsPatch) + + if err != nil { + return err + } + + _, err = client.sendRequest(ctx, "PATCH", "/relays/" + relayID, body) + + if err != nil { + return err + } + + return nil +} + +func (client *APIClient) RemoveRelay(ctx context.Context, relayID string) error { + _, err := client.sendRequest(ctx, "DELETE", "/relays/" + relayID, nil) + + if err != nil { + return err + } + + return nil +} + +func (client *APIClient) Batch(ctx context.Context, siteID string, bucket string, batch Batch) (int, int, error) { + transportUpdateBatch := batch.ToTransportUpdateBatch() + encodedTransportUpdateBatch, err := json.Marshal(transportUpdateBatch) + + if err != nil { + return 0, 0, err + } + + response, err := client.sendRequest(ctx, "POST", fmt.Sprintf("/sites/%s/buckets/%s/batches", siteID, bucket), encodedTransportUpdateBatch) + + if err != nil { + return 0, 0, err + } + + var batchResult routes.BatchResult + + err = json.Unmarshal(response, &batchResult) + + if err != nil { + return 0, 0, err + } + + if batchResult.Quorum { + return int(batchResult.Replicas), int(batchResult.NApplied), nil + } + + return int(batchResult.Replicas), int(batchResult.NApplied), ENoQuorum +} + +func (client *APIClient) Get(ctx context.Context, 
siteID string, bucket string, keys []string) ([]Entry, error) { + url := fmt.Sprintf("/sites/%s/buckets/%s/keys?", siteID, bucket) + + for i, key := range keys { + url += "key=" + key + + if i != len(keys) - 1 { + url += "&" + } + } + + encodedAPIEntries, err := client.sendRequest(ctx, "GET", url, nil) + + if err != nil { + return nil, err + } + + var apiEntries []routes.APIEntry + + err = json.Unmarshal(encodedAPIEntries, &apiEntries) + + if err != nil { + return nil, err + } + + var entries []Entry = make([]Entry, len(apiEntries)) + + for i, apiEntry := range apiEntries { + entries[i] = Entry{ + Context: apiEntry.Context, + Siblings: apiEntry.Siblings, + } + } + + return entries, nil +} + +func (client *APIClient) GetMatches(ctx context.Context, siteID string, bucket string, keys []string) (EntryIterator, error) { + url := fmt.Sprintf("/sites/%s/buckets/%s/keys?", siteID, bucket) + + for i, key := range keys { + url += "prefix=" + key + + if i != len(keys) - 1 { + url += "&" + } + } + + encodedAPIEntries, err := client.sendRequest(ctx, "GET", url, nil) + + if err != nil { + return EntryIterator{}, err + } + + var apiEntries []routes.APIEntry + + err = json.Unmarshal(encodedAPIEntries, &apiEntries) + + if err != nil { + return EntryIterator{}, err + } + + var entryIterator EntryIterator = EntryIterator{ currentEntry: -1, entries: make([]iteratorEntry, len(apiEntries)) } + + for i, apiEntry := range apiEntries { + entryIterator.entries[i] = iteratorEntry{ + key: apiEntry.Key, + prefix: apiEntry.Prefix, + entry: Entry{ + Context: apiEntry.Context, + Siblings: apiEntry.Siblings, + }, + } + } + + return entryIterator, nil +} + +func (client *APIClient) LogDump(ctx context.Context) (routes.LogDump, error) { + url := "/log_dump" + response, err := client.sendRequest(ctx, "GET", url, nil) + + if err != nil { + return routes.LogDump{}, err + } + + var logDump routes.LogDump + + if err := json.Unmarshal(response, &logDump); err != nil { + return routes.LogDump{}, err + } + + return logDump, nil +} + +func (client *APIClient) Snapshot(ctx context.Context) (routes.Snapshot, error) { + url := "/snapshot" + response, err := client.sendRequest(ctx, "POST", url, nil) + + if err != nil { + return routes.Snapshot{}, err + } + + var snapshot routes.Snapshot + + if err := json.Unmarshal(response, &snapshot); err != nil { + return routes.Snapshot{}, err + } + + return snapshot, nil +} + +func (client *APIClient) GetSnapshot(ctx context.Context, uuid string) (routes.Snapshot, error) { + url := "/snapshot/" + uuid + response, err := client.sendRequest(ctx, "GET", url, nil) + + if err != nil { + return routes.Snapshot{}, err + } + + var snapshot routes.Snapshot + + if err := json.Unmarshal(response, &snapshot); err != nil { + return routes.Snapshot{}, err + } + + return snapshot, nil +} + +func (client *APIClient) DownloadSnapshot(ctx context.Context, uuid string) (io.ReadCloser, error) { + url := "/snapshot/" + uuid + ".tar" + response, err := client.sendRequestRaw(ctx, "GET", url, nil) + + if err != nil { + return nil, err + } + + return response, nil +} + +func (client *APIClient) sendRequestRaw(ctx context.Context, httpVerb string, endpointURL string, body []byte) (io.ReadCloser, error) { + u := fmt.Sprintf("http://%s%s", client.nextServer(), endpointURL) + request, err := http.NewRequest(httpVerb, u, bytes.NewReader(body)) + + if err != nil { + return nil, err + } + + request = request.WithContext(ctx) + + resp, err := client.httpClient.Do(request) + + if err != nil { + return nil, err + } + + if 
resp.StatusCode != http.StatusOK { + errorMessage, err := ioutil.ReadAll(resp.Body) + + if err != nil { + return nil, err + } + + return nil, &ErrorStatusCode{ Message: string(errorMessage), StatusCode: resp.StatusCode } + } + + return resp.Body, nil +} + +func (client *APIClient) sendRequest(ctx context.Context, httpVerb string, endpointURL string, body []byte) ([]byte, error) { + u := fmt.Sprintf("http://%s%s", client.nextServer(), endpointURL) + request, err := http.NewRequest(httpVerb, u, bytes.NewReader(body)) + + if err != nil { + return nil, err + } + + request = request.WithContext(ctx) + + resp, err := client.httpClient.Do(request) + + if err != nil { + return nil, err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + errorMessage, err := ioutil.ReadAll(resp.Body) + + if err != nil { + return nil, err + } + + return nil, &ErrorStatusCode{ Message: string(errorMessage), StatusCode: resp.StatusCode } + } + + responseBody, err := ioutil.ReadAll(resp.Body) + + if err != nil { + return nil, err + } + + return responseBody, nil +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/client/batch.go b/vendor/github.com/armPelionEdge/devicedb/client/batch.go new file mode 100644 index 0000000..f081cae --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/client/batch.go @@ -0,0 +1,76 @@ +package client +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "github.com/armPelionEdge/devicedb/transport" +) + +// Contains a database update operation +type Batch struct { + ops map[string]transport.TransportUpdateOp +} + +// Create a new batch update +func NewBatch() *Batch { + return &Batch{ + ops: make(map[string]transport.TransportUpdateOp), + } +} + +// Adds a key put operation to this update. Key and value are the +// key that is being modified and the value that it should be set to. +// context is the causal context for the modification. 
It can be +// left blank if +func (batch *Batch) Put(key string, value string, context string) *Batch { + batch.ops[key] = transport.TransportUpdateOp{ + Type: "put", + Key: key, + Value: value, + Context: context, + } + + return batch +} + +func (batch *Batch) Delete(key string, context string) *Batch { + batch.ops[key] = transport.TransportUpdateOp{ + Type: "delete", + Key: key, + Context: context, + } + + return batch +} + +func (batch *Batch) ToTransportUpdateBatch() transport.TransportUpdateBatch { + var updateBatch []transport.TransportUpdateOp = make([]transport.TransportUpdateOp, 0, len(batch.ops)) + + for _, op := range batch.ops { + updateBatch = append(updateBatch, op) + } + + return transport.TransportUpdateBatch(updateBatch) +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/client/client.go b/vendor/github.com/armPelionEdge/devicedb/client/client.go new file mode 100644 index 0000000..09f622b --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/client/client.go @@ -0,0 +1,238 @@ +package client +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "errors" + "net/http" + "bytes" + "io/ioutil" + "encoding/json" + "time" + "strings" + "context" + "fmt" + + . "github.com/armPelionEdge/devicedb/raft" + . "github.com/armPelionEdge/devicedb/error" + . 
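+    // The Client below is the cluster-management counterpart to the
+    // APIClient: it addresses individual members by PeerAddress and maps
+    // HTTP timeout errors to EClientTimeout. A minimal sketch (the
+    // timeout value is an illustrative assumption):
+    //
+    //     c := NewClient(ClientConfig{Timeout: 5 * time.Second})
+    //     err := c.AddNode(ctx, memberAddress, newMemberConfig)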
"github.com/armPelionEdge/devicedb/cluster" + + "github.com/armPelionEdge/devicedb/rest" +) + +const DefaultClientTimeout = time.Second * 10 + +type ErrorStatusCode struct { + StatusCode int + Message string +} + +func (errorStatus *ErrorStatusCode) Error() string { + return errorStatus.Message +} + +type ClientConfig struct { + Timeout time.Duration +} + +var EClientTimeout = errors.New("Client request timed out") + +type Client struct { + httpClient *http.Client +} + +func NewClient(config ClientConfig) *Client { + if config.Timeout == 0 { + config.Timeout = DefaultClientTimeout + } + + return &Client{ + httpClient: &http.Client{ + Timeout: config.Timeout, + }, + } +} + +func (client *Client) sendRequest(ctx context.Context, httpVerb string, endpointURL string, body []byte) ([]byte, error) { + request, err := http.NewRequest(httpVerb, endpointURL, bytes.NewReader(body)) + + if err != nil { + return nil, err + } + + request = request.WithContext(ctx) + + resp, err := client.httpClient.Do(request) + + if err != nil { + if strings.Contains(err.Error(), "Timeout") { + return nil, EClientTimeout + } + + return nil, err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + errorMessage, err := ioutil.ReadAll(resp.Body) + + if err != nil { + return nil, err + } + + return nil, &ErrorStatusCode{ Message: string(errorMessage), StatusCode: resp.StatusCode } + } + + responseBody, err := ioutil.ReadAll(resp.Body) + + if err != nil { + return nil, err + } + + return responseBody, nil +} + +// Use an existing cluster member to bootstrap the addition of another node +// to that cluster. host and port indicate the address of the existing cluster +// member while nodeAddress contains the ID, host name and port of the new +// cluster member +// +// Return Values: +// EClientTimeout: The request to the node timed out +func (client *Client) AddNode(ctx context.Context, memberAddress PeerAddress, newMemberConfig NodeConfig) error { + encodedNodeConfig, _ := json.Marshal(newMemberConfig) + _, err := client.sendRequest(ctx, "POST", memberAddress.ToHTTPURL("/cluster/nodes"), encodedNodeConfig) + + if _, ok := err.(*ErrorStatusCode); ok { + var dbError DBerror + + parseErr := json.Unmarshal([]byte(err.(*ErrorStatusCode).Message), &dbError) + + if parseErr == nil { + return dbError + } + } + + return err +} + +// Ask a cluster member to initiate the removal of some node from its cluster. +// host and port indicate the address of the initiator node while nodeID is +// the ID of the node that should be removed. +// +// Return Values: +// EClientTimeout: The request to the node timed out +func (client *Client) RemoveNode(ctx context.Context, memberAddress PeerAddress, nodeID uint64, replacementNodeID uint64, decommission, forwarded bool) error { + var queryString = "" + + endpoint := memberAddress.ToHTTPURL("/cluster/nodes/" + fmt.Sprintf("%d", nodeID)) + + if forwarded { + queryString += "forwarded=true&" + } + + if decommission { + queryString += "decommission=true&" + } + + if replacementNodeID != 0 { + queryString += fmt.Sprintf("replace=%d&", replacementNodeID) + } + + if len(queryString) != 0 { + // take off the last & + queryString = queryString[:len(queryString) - 1] + } + + endpoint += "?" 
+ queryString + + _, err := client.sendRequest(ctx, "DELETE", endpoint, []byte{ }) + + return err +} + +func (client *Client) DecommissionNode(ctx context.Context, memberAddress PeerAddress, nodeID uint64) error { + return client.RemoveNode(ctx, memberAddress, nodeID, 0, true, false) +} + +func (client *Client) ForceRemoveNode(ctx context.Context, memberAddress PeerAddress, nodeID uint64) error { + return client.RemoveNode(ctx, memberAddress, nodeID, 0, false, false) +} + +func (client *Client) ReplaceNode(ctx context.Context, memberAddress PeerAddress, nodeID uint64, replacementNodeID uint64) error { + return client.RemoveNode(ctx, memberAddress, nodeID, replacementNodeID, false, false) +} + +func (client *Client) MerkleTreeStats(ctx context.Context, memberAddress PeerAddress, siteID string, bucketName string) (rest.MerkleTree, error) { + endpoint := memberAddress.ToHTTPURL(fmt.Sprintf("/sites/%s/buckets/%s/merkle", siteID, bucketName)) + response, err := client.sendRequest(ctx, "GET", endpoint, []byte{ }) + + if err != nil { + return rest.MerkleTree{}, err + } + + var merkleTree rest.MerkleTree + + if err := json.Unmarshal(response, &merkleTree); err != nil { + return rest.MerkleTree{}, err + } + + return merkleTree, nil +} + +func (client *Client) MerkleTreeNode(ctx context.Context, memberAddress PeerAddress, siteID string, bucketName string, nodeID uint32) (rest.MerkleNode, error) { + endpoint := memberAddress.ToHTTPURL(fmt.Sprintf("/sites/%s/buckets/%s/merkle/nodes/%d", siteID, bucketName, nodeID)) + response, err := client.sendRequest(ctx, "GET", endpoint, []byte{ }) + + if err != nil { + return rest.MerkleNode{}, err + } + + var merkleNode rest.MerkleNode + + if err := json.Unmarshal(response, &merkleNode); err != nil { + return rest.MerkleNode{}, err + } + + return merkleNode, nil +} + +func (client *Client) MerkleTreeNodeKeys(ctx context.Context, memberAddress PeerAddress, siteID string, bucketName string, nodeID uint32) (rest.MerkleKeys, error) { + endpoint := memberAddress.ToHTTPURL(fmt.Sprintf("/sites/%s/buckets/%s/merkle/nodes/%d/keys", siteID, bucketName, nodeID)) + response, err := client.sendRequest(ctx, "GET", endpoint, []byte{ }) + + if err != nil { + return rest.MerkleKeys{}, err + } + + var merkleKeys rest.MerkleKeys + + if err := json.Unmarshal(response, &merkleKeys); err != nil { + return rest.MerkleKeys{}, err + } + + return merkleKeys, nil +} diff --git a/vendor/github.com/armPelionEdge/devicedb/client/entry.go b/vendor/github.com/armPelionEdge/devicedb/client/entry.go new file mode 100644 index 0000000..6a67c26 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/client/entry.go @@ -0,0 +1,30 @@ +package client +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. 
+ // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +type Entry struct { + Siblings []string + Context string +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/client/entry_iterator.go b/vendor/github.com/armPelionEdge/devicedb/client/entry_iterator.go new file mode 100644 index 0000000..20afdad --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/client/entry_iterator.go @@ -0,0 +1,68 @@ +package client +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +type iteratorEntry struct { + prefix string + key string + entry Entry +} + +type EntryIterator struct { + entries []iteratorEntry + currentEntry int +} + +func (iter *EntryIterator) Next() bool { + if iter.currentEntry < len(iter.entries) { + iter.currentEntry++ + } + + return iter.currentEntry < len(iter.entries) +} + +func (iter *EntryIterator) Prefix() string { + if iter.currentEntry < 0 || iter.currentEntry >= len(iter.entries) { + return "" + } + + return iter.entries[iter.currentEntry].prefix +} + +func (iter *EntryIterator) Key() string { + if iter.currentEntry < 0 || iter.currentEntry >= len(iter.entries) { + return "" + } + + return iter.entries[iter.currentEntry].key +} + +func (iter *EntryIterator) Entry() Entry { + if iter.currentEntry < 0 || iter.currentEntry >= len(iter.entries) { + return Entry{} + } + + return iter.entries[iter.currentEntry].entry +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/client_relay/client.go b/vendor/github.com/armPelionEdge/devicedb/client_relay/client.go new file mode 100644 index 0000000..c031423 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/client_relay/client.go @@ -0,0 +1,342 @@ +package client_relay +// + // Copyright (c) 2019 ARM Limited. 
+ //
+ // SPDX-License-Identifier: MIT
+ //
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
+ // of this software and associated documentation files (the "Software"), to
+ // deal in the Software without restriction, including without limitation the
+ // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ // sell copies of the Software, and to permit persons to whom the Software is
+ // furnished to do so, subject to the following conditions:
+ //
+ // The above copyright notice and this permission notice shall be included in all
+ // copies or substantial portions of the Software.
+ //
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ // SOFTWARE.
+ //
+
+
+import (
+    "bytes"
+    "context"
+    "crypto/tls"
+    "encoding/json"
+    "errors"
+    "fmt"
+    "io"
+    "io/ioutil"
+    "net/http"
+    "net/url"
+    "time"
+    "github.com/armPelionEdge/devicedb/client"
+    "github.com/armPelionEdge/devicedb/transport"
+)
+
+type Client interface {
+    // Execute a batch update in DeviceDB. The context is bound to the
+    // request. The bucket should be the name of the devicedb bucket to
+    // which this update should be applied.
+    Batch(ctx context.Context, bucket string, batch client.Batch) error
+    // Get the value of one or more keys in devicedb. The bucket should be
+    // the name of the devicedb bucket from which the values should be read.
+    // The keys array describes which keys should be retrieved. If no error
+    // occurs this function will return an array of values corresponding
+    // to the keys that were requested. The results array will mirror the
+    // keys array. In other words, the ith value in the result is the value
+    // for key i. If a key does not exist the value will be nil.
+    Get(ctx context.Context, bucket string, keys []string) ([]*client.Entry, error)
+    // Get keys matching one or more prefixes. The keys array represents
+    // a list of prefixes to query. The resulting iterator will iterate
+    // through database values whose key matches one of the specified
+    // prefixes.
+    GetMatches(ctx context.Context, bucket string, keys []string) (EntryIterator, error)
+    // Watch for updates to a set of keys or keys matching certain prefixes.
+    // lastSerial specifies the serial number of the last received update.
+    // The update channel that is returned by this function will stream relevant
+    // updates to a consumer. If a disconnection happens from the server
+    // then this client will push an error to the error channel and attempt
+    // to form a new connection until it is successful. If the consumer supplies
+    // a context that is cancellable they can cancel the context, which will
+    // cause both the update and error channels to close. These channels will
+    // not close until the context is cancelled, even during disconnections
+    // from the server. The client must consume all messages from the update
+    // and error channels until they are closed to prevent blocking of the watcher
+    // goroutine.
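A minimal consumer sketch for the Watch method declared just below. This is editorial illustration rather than part of the patch: it assumes a Client value c obtained from this package's New function, and the bucket name and key are hypothetical.

package example

import (
    "context"
    "fmt"
    "log"

    "github.com/armPelionEdge/devicedb/client_relay"
)

// watchExample watches a single hypothetical key and stops after a few
// updates. Both channels are drained until they close, as required above.
func watchExample(c client_relay.Client) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    updates, errs := c.Watch(ctx, "lww", []string{"sensor.temp"}, nil, 0)
    received := 0

    for updates != nil || errs != nil {
        select {
        case update, ok := <-updates:
            if !ok {
                updates = nil // closed once the context is cancelled
                continue
            }

            if update.IsEmpty() {
                continue // marker: replay of missed updates is complete
            }

            fmt.Printf("key=%s serial=%d siblings=%v\n", update.Key, update.Serial, update.Siblings)

            received++

            if received == 3 {
                cancel() // stop watching; both channels will now close
            }
        case err, ok := <-errs:
            if !ok {
                errs = nil // closed once the context is cancelled
                continue
            }

            log.Printf("watch interrupted; the client reconnects on its own: %v", err)
        }
    }
}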
+ Watch(ctx context.Context, bucket string, keys []string, prefixes []string, lastSerial uint64) (chan Update, chan error) +} + +type Config struct { + // The server URI is the base URI for the devicedb server + // An example of this is https://localhost:9000 + ServerURI string + // Provide a TLS config if you are connecting to a TLS + // enabled devicedb relay server. You will need to provide + // the relay CA and server name (the relay ID) + TLSConfig *tls.Config + // When a watcher is established by a call to Watch() + // disconnections may occur while the watcher is still + // up. This field determines how often the watcher + // will try to reconnect until a new connection can be + // established. + WatchReconnectTimeout time.Duration +} + +// Create a new DeviceDB client +func New(config Config) Client { + if config.WatchReconnectTimeout == 0 { + config.WatchReconnectTimeout = time.Second + } + + return &HTTPClient{ + server: config.ServerURI, + httpClient: &http.Client{ Transport: &http.Transport{ TLSClientConfig: config.TLSConfig } }, + watchReconnectTimeout: config.WatchReconnectTimeout, + } +} + +type HTTPClient struct { + server string + httpClient *http.Client + watchReconnectTimeout time.Duration +} + +func (c *HTTPClient) Batch(ctx context.Context, bucket string, batch client.Batch) error { + var transportBatch transport.TransportUpdateBatch = batch.ToTransportUpdateBatch() + url := fmt.Sprintf("/%s/batch", bucket) + + body, err := json.Marshal(transportBatch) + + if err != nil { + return err + } + + respBody, err := c.sendRequest(ctx, "POST", url, body) + + if err != nil { + return err + } + + respBody.Close() + + return nil +} + +func (c *HTTPClient) Get(ctx context.Context, bucket string, keys []string) ([]*client.Entry, error) { + url := fmt.Sprintf("/%s/values?", bucket) + body, err := json.Marshal(keys) + + if err != nil { + return nil, err + } + + respBody, err := c.sendRequest(ctx, "POST", url, body) + + if err != nil { + return nil, err + } + + defer respBody.Close() + + var decoder *json.Decoder = json.NewDecoder(respBody) + var transportSiblingSets []*transport.TransportSiblingSet + + err = decoder.Decode(&transportSiblingSets) + + if err != nil { + return nil, err + } + + if len(transportSiblingSets) != len(keys) { + return nil, errors.New(fmt.Sprintf("A protocol error occurred: Asked for %d keys but received %d values in the result array", len(keys), len(transportSiblingSets))) + } + + var entries []*client.Entry = make([]*client.Entry, len(transportSiblingSets)) + + for i, _ := range keys { + if transportSiblingSets[i] == nil { + continue + } + + entries[i] = &client.Entry{ + Context: transportSiblingSets[i].Context, + Siblings: transportSiblingSets[i].Siblings, + } + } + + return entries, nil +} + +func (c *HTTPClient) GetMatches(ctx context.Context, bucket string, keys []string) (EntryIterator, error) { + url := fmt.Sprintf("/%s/matches?", bucket) + body, err := json.Marshal(keys) + + if err != nil { + return nil, err + } + + respBody, err := c.sendRequest(ctx, "POST", url, body) + + if err != nil { + return nil, err + } + + return &StreamedEntryIterator{ reader: respBody }, nil +} + +func (c *HTTPClient) Watch(ctx context.Context, bucket string, keys []string, prefixes []string, lastSerial uint64) (chan Update, chan error) { + var query url.Values = url.Values{} + + for _, key := range keys { + query.Add("key", key) + } + + for _, prefix := range prefixes { + query.Add("prefix", prefix) + } + + updates := make(chan Update) + errorsChan := make(chan error) + + 
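An editorial usage sketch of the constructor and the read and prefix-read paths defined above, before the Watch implementation continues below. The server URI, bucket name, and keys are placeholders, and the helper function is hypothetical.

package example

import (
    "context"
    "fmt"

    "github.com/armPelionEdge/devicedb/client"
    "github.com/armPelionEdge/devicedb/client_relay"
)

// readWriteExample exercises Batch, Get, and GetMatches. "lww" is a
// placeholder bucket name and the keys are illustrative.
func readWriteExample() error {
    c := client_relay.New(client_relay.Config{ ServerURI: "http://localhost:9000" })
    ctx := context.Background()

    // Write: an empty context string means no prior causal context is supplied.
    b := client.NewBatch()
    b.Put("sensor.temp", "22.5", "")

    if err := c.Batch(ctx, "lww", *b); err != nil {
        return err
    }

    // Read: the result slice mirrors the key slice; missing keys come back nil.
    entries, err := c.Get(ctx, "lww", []string{"sensor.temp", "missing.key"})

    if err != nil {
        return err
    }

    for i, entry := range entries {
        if entry != nil {
            fmt.Printf("key %d siblings: %v\n", i, entry.Siblings)
        }
    }

    // Prefix read: iterate every key beginning with "sensor."
    iter, err := c.GetMatches(ctx, "lww", []string{"sensor."})

    if err != nil {
        return err
    }

    for iter.Next() {
        fmt.Printf("%s (matched prefix %s): %v\n", iter.Key(), iter.Prefix(), iter.Entry().Siblings)
    }

    return iter.Error()
}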
go func() { + defer func() { + close(updates) + close(errorsChan) + }() + + for { + reqCtx, cancel := context.WithCancel(ctx) + url := fmt.Sprintf("/%s/watch?%s&lastSerial=%d", bucket, query.Encode(), lastSerial) + respBody, err := c.sendRequest(reqCtx, "GET", url, nil) + + if err == nil { + // establish new iterator and stream updates + var streamingMissedUpdates bool = true + var highestMissedSerial uint64 + updateIterator := &StreamedUpdateIterator{ reader: respBody } + + // stream updates until the response stream + // is interrupted or an error occurs + for updateIterator.Next() { + update := updateIterator.Update() + + // the first chunk of updates are sent + // out of order (non-increasing serials) + // An empty update marks the end of the + // initial chunk of updates which are + // this client catching up with any missed + // updates and the start of receiving + // updates with increasing serial numbers + if streamingMissedUpdates { + if update.IsEmpty() { + // marks end of missed updates + streamingMissedUpdates = false + + if lastSerial < highestMissedSerial { + lastSerial = highestMissedSerial + } + + // Sends an empty update simply to let + // client know about a new stable serial + update.LastStableSerial = lastSerial + updates <- update + + continue + } + + if highestMissedSerial < update.Serial { + highestMissedSerial = update.Serial + } + } + + // All received updates should have a serial number greater than + // the lastSerial provided with the exception of an update with + // a serial number of 0 + if update.Serial <= lastSerial && (lastSerial != 0 || update.Serial != 0) { + errorsChan <- errors.New("Protocol error") + cancel() + break + } else if !streamingMissedUpdates { + lastSerial = update.Serial + } + + // the last stable serial for an update + // sent to a consumer is the last serial number + // associated with some received update such that + // no future update in the future will have a serial + // number lower than that. + update.LastStableSerial = lastSerial + updates <- update + } + + if updateIterator.Error() != nil { + // Only report the error if the context + // wasn't canceled. We don't want to send + // 'context canceled' errors + select { + case <-ctx.Done(): + default: + errorsChan <- updateIterator.Error() + } + } + } else { + // Only report the error if the context + // wasn't canceled. 
We don't want to send + // 'context canceled' errors + select { + case <-ctx.Done(): + default: + errorsChan <- err + } + } + + // stop if the watcher was cancelled or try + // to re-establish the connection in a moment + select { + case <-ctx.Done(): + return + case <-time.After(c.watchReconnectTimeout): + } + } + }() + + return updates, errorsChan +} + +func (c *HTTPClient) sendRequest(ctx context.Context, httpVerb string, endpointURL string, body []byte) (io.ReadCloser, error) { + u := fmt.Sprintf("%s%s", c.server, endpointURL) + request, err := http.NewRequest(httpVerb, u, bytes.NewReader(body)) + + if err != nil { + return nil, err + } + + request = request.WithContext(ctx) + + resp, err := c.httpClient.Do(request) + + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + errorMessage, err := ioutil.ReadAll(resp.Body) + + resp.Body.Close() + + if err != nil { + return nil, err + } + + return nil, &client.ErrorStatusCode{ Message: string(errorMessage), StatusCode: resp.StatusCode } + } + + return resp.Body, nil +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/client_relay/entry_iterator.go b/vendor/github.com/armPelionEdge/devicedb/client_relay/entry_iterator.go new file mode 100644 index 0000000..a1f089d --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/client_relay/entry_iterator.go @@ -0,0 +1,156 @@ +package client_relay +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "bufio" + "encoding/json" + "errors" + "io" + "github.com/armPelionEdge/devicedb/client" + "github.com/armPelionEdge/devicedb/transport" +) + +type EntryIterator interface { + // Move to the next result. Returns + // false if there is an error or if + // there are no more results to iterate + // through. 
If there is an error, the + // Error() function will return the + // error that occurred + Next() bool + // Return the prefix that matches + // the key for the current result + Prefix() string + // Return the key for the current + // result + Key() string + // Return the value for the + // current result + Entry() client.Entry + // Return the error that occurred + // while iterating + Error() error +} + +type StreamedEntryIterator struct { + reader io.ReadCloser + scanner *bufio.Scanner + closed bool + err error + key string + prefix string + entry client.Entry +} + +func (iter *StreamedEntryIterator) Next() bool { + if iter.closed { + return false + } + + if iter.scanner == nil { + iter.scanner = bufio.NewScanner(iter.reader) + } + + // prefix + if !iter.scanner.Scan() { + if iter.scanner.Err() != nil { + iter.err = iter.scanner.Err() + } + + iter.close() + + return false + } + + iter.prefix = iter.scanner.Text() + + // key + if !iter.scanner.Scan() { + if iter.scanner.Err() != nil { + iter.err = iter.scanner.Err() + } else { + iter.err = errors.New("Incomplete stream") + } + + iter.close() + + return false + } + + iter.key = iter.scanner.Text() + + // entry + if !iter.scanner.Scan() { + if iter.scanner.Err() != nil { + iter.err = iter.scanner.Err() + } else { + iter.err = errors.New("Incomplete stream") + } + + iter.close() + + return false + } + + var siblingSet transport.TransportSiblingSet + + if err := json.Unmarshal(iter.scanner.Bytes(), &siblingSet); err != nil { + iter.err = err + + iter.close() + + return false + } + + iter.entry.Context = siblingSet.Context + iter.entry.Siblings = siblingSet.Siblings + + return true +} + +func (iter *StreamedEntryIterator) close() { + iter.prefix = "" + iter.key = "" + iter.entry = client.Entry{} + iter.closed = true + iter.reader.Close() +} + +func (iter *StreamedEntryIterator) Prefix() string { + return iter.prefix +} + +func (iter *StreamedEntryIterator) Key() string { + return iter.key +} + +func (iter *StreamedEntryIterator) Entry() client.Entry { + return iter.entry +} + +func (iter *StreamedEntryIterator) Error() error { + return iter.err +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/client_relay/update.go b/vendor/github.com/armPelionEdge/devicedb/client_relay/update.go new file mode 100644 index 0000000..30771cd --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/client_relay/update.go @@ -0,0 +1,37 @@ +package client_relay +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +type Update struct { + Key string + Serial uint64 + Context string + Siblings []string + LastStableSerial uint64 +} + +func (update *Update) IsEmpty() bool { + return update.Key == "" +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/client_relay/update_iterator.go b/vendor/github.com/armPelionEdge/devicedb/client_relay/update_iterator.go new file mode 100644 index 0000000..5e4e642 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/client_relay/update_iterator.go @@ -0,0 +1,139 @@ +package client_relay +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "bufio" + "encoding/json" + "errors" + "io" + "strings" + "github.com/armPelionEdge/devicedb/transport" +) + +type UpdateIterator interface { + // Move to the next result. Returns + // false if there is an error or if + // there are no more results to iterate + // through. If there is an error, the + // Error() function will return the + // error that occurred + Next() bool + // Return the next update + Update() Update + // Return the error that occurred + // while iterating + Error() error +} + +type StreamedUpdateIterator struct { + reader io.ReadCloser + scanner *bufio.Scanner + closed bool + err error + update Update +} + +func (iter *StreamedUpdateIterator) Next() bool { + if iter.closed { + return false + } + + if iter.scanner == nil { + iter.scanner = bufio.NewScanner(iter.reader) + } + + // data: %s line + if !iter.scanner.Scan() { + if iter.scanner.Err() != nil { + iter.err = iter.scanner.Err() + } + + iter.close() + + return false + } + + if !strings.HasPrefix(iter.scanner.Text(), "data: ") { + // protocol error. 
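// Editor's note: for orientation, the stream this parser consumes looks like
// the sketch below. This is inferred from the handling in this function; the
// JSON field names belong to transport.TransportRow and are elided here
// rather than guessed:
//
//     data: { ... transport.TransportRow JSON ... }
//     (blank line)
//     data: 
//     (blank line)
//
// Every "data: <payload>" line is followed by a blank separator line, and a
// "data: " line with an empty payload (note the trailing space) marks the end
// of the replayed missed updates, as handled just below.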
+ iter.err = errors.New("Protocol error") + + iter.close() + + return false + } + + encodedUpdate := iter.scanner.Text()[len("data: "):] + + if encodedUpdate == "" { + // this is a marker indicating the last of the initial + // pushes of missed messages + iter.update = Update{} + } else { + var update transport.TransportRow + + if err := json.Unmarshal([]byte(encodedUpdate), &update); err != nil { + iter.err = err + + iter.close() + + return false + } + + iter.update = Update{ + Key: update.Key, + Serial: update.LocalVersion, + Context: update.Context, + Siblings: update.Siblings, + } + } + + // consume newline between "data: %s" lines + if !iter.scanner.Scan() { + if iter.scanner.Err() != nil { + iter.err = iter.scanner.Err() + } + + iter.close() + + return false + } + + return true +} + +func (iter *StreamedUpdateIterator) close() { + iter.update = Update{} + iter.closed = true + iter.reader.Close() +} + +func (iter *StreamedUpdateIterator) Update() Update { + return iter.update +} + +func (iter *StreamedUpdateIterator) Error() error { + return iter.err +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/cluster/command.go b/vendor/github.com/armPelionEdge/devicedb/cluster/command.go new file mode 100644 index 0000000..27ca1c3 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/cluster/command.go @@ -0,0 +1,387 @@ +package cluster +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "encoding/json" +) + +type ClusterCommandType int + +// Scenarios +// Adding a node to a cluster +// Decommission a node (graceful) +// Removing a node (forced) +// Repairing a node in a cluster +// Increasing a node's capacity +// Decreasing a node's capacity +// +// Adding a node to a cluster +// ***all changes should be temporary until committed by the release. 
We should be able to abort the process at any point with no effect
+// (phase i synchronized)
+// Start ring transaction
+// Propose update node capacity
+// Update node capacity committed
+// Take tokens committed
+// (phase ii async)
+// whenever token allocation is changed check to see if this node needs to transfer over any partitions
+// when I have all the partitions I need propose update to partition holder state until it gets committed
+//
+// Partition owner vs partition holder
+// Partition owner is derived from the token distribution and describes which nodes are responsible for which partitions
+// Partition holder is the actual map of which nodes hold the authoritative data for which partitions. Since the token distribution might change many times
+// between background partition transfers, you need to keep track of these things independently
+//
+// After token change I need a partition I don't currently have
+// I know who has the partition I need by looking at the holders map
+// I ask the node that holds this partition to give me a copy
+// When the transfer is complete I propose an entry that makes me the holder of this partition
+// I can only accept writes to the partition after holder assignment has been committed
+//
+// What do we even put in the log?
+// For now let's assume only token state changes go into the log
+// 1) Some nodes may have an old view of the token distribution at any given time
+// 2) If a node has log entry 8 they also have log entries 1-7
+// 3) How do we prevent a partition from being transferred to a node that no longer owns it if that node hasn't yet gotten the update that it no longer owns it?
+// 4) How do we reliably keep track of who has a copy of which partition?
+// 5) Latest log entry index can be used as an indication of the latest configuration version that a node is aware of (a logical clock)
+// 6) A node may receive a configuration change and then query other nodes to initiate a partition transfer. It should include its configuration version.
+// the other nodes should inform it if their config is newer and do not agree that the requester is an owner of this partition so it can cancel its transfer
+// 7) A node may receive a new configuration change while it's in the middle of a transfer that makes that partition no longer owned by it. In that case it should
+// cancel the transfer
+// 8) Accepting updates to a partition though?? When to lock or unlock it for writes. Maybe we just don't care. Maybe we just merge conflicting partition histories?
+// a) Propose conf change for partition ownership change
+// b) Propose conf change for partition holder change
+//
+// A node locks updates to a partition when it no longer owns it. In other words, when it receives a committed (a)
+// A node keeps requesting transfer of a partition from its current holder until the transfer is complete or it receives an (a) in its commit log transferring ownership away from it
+// the current holder will continually refuse the transfer as long as the partition is not write locked. In other words, as long as the current holder also believes itself to be
+// the owner it will not transfer it.
+// A node only proposes (b) if it has fully transferred the partition from what it believes is the current holder
+//
+// Node receiving (a) only commits those changes to the config if they don't conflict with the allocation constraints
+// Node receiving (b):
+// rule 1) if the node receiving the holder transfer thinks that the proposer of that holder transfer is not the owner then the holder transfer is not committed to the config
+// rule 2) if the proposer == the owner then make that node the holder as well
+// rule 3) if the node receiving (b) is the node that proposed it and it is still the owner then unlock the partition
+//
+// Safety guarantees
+// 1) As long as a primary doesn't disappear forever there can be no lost updates
+// 2) A partition is unlocked at at most one node at a time
+//
+//
+//
+
+const (
+    ClusterUpdateNode ClusterCommandType = iota
+    ClusterAddNode ClusterCommandType = iota
+    ClusterRemoveNode ClusterCommandType = iota
+    ClusterTakePartitionReplica ClusterCommandType = iota
+    ClusterSetReplicationFactor ClusterCommandType = iota
+    ClusterSetPartitionCount ClusterCommandType = iota
+
+    ClusterAddSite ClusterCommandType = iota
+    ClusterRemoveSite ClusterCommandType = iota
+    ClusterAddRelay ClusterCommandType = iota
+    ClusterRemoveRelay ClusterCommandType = iota
+    ClusterMoveRelay ClusterCommandType = iota
+    ClusterSnapshot ClusterCommandType = iota
+)
+
+type ClusterCommand struct {
+    Type ClusterCommandType
+    SubmitterID uint64
+    CommandID uint64
+    Data []byte
+}
+
+type ClusterUpdateNodeBody struct {
+    NodeID uint64
+    NodeConfig NodeConfig
+}
+
+type ClusterAddNodeBody struct {
+    NodeID uint64
+    NodeConfig NodeConfig
+}
+
+type ClusterRemoveNodeBody struct {
+    NodeID uint64
+    ReplacementNodeID uint64
+}
+
+type ClusterTakePartitionReplicaBody struct {
+    Partition uint64
+    Replica uint64
+    NodeID uint64
+}
+
+type ClusterSetReplicationFactorBody struct {
+    ReplicationFactor uint64
+}
+
+type ClusterSetPartitionCountBody struct {
+    Partitions uint64
+}
+
+type ClusterAddSiteBody struct {
+    SiteID string
+}
+
+type ClusterRemoveSiteBody struct {
+    SiteID string
+}
+
+type ClusterAddRelayBody struct {
+    RelayID string
+}
+
+type ClusterRemoveRelayBody struct {
+    RelayID string
+}
+
+type ClusterMoveRelayBody struct {
+    RelayID string
+    SiteID string
+}
+
+type ClusterSnapshotBody struct {
+    UUID string
+}
+
+func EncodeClusterCommand(command ClusterCommand) ([]byte, error) {
+    encodedCommand, err := json.Marshal(command)
+
+    if err != nil {
+        return nil, err
+    }
+
+    return encodedCommand, nil
+}
+
+func DecodeClusterCommand(encodedCommand []byte) (ClusterCommand, error) {
+    var command ClusterCommand
+
+    err := json.Unmarshal(encodedCommand, &command)
+
+    if err != nil {
+        return ClusterCommand{}, err
+    }
+
+    return command, nil
+}
+
+func CreateClusterCommand(commandType ClusterCommandType, body interface{}) (ClusterCommand, error) {
+    encodedBody, err := json.Marshal(body)
+
+    if err != nil {
+        return ClusterCommand{}, ECouldNotParseCommand
+    }
+
+    switch commandType {
+    case ClusterUpdateNode:
+        if _, ok := body.(ClusterUpdateNodeBody); !ok {
+            return ClusterCommand{}, ECouldNotParseCommand
+        }
+    case ClusterAddNode:
+        if _, ok := body.(ClusterAddNodeBody); !ok {
+            return ClusterCommand{}, ECouldNotParseCommand
+        }
+    case ClusterRemoveNode:
+        if _, ok := body.(ClusterRemoveNodeBody); !ok {
+            return ClusterCommand{}, ECouldNotParseCommand
+        }
+    case ClusterTakePartitionReplica:
+        if _, ok := body.(ClusterTakePartitionReplicaBody); !ok {
+            return
ClusterCommand{}, ECouldNotParseCommand + } + case ClusterSetReplicationFactor: + if _, ok := body.(ClusterSetReplicationFactorBody); !ok { + return ClusterCommand{}, ECouldNotParseCommand + } + case ClusterSetPartitionCount: + if _, ok := body.(ClusterSetPartitionCountBody); !ok { + return ClusterCommand{}, ECouldNotParseCommand + } + case ClusterAddSite: + if _, ok := body.(ClusterAddSiteBody); !ok { + return ClusterCommand{}, ECouldNotParseCommand + } + case ClusterRemoveSite: + if _, ok := body.(ClusterRemoveSiteBody); !ok { + return ClusterCommand{}, ECouldNotParseCommand + } + case ClusterAddRelay: + if _, ok := body.(ClusterAddRelayBody); !ok { + return ClusterCommand{}, ECouldNotParseCommand + } + case ClusterRemoveRelay: + if _, ok := body.(ClusterRemoveRelayBody); !ok { + return ClusterCommand{}, ECouldNotParseCommand + } + case ClusterMoveRelay: + if _, ok := body.(ClusterMoveRelayBody); !ok { + return ClusterCommand{}, ECouldNotParseCommand + } + case ClusterSnapshot: + if _, ok := body.(ClusterSnapshotBody); !ok { + return ClusterCommand{}, ECouldNotParseCommand + } + default: + return ClusterCommand{ }, ENoSuchCommand + } + + return ClusterCommand{ Type: commandType, Data: encodedBody }, nil +} + +func EncodeClusterCommandBody(body interface{}) ([]byte, error) { + encodedBody, err := json.Marshal(body) + + if err != nil { + return nil, ECouldNotParseCommand + } + + return encodedBody, nil +} + +func DecodeClusterCommandBody(command ClusterCommand) (interface{}, error) { + switch command.Type { + case ClusterUpdateNode: + var body ClusterUpdateNodeBody + + if err := json.Unmarshal(command.Data, &body); err != nil { + break + } + + return body, nil + case ClusterAddNode: + var body ClusterAddNodeBody + + if err := json.Unmarshal(command.Data, &body); err != nil { + break + } + + return body, nil + case ClusterRemoveNode: + var body ClusterRemoveNodeBody + + if err := json.Unmarshal(command.Data, &body); err != nil { + break + } + + return body, nil + case ClusterTakePartitionReplica: + var body ClusterTakePartitionReplicaBody + + if err := json.Unmarshal(command.Data, &body); err != nil { + break + } + + return body, nil + case ClusterSetReplicationFactor: + var body ClusterSetReplicationFactorBody + + if err := json.Unmarshal(command.Data, &body); err != nil { + break + } + + return body, nil + case ClusterSetPartitionCount: + var body ClusterSetPartitionCountBody + + if err := json.Unmarshal(command.Data, &body); err != nil { + break + } + + return body, nil + case ClusterAddSite: + var body ClusterAddSiteBody + + if err := json.Unmarshal(command.Data, &body); err != nil { + break + } + + return body, nil + case ClusterRemoveSite: + var body ClusterRemoveSiteBody + + if err := json.Unmarshal(command.Data, &body); err != nil { + break + } + + return body, nil + case ClusterAddRelay: + var body ClusterAddRelayBody + + if err := json.Unmarshal(command.Data, &body); err != nil { + break + } + + return body, nil + case ClusterRemoveRelay: + var body ClusterRemoveRelayBody + + if err := json.Unmarshal(command.Data, &body); err != nil { + break + } + + return body, nil + case ClusterMoveRelay: + var body ClusterMoveRelayBody + + if err := json.Unmarshal(command.Data, &body); err != nil { + break + } + + return body, nil + case ClusterSnapshot: + var body ClusterSnapshotBody + + if err := json.Unmarshal(command.Data, &body); err != nil { + break + } + + return body, nil + default: + return nil, ENoSuchCommand + } + + return nil, ECouldNotParseCommand +} + +// assign tokens +// maybe 
the token assignment entry doesn't contain any tokens? maybe it just determines a deterministic order for assigning tokens? +// Scenarios +// Adding a node to a cluster +// LOG: ... [ ADD NODE i ] ... [ UPDATE TOKEN ASSIGNMENTS FOR NODE i (capacity = 40 GiB) ] ... +// Decommission a node (graceful) +// LOG: ... [ UPDATE TOKEN ASSIGNMENTS FOR NODE i (capacity = 0 GiB) ] ... [ REMOVE NODE i ] ... +// Removing a node (forced) +// LOG: ... [ REMOVE NODE i ] ... +// Repairing a node in a cluster +// +// Updating a node's capacity +// LOG: ... [ UPDATE TOKEN ASSIGNMENTS FOR NODE i (capacity = 80 GiB) ] ... increase +// LOG: ... [ UPDATE TOKEN ASSIGNMENTS FOR NODE i (capacity = 20 GiB) ] ... decrease diff --git a/vendor/github.com/armPelionEdge/devicedb/cluster/config_controller.go b/vendor/github.com/armPelionEdge/devicedb/cluster/config_controller.go new file mode 100644 index 0000000..e66d294 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/cluster/config_controller.go @@ -0,0 +1,548 @@ +// This module bridges the gap between the cluster configuration controller +// and the raft library +package cluster +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "github.com/armPelionEdge/devicedb/raft" + . "github.com/armPelionEdge/devicedb/logging" + . 
"github.com/armPelionEdge/devicedb/util" + + raftEtc "github.com/coreos/etcd/raft" + "github.com/coreos/etcd/raft/raftpb" + + "context" + "errors" + "sync" + "time" +) + +const ProposalRetryPeriodSeconds = 15 + +type ClusterConfigController interface { + LogDump() (raftpb.Snapshot, []raftpb.Entry, error) + AddNode(ctx context.Context, nodeConfig NodeConfig) error + ReplaceNode(ctx context.Context, replacedNodeID uint64, replacementNodeID uint64) error + RemoveNode(ctx context.Context, nodeID uint64) error + ClusterCommand(ctx context.Context, commandBody interface{}) error + OnLocalUpdates(cb func(deltas []ClusterStateDelta)) + OnClusterSnapshot(cb func(snapshotIndex uint64, snapshotId string)) + ClusterController() *ClusterController + Start() error + Stop() + CancelProposals() +} + +var EBadContext = errors.New("The node addition or removal had an invalid context") +var ERaftNodeStartup = errors.New("Encountered an error while starting up raft controller") +var ERaftProtocolError = errors.New("Raft controller encountered a protocol error") +var ECancelled = errors.New("The request was cancelled") +var EStopped = errors.New("The server was stopped") + +type proposalResponse struct { + err error +} + +type ConfigController struct { + raftNode *raft.RaftNode + raftTransport *raft.TransportHub + clusterController *ClusterController + requestMap *RequestMap + stop chan int + restartLock sync.Mutex + lock sync.Mutex + pendingProposals map[uint64]func() + proposalsCancelled bool + onLocalUpdatesCB func([]ClusterStateDelta) + onClusterSnapshotCB func(uint64, string) + // entryLog serves as an + // easily accsessible record of what happened + // at this node to bring its state to what it + // is now. These fields are used by the log_dump + // and allow a developer to debug cluster state + // inconsistencies. 
+ entryLog []raftpb.Entry +} + +func NewConfigController(raftNode *raft.RaftNode, raftTransport *raft.TransportHub, clusterController *ClusterController) *ConfigController { + return &ConfigController{ + raftNode: raftNode, + raftTransport: raftTransport, + clusterController: clusterController, + requestMap: NewRequestMap(), + pendingProposals: make(map[uint64]func()), + entryLog: make([]raftpb.Entry, 0), + } +} + +func (cc *ConfigController) LogDump() (raftpb.Snapshot, []raftpb.Entry, error) { + var baseSnapshot raftpb.Snapshot + + baseSnapshot, err := cc.raftNode.LastSnapshot() + + if err != nil { + return raftpb.Snapshot{}, []raftpb.Entry{ }, err + } + + entries, err := cc.raftNode.CommittedEntries() + + if err != nil { + return raftpb.Snapshot{}, []raftpb.Entry{ }, err + } + + return baseSnapshot, entries, nil +} + +func (cc *ConfigController) CancelProposals() { + // Cancels all current proposals and prevents any future ones from being made, causing them all to return ECancelled + cc.lock.Lock() + defer cc.lock.Unlock() + + cc.proposalsCancelled = true + + for _, cancel := range cc.pendingProposals { + cancel() + } +} + +func (cc *ConfigController) unregisterProposal(id uint64) { + cc.lock.Lock() + defer cc.lock.Unlock() + + delete(cc.pendingProposals, id) +} + +func (cc *ConfigController) AddNode(ctx context.Context, nodeConfig NodeConfig) error { + encodedAddCommandBody, _ := EncodeClusterCommandBody(ClusterAddNodeBody{ NodeID: nodeConfig.Address.NodeID, NodeConfig: nodeConfig }) + addCommand := ClusterCommand{ Type: ClusterAddNode, Data: encodedAddCommandBody, SubmitterID: cc.clusterController.LocalNodeID, CommandID: cc.nextCommandID() } + addContext, _ := EncodeClusterCommand(addCommand) + + cc.lock.Lock() + if cc.proposalsCancelled { + cc.lock.Unlock() + return ECancelled + } + + ctx, cancel := context.WithCancel(ctx) + cc.pendingProposals[addCommand.CommandID] = cancel + defer cc.unregisterProposal(addCommand.CommandID) + cc.lock.Unlock() + + respCh := cc.requestMap.MakeRequest(addCommand.CommandID) + + if err := cc.raftNode.AddNode(ctx, nodeConfig.Address.NodeID, addContext); err != nil { + cc.requestMap.Respond(addCommand.CommandID, nil) + return err + } + + select { + case resp := <-respCh: + return resp.(proposalResponse).err + case <-ctx.Done(): + cc.requestMap.Respond(addCommand.CommandID, nil) + return ECancelled + case <-cc.stop: + cc.requestMap.Respond(addCommand.CommandID, nil) + return EStopped + } +} + +func (cc *ConfigController) ReplaceNode(ctx context.Context, replacedNodeID uint64, replacementNodeID uint64) error { + encodedRemoveCommandBody, _ := EncodeClusterCommandBody(ClusterRemoveNodeBody{ NodeID: replacedNodeID, ReplacementNodeID: replacementNodeID }) + replaceCommand := ClusterCommand{ Type: ClusterRemoveNode, Data: encodedRemoveCommandBody, SubmitterID: cc.clusterController.LocalNodeID, CommandID: cc.nextCommandID() } + replaceContext, _ := EncodeClusterCommand(replaceCommand) + + cc.lock.Lock() + if cc.proposalsCancelled { + cc.lock.Unlock() + return ECancelled + } + + ctx, cancel := context.WithCancel(ctx) + cc.pendingProposals[replaceCommand.CommandID] = cancel + defer cc.unregisterProposal(replaceCommand.CommandID) + cc.lock.Unlock() + + respCh := cc.requestMap.MakeRequest(replaceCommand.CommandID) + + if err := cc.raftNode.RemoveNode(ctx, replacedNodeID, replaceContext); err != nil { + cc.requestMap.Respond(replaceCommand.CommandID, nil) + return err + } + + for { + select { + case resp := <-respCh: + return resp.(proposalResponse).err + case 
<-time.After(time.Second * ProposalRetryPeriodSeconds): + case <-ctx.Done(): + cc.requestMap.Respond(replaceCommand.CommandID, nil) + return ECancelled + case <-cc.stop: + cc.requestMap.Respond(replaceCommand.CommandID, nil) + return EStopped + } + } +} + +func (cc *ConfigController) RemoveNode(ctx context.Context, nodeID uint64) error { + encodedRemoveCommandBody, _ := EncodeClusterCommandBody(ClusterRemoveNodeBody{ NodeID: nodeID, ReplacementNodeID: 0 }) + removeCommand := ClusterCommand{ Type: ClusterRemoveNode, Data: encodedRemoveCommandBody, SubmitterID: cc.clusterController.LocalNodeID, CommandID: cc.nextCommandID() } + removeContext, _ := EncodeClusterCommand(removeCommand) + + cc.lock.Lock() + if cc.proposalsCancelled { + cc.lock.Unlock() + return ECancelled + } + + ctx, cancel := context.WithCancel(ctx) + cc.pendingProposals[removeCommand.CommandID] = cancel + defer cc.unregisterProposal(removeCommand.CommandID) + cc.lock.Unlock() + + respCh := cc.requestMap.MakeRequest(removeCommand.CommandID) + + if err := cc.raftNode.RemoveNode(ctx, nodeID, removeContext); err != nil { + cc.requestMap.Respond(removeCommand.CommandID, nil) + return err + } + + for { + select { + case resp := <-respCh: + return resp.(proposalResponse).err + case <-time.After(time.Second * ProposalRetryPeriodSeconds): + case <-ctx.Done(): + cc.requestMap.Respond(removeCommand.CommandID, nil) + return ECancelled + case <-cc.stop: + cc.requestMap.Respond(removeCommand.CommandID, nil) + return EStopped + } + } +} + +func (cc *ConfigController) ClusterCommand(ctx context.Context, commandBody interface{}) error { + var command ClusterCommand = ClusterCommand{ + SubmitterID: cc.clusterController.LocalNodeID, + } + + switch commandBody.(type) { + case ClusterAddNodeBody: + command.Type = ClusterAddNode + case ClusterRemoveNodeBody: + command.Type = ClusterRemoveNode + case ClusterUpdateNodeBody: + command.Type = ClusterUpdateNode + case ClusterTakePartitionReplicaBody: + command.Type = ClusterTakePartitionReplica + case ClusterSetReplicationFactorBody: + command.Type = ClusterSetReplicationFactor + case ClusterSetPartitionCountBody: + command.Type = ClusterSetPartitionCount + case ClusterAddSiteBody: + command.Type = ClusterAddSite + case ClusterRemoveSiteBody: + command.Type = ClusterRemoveSite + case ClusterAddRelayBody: + command.Type = ClusterAddRelay + case ClusterRemoveRelayBody: + command.Type = ClusterRemoveRelay + case ClusterMoveRelayBody: + command.Type = ClusterMoveRelay + case ClusterSnapshotBody: + command.Type = ClusterSnapshot + default: + return ENoSuchCommand + } + + encodedCommandBody, _ := EncodeClusterCommandBody(commandBody) + command.Data = encodedCommandBody + command.SubmitterID = cc.clusterController.LocalNodeID + command.CommandID = cc.nextCommandID() + encodedCommand, _ := EncodeClusterCommand(command) + + cc.lock.Lock() + if cc.proposalsCancelled { + cc.lock.Unlock() + return ECancelled + } + + ctx, cancel := context.WithCancel(ctx) + cc.pendingProposals[command.CommandID] = cancel + defer cc.unregisterProposal(command.CommandID) + cc.lock.Unlock() + + respCh := cc.requestMap.MakeRequest(command.CommandID) + + // Note: It is possible that the proposal is lost due to message loss. Proposals are not + // re-sent if being forwarded from follower to leader. It is possible for the call to + // Propose() to accept a proposal and queue it for forwarding to the leader but there is no mechanism + // to re-send the proposal message if it is lost due to network congestion or if + // the leader is down. 
Since the intention of this method is to block until the given command + // has been committed to the log, it needs to retry proposals after a timeout period. + // + // This issue was discovered when the downloader attempted to submit a cluster command + // that would transfer holdership of a partition replica to the requesting node but the TCP + // connection reached the "file open" limit and some messages were dropped. A way to mitigate + // this as well is to send the message serially instead of at the same time with multiple goroutines + for { + Log.Debugf("Making raft proposal for command %d", command.CommandID) + + if err := cc.raftNode.Propose(ctx, encodedCommand); err != nil { + cc.requestMap.Respond(command.CommandID, nil) + return err + } + + select { + case resp := <-respCh: + Log.Debugf("Command %d accepted", command.CommandID) + return resp.(proposalResponse).err + case <-time.After(time.Second * ProposalRetryPeriodSeconds): + // Time to retry the proposal + Log.Debugf("Re-attempting proposal for command %d", command.CommandID) + case <-ctx.Done(): + cc.requestMap.Respond(command.CommandID, nil) + return ECancelled + case <-cc.stop: + cc.requestMap.Respond(command.CommandID, nil) + return EStopped + } + } +} + +func (cc *ConfigController) OnLocalUpdates(cb func(deltas []ClusterStateDelta)) { + cc.onLocalUpdatesCB = cb +} + +func (cc *ConfigController) OnClusterSnapshot(cb func(snapshotIndex uint64, snapshotId string)) { + cc.onClusterSnapshotCB = cb +} + +func (cc *ConfigController) Start() error { + cc.restartLock.Lock() + restored := make(chan int, 1) + replayDone := false + cc.stop = make(chan int) + cc.restartLock.Unlock() + cc.clusterController.DisableNotifications() + + cc.raftTransport.OnReceive(func(ctx context.Context, msg raftpb.Message) error { + return cc.raftNode.Receive(ctx, msg) + }) + + cc.raftNode.OnMessages(func(messages []raftpb.Message) error { + // This used to send messages in parallel using one goroutine per + // message but this overwhelms the TCP connection and results + // in more lost messages. 
Should send serially + for _, msg := range messages { + err := cc.raftTransport.Send(context.TODO(), msg, false) + + if err != nil { + cc.raftNode.ReportUnreachable(msg.To) + + if msg.Type == raftpb.MsgSnap { + cc.raftNode.ReportSnapshot(msg.To, raftEtc.SnapshotFailure) + } + } else if msg.Type == raftpb.MsgSnap { + cc.raftNode.ReportSnapshot(msg.To, raftEtc.SnapshotFinish) + } + } + + return nil + }) + + cc.raftNode.OnSnapshot(func(snap raftpb.Snapshot) error { + err := cc.clusterController.ApplySnapshot(snap.Data) + + if err != nil { + return err + } + + for _, nodeConfig := range cc.clusterController.State.Nodes { + cc.raftTransport.AddPeer(nodeConfig.Address) + } + + if replayDone { + if cc.onLocalUpdatesCB != nil && len(cc.clusterController.Deltas()) > 0 { + cc.onLocalUpdatesCB(cc.clusterController.Deltas()) + } + } + + return nil + }) + + cc.raftNode.OnCommittedEntry(func(entry raftpb.Entry) error { + Log.Debugf("New entry at node %d [%d]: %v", cc.clusterController.LocalNodeID, entry.Index, entry) + + var encodedClusterCommand []byte + var clusterCommand ClusterCommand + var clusterCommandBody interface{} + + switch entry.Type { + case raftpb.EntryConfChange: + var confChange raftpb.ConfChange + var err error + + if err := confChange.Unmarshal(entry.Data); err != nil { + return err + } + + clusterCommand, err = DecodeClusterCommand(confChange.Context) + + if err != nil { + return err + } + + clusterCommandBody, err = DecodeClusterCommandBody(clusterCommand) + + if err != nil { + return err + } + + switch clusterCommand.Type { + case ClusterAddNode: + if clusterCommandBody.(ClusterAddNodeBody).NodeID != clusterCommandBody.(ClusterAddNodeBody).NodeConfig.Address.NodeID { + return EBadContext + } + case ClusterRemoveNode: + default: + return EBadContext + } + + encodedClusterCommand = confChange.Context + case raftpb.EntryNormal: + var err error + encodedClusterCommand = entry.Data + clusterCommand, err = DecodeClusterCommand(encodedClusterCommand) + + if err != nil { + return err + } + } + + localUpdates, err := cc.clusterController.Step(clusterCommand) + + if err != nil { + if clusterCommand.SubmitterID == cc.clusterController.LocalNodeID { + cc.requestMap.Respond(clusterCommand.CommandID, proposalResponse{ + err: err, + }) + } + + return err + } + + // Only update transport if the cluster config was updated + if entry.Type == raftpb.EntryConfChange { + switch clusterCommand.Type { + case ClusterAddNode: + cc.raftTransport.AddPeer(clusterCommandBody.(ClusterAddNodeBody).NodeConfig.Address) + case ClusterRemoveNode: + cc.raftTransport.RemovePeer(raft.PeerAddress{ NodeID: clusterCommandBody.(ClusterRemoveNodeBody).NodeID }) + } + } + + if clusterCommand.Type == ClusterSnapshot { + body, _ := DecodeClusterCommandBody(clusterCommand) + snapshotMeta := body.(ClusterSnapshotBody) + + if replayDone { + if cc.onClusterSnapshotCB != nil { + if cc.clusterController.LocalNodeIsInCluster() { + cc.onClusterSnapshotCB(entry.Index, snapshotMeta.UUID) + } + } + } + } + + if clusterCommand.SubmitterID == cc.clusterController.LocalNodeID { + cc.requestMap.Respond(clusterCommand.CommandID, proposalResponse{ + err: nil, + }) + } + + if replayDone { + if cc.onLocalUpdatesCB != nil && len(localUpdates) > 0 { + cc.onLocalUpdatesCB(localUpdates) + } + } + + return nil + }) + + cc.raftNode.OnError(func(err error) error { + // indicates that raft node is shutting down + Log.Criticalf("Raft node encountered an unrecoverable error and will now shut down: %v", err) + + return nil + }) + + 
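Editorial aside: a sketch of how the proposal path defined earlier in this file is typically driven. The helper, site ID, and timeout are illustrative and would live in this package; they are not part of the patch.

// addSiteExample proposes adding a site to the cluster. ClusterCommand blocks
// until the command is committed to the raft log, re-proposing internally
// every ProposalRetryPeriodSeconds in case a proposal message is lost.
func addSiteExample(cc *ConfigController) error {
    ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
    defer cancel()

    // Returns ECancelled if the context is cancelled or CancelProposals is
    // called first, and EStopped if the controller is stopped.
    return cc.ClusterCommand(ctx, ClusterAddSiteBody{ SiteID: "site-1" })
}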
cc.raftNode.OnReplayDone(func() error { + Log.Debug("OnReplayDone() called") + // cc.clusterController.EnableNotifications() + replayDone = true + restored <- 1 + + return nil + }) + + if err := cc.raftNode.Start(); err != nil { + Log.Criticalf("Unable to start the config controller due to an error while starting up raft node: %v", err.Error()) + + return ERaftNodeStartup + } + + Log.Info("Config controller started up raft node. It is now waiting for log replay...") + + <-restored + + Log.Info("Config controller log replay complete") + + return nil +} + +func (cc *ConfigController) Stop() { + cc.restartLock.Lock() + defer cc.restartLock.Unlock() + + if cc.stop == nil { + return + } + + cc.raftNode.Stop() + close(cc.stop) + cc.stop = nil +} + +func (cc *ConfigController) ClusterController() *ClusterController { + return cc.clusterController +} + +func (cc *ConfigController) nextCommandID() uint64 { + return UUID64() +} diff --git a/vendor/github.com/armPelionEdge/devicedb/cluster/config_controller_builder.go b/vendor/github.com/armPelionEdge/devicedb/cluster/config_controller_builder.go new file mode 100644 index 0000000..f0d818c --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/cluster/config_controller_builder.go @@ -0,0 +1,100 @@ +package cluster +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + . 
"github.com/armPelionEdge/devicedb/raft" +) + +type ClusterConfigControllerBuilder interface { + SetCreateNewCluster(b bool) ClusterConfigControllerBuilder + SetLocalNodeAddress(peerAddress PeerAddress) ClusterConfigControllerBuilder + SetRaftNodeStorage(raftStorage RaftNodeStorage) ClusterConfigControllerBuilder + SetRaftNodeTransport(transport *TransportHub) ClusterConfigControllerBuilder + Create() ClusterConfigController +} + +type ConfigControllerBuilder struct { + createNewCluster bool + localNodeAddress PeerAddress + raftStorage RaftNodeStorage + raftTransport *TransportHub +} + +func (builder *ConfigControllerBuilder) SetCreateNewCluster(b bool) ClusterConfigControllerBuilder { + builder.createNewCluster = b + + return builder +} + +func (builder *ConfigControllerBuilder) SetLocalNodeAddress(address PeerAddress) ClusterConfigControllerBuilder { + builder.localNodeAddress = address + + return builder +} + +func (builder *ConfigControllerBuilder) SetRaftNodeStorage(raftStorage RaftNodeStorage) ClusterConfigControllerBuilder { + builder.raftStorage = raftStorage + + return builder +} + +func (builder *ConfigControllerBuilder) SetRaftNodeTransport(transport *TransportHub) ClusterConfigControllerBuilder { + builder.raftTransport = transport + + return builder +} + +func (builder *ConfigControllerBuilder) Create() ClusterConfigController { + addNodeBody, _ := EncodeClusterCommandBody(ClusterAddNodeBody{ + NodeID: builder.localNodeAddress.NodeID, + NodeConfig: NodeConfig{ + Address: PeerAddress{ + NodeID: builder.localNodeAddress.NodeID, + Host: builder.localNodeAddress.Host, + Port: builder.localNodeAddress.Port, + }, + Capacity: 1, + }, + }) + addNodeContext, _ := EncodeClusterCommand(ClusterCommand{ Type: ClusterAddNode, Data: addNodeBody }) + clusterController := &ClusterController{ + LocalNodeID: builder.localNodeAddress.NodeID, + State: ClusterState{ }, + PartitioningStrategy: &SimplePartitioningStrategy{ }, + LocalUpdates: make(chan []ClusterStateDelta), + } + raftNode := NewRaftNode(&RaftNodeConfig{ + ID: builder.localNodeAddress.NodeID, + CreateClusterIfNotExist: builder.createNewCluster, + Context: addNodeContext, + Storage: builder.raftStorage, + GetSnapshot: func() ([]byte, error) { + return clusterController.State.Snapshot() + }, + }) + + return NewConfigController(raftNode, builder.raftTransport, clusterController) +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/cluster/controller.go b/vendor/github.com/armPelionEdge/devicedb/cluster/controller.go new file mode 100644 index 0000000..2f4f2d1 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/cluster/controller.go @@ -0,0 +1,889 @@ +package cluster +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. 
+ // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "errors" + "sync" + "sort" + "github.com/armPelionEdge/devicedb/raft" + + . "github.com/armPelionEdge/devicedb/logging" +) + +var ENoSuchCommand = errors.New("The cluster command type is not supported") +var ENoSuchNode = errors.New("The node specified in the update does not exist") +var ENoSuchSite = errors.New("The specified site does not exist") +var ENoSuchRelay = errors.New("The specified relay does not exist") +var ENodeDoesNotOwnReplica = errors.New("A node tried to transfer a partition replica to itself but it no longer owns that replica") +var ECouldNotParseCommand = errors.New("The cluster command data was not properly formatted. Unable to parse it.") +var EReplicaNumberInvalid = errors.New("The command specified an invalid replica number for a partition.") + +type ClusterController struct { + LocalNodeID uint64 + State ClusterState + PartitioningStrategy PartitioningStrategy + LocalUpdates chan []ClusterStateDelta + notificationsEnabled bool + notificationsEnabledLock sync.Mutex + stateUpdateLock sync.Mutex + nextDeltaSet []ClusterStateDelta + localNodeOwnedPartitionReplicaCache map[uint64]map[uint64]bool + partitionOwnersCache map[uint64][]uint64 +} + +func (clusterController *ClusterController) Partition(key string) uint64 { + if clusterController.State.ClusterSettings.AreInitialized() { + return clusterController.PartitioningStrategy.Partition(key, clusterController.State.ClusterSettings.Partitions) + } + + return 0 +} + +func (clusterController *ClusterController) EnableNotifications() { + clusterController.notificationsEnabledLock.Lock() + defer clusterController.notificationsEnabledLock.Unlock() + clusterController.notificationsEnabled = true +} + +func (clusterController *ClusterController) DisableNotifications() { + clusterController.notificationsEnabledLock.Lock() + defer clusterController.notificationsEnabledLock.Unlock() + clusterController.notificationsEnabled = false +} + +func (clusterController *ClusterController) Step(clusterCommand ClusterCommand) ([]ClusterStateDelta, error) { + body, err := DecodeClusterCommandBody(clusterCommand) + + if err != nil { + return nil, ECouldNotParseCommand + } + + + clusterController.stateUpdateLock.Lock() + clusterController.nextDeltaSet = []ClusterStateDelta{ } + + switch clusterCommand.Type { + case ClusterUpdateNode: + err = clusterController.UpdateNodeConfig(body.(ClusterUpdateNodeBody)) + case ClusterAddNode: + err = clusterController.AddNode(body.(ClusterAddNodeBody)) + case ClusterRemoveNode: + err = clusterController.RemoveNode(body.(ClusterRemoveNodeBody)) + case ClusterTakePartitionReplica: + err = clusterController.TakePartitionReplica(body.(ClusterTakePartitionReplicaBody)) + case ClusterSetReplicationFactor: + err = clusterController.SetReplicationFactor(body.(ClusterSetReplicationFactorBody)) + case ClusterSetPartitionCount: + err = clusterController.SetPartitionCount(body.(ClusterSetPartitionCountBody)) + case ClusterAddSite: + err = clusterController.AddSite(body.(ClusterAddSiteBody)) + case 
ClusterRemoveSite: + err = clusterController.RemoveSite(body.(ClusterRemoveSiteBody)) + case ClusterAddRelay: + err = clusterController.AddRelay(body.(ClusterAddRelayBody)) + case ClusterRemoveRelay: + err = clusterController.RemoveRelay(body.(ClusterRemoveRelayBody)) + case ClusterMoveRelay: + err = clusterController.MoveRelay(body.(ClusterMoveRelayBody)) + case ClusterSnapshot: + // Do nothing + err = nil + default: + return nil, ENoSuchCommand + } + + clusterController.stateUpdateLock.Unlock() + clusterController.notificationsEnabledLock.Lock() + + if clusterController.notificationsEnabled { + clusterController.LocalUpdates <- clusterController.nextDeltaSet + } + + clusterController.notificationsEnabledLock.Unlock() + + return clusterController.nextDeltaSet, err +} + +func (clusterController *ClusterController) Deltas() []ClusterStateDelta { + return clusterController.nextDeltaSet +} + +// Apply a snapshot to the state and notify on the local updates channel of any relevant +// changes +func (clusterController *ClusterController) ApplySnapshot(snap []byte) error { + clusterController.stateUpdateLock.Lock() + defer clusterController.stateUpdateLock.Unlock() + clusterController.clearPartitionOwnersCache() + clusterController.localNodeOwnedPartitionReplicaCache = nil + localNodeOwnedPartitionReplica := clusterController.localNodeOwnedPartitionReplicas() + localNodeTokenSnapshot := clusterController.localNodeTokenSnapshot() + localNodePartitionReplicaSnapshot := clusterController.localNodePartitionReplicaSnapshot() + relaysSnapshot := clusterController.relaysSnapshot() + sitesSnapshot := clusterController.sitesSnapshot() + _, localNodeWasPresentBefore := clusterController.State.Nodes[clusterController.LocalNodeID] + + if err := clusterController.State.Recover(snap); err != nil { + return err + } + + nodeConfig, localNodeIsPresentNow := clusterController.State.Nodes[clusterController.LocalNodeID] + + if !localNodeWasPresentBefore && localNodeIsPresentNow { + // This node was added. Provide an add node delta + clusterController.notifyLocalNode(DeltaNodeAdd, NodeAdd{ NodeID: clusterController.LocalNodeID, NodeConfig: *nodeConfig }) + } + + clusterController.localDiffTokensAndNotify(localNodeTokenSnapshot) + clusterController.localDiffOwnedPartitionReplicasAndNotify(localNodeOwnedPartitionReplica) + clusterController.localDiffPartitionReplicasAndNotify(localNodePartitionReplicaSnapshot) + clusterController.diffRelaysAndNotify(relaysSnapshot) + clusterController.diffSitesAndNotify(sitesSnapshot) + + if localNodeWasPresentBefore && !localNodeIsPresentNow { + // This node was removed. 
Provide a remove node delta + clusterController.notifyLocalNode(DeltaNodeRemove, NodeRemove{ NodeID: clusterController.LocalNodeID }) + } + + return nil +} + +func (clusterController *ClusterController) relaysSnapshot() map[string]string { + var relays map[string]string = make(map[string]string) + + for relay, site := range clusterController.State.Relays { + relays[relay] = site + } + + return relays +} + +func (clusterController *ClusterController) sitesSnapshot() map[string]bool { + var sites map[string]bool = make(map[string]bool) + + for site, _ := range clusterController.State.Sites { + sites[site] = true + } + + return sites +} + +func (clusterController *ClusterController) diffRelaysAndNotify(relaysSnapshot map[string]string) { + for relay, site := range relaysSnapshot { + if _, ok := clusterController.State.Relays[relay]; !ok { + clusterController.notifyLocalNode(DeltaRelayRemoved, RelayRemoved{ RelayID: relay }) + } else if clusterController.State.Relays[relay] != site { + clusterController.notifyLocalNode(DeltaRelayMoved, RelayMoved{ RelayID: relay, SiteID: clusterController.State.Relays[relay] }) + } + } + + for relay, _ := range clusterController.State.Relays { + if _, ok := relaysSnapshot[relay]; !ok { + clusterController.notifyLocalNode(DeltaRelayAdded, RelayAdded{ RelayID: relay }) + } + } +} + +func (clusterController *ClusterController) diffSitesAndNotify(sitesSnapshot map[string]bool) { + for site, _ := range sitesSnapshot { + if _, ok := clusterController.State.Sites[site]; !ok { + clusterController.notifyLocalNode(DeltaSiteRemoved, SiteRemoved{ SiteID: site }) + } + } + + for site, _ := range clusterController.State.Sites { + if _, ok := sitesSnapshot[site]; !ok { + clusterController.notifyLocalNode(DeltaSiteAdded, SiteAdded{ SiteID: site }) + } + } +} + +func (clusterController *ClusterController) UpdateNodeConfig(clusterCommand ClusterUpdateNodeBody) error { + currentNodeConfig, ok := clusterController.State.Nodes[clusterCommand.NodeID] + + if !ok { + // No such node + return nil + } + + currentNodeConfig.Address.Host = clusterCommand.NodeConfig.Address.Host + currentNodeConfig.Address.Port = clusterCommand.NodeConfig.Address.Port + + if clusterCommand.NodeConfig.Capacity != currentNodeConfig.Capacity { + currentNodeConfig.Capacity = clusterCommand.NodeConfig.Capacity + + // a capacity change with any node means tokens need to be redistributed to account for different + // relative capacity of the nodes. 
This has no effect with the simple partitioning strategy unless + // a node has been assigned capacity 0 indicating that it is leaving the cluster soon + + if clusterController.State.ClusterSettings.AreInitialized() { + clusterController.assignTokens() + } + } + + return nil +} + +func (clusterController *ClusterController) AddNode(clusterCommand ClusterAddNodeBody) error { + // If a node already exists with this node ID then this request should be ignored + if _, ok := clusterController.State.Nodes[clusterCommand.NodeID]; ok { + Log.Warningf("Ignoring request to add a node whose id = %d because a node with that ID already exists in the cluster", clusterCommand.NodeID) + + return raft.ECancelConfChange + } + + // Ensure that a node ID is not reused if it was used by a node that used to belong to the cluster + if clusterController.State.RemovedNodes != nil { + if _, ok := clusterController.State.RemovedNodes[clusterCommand.NodeID]; ok { + Log.Warningf("Ignoring request to add a node whose id = %d because a node with that ID used to exist in the cluster", clusterCommand.NodeID) + + return raft.ECancelConfChange + } + } + + // add the node if it isn't already added + clusterCommand.NodeConfig.Tokens = make(map[uint64]bool) + clusterCommand.NodeConfig.PartitionReplicas = make(map[uint64]map[uint64]bool) + clusterCommand.NodeConfig.OwnedPartitionReplicas = make(map[uint64]map[uint64]bool) + + clusterController.State.AddNode(clusterCommand.NodeConfig) + + if clusterCommand.NodeID == clusterController.LocalNodeID { + // notify the local node that it has been added to the cluster + clusterController.notifyLocalNode(DeltaNodeAdd, NodeAdd{ NodeID: clusterController.LocalNodeID, NodeConfig: clusterCommand.NodeConfig }) + } + + // redistribute tokens in the cluster. 
Tokens will be reassigned from other nodes to this node to distribute the load
+    if clusterController.State.ClusterSettings.AreInitialized() {
+        clusterController.assignTokens()
+    }
+
+    return nil
+}
+
+func (clusterController *ClusterController) RemoveNode(clusterCommand ClusterRemoveNodeBody) error {
+    replacementNode, ok := clusterController.State.Nodes[clusterCommand.ReplacementNodeID]
+
+    if (!ok && clusterCommand.ReplacementNodeID != 0) || (ok && len(replacementNode.Tokens) != 0) || clusterCommand.ReplacementNodeID == clusterCommand.NodeID {
+        // The configuration change should be cancelled if the replacement node does not exist,
+        // the replacement node already has a token assignment, or it is the node being removed
+        return raft.ECancelConfChange
+    }
+
+    if _, ok := clusterController.State.Nodes[clusterCommand.NodeID]; ok {
+        if ok && clusterCommand.ReplacementNodeID != 0 {
+            // assign tokens that this node owned to the replacement node
+            clusterController.reassignTokens(clusterCommand.NodeID, clusterCommand.ReplacementNodeID)
+        }
+
+        // remove the node if it isn't already removed
+        clusterController.State.RemoveNode(clusterCommand.NodeID)
+
+        if (!ok || clusterCommand.ReplacementNodeID == 0) && clusterController.State.ClusterSettings.AreInitialized() {
+            // redistribute tokens in the cluster, making sure to distribute tokens that were owned by this node to other nodes
+            clusterController.assignTokens()
+        }
+
+        if clusterCommand.NodeID == clusterController.LocalNodeID {
+            // notify the local node that it has been removed from the cluster
+            clusterController.notifyLocalNode(DeltaNodeRemove, NodeRemove{ NodeID: clusterController.LocalNodeID })
+        }
+    }
+
+    return nil
+}
+
+func (clusterController *ClusterController) TakePartitionReplica(clusterCommand ClusterTakePartitionReplicaBody) error {
+    localNodePartitionReplicaSnapshot := clusterController.localNodePartitionReplicaSnapshot()
+
+    partitionOwners := clusterController.partitionOwners(clusterCommand.Partition)
+
+    if clusterCommand.Replica >= uint64(len(partitionOwners)) || len(partitionOwners) == 0 {
+        // The replica number is out of range for this partition so reject the command
+        return EReplicaNumberInvalid
+    }
+
+    if partitionOwners[int(clusterCommand.Replica)] != clusterCommand.NodeID {
+        // If a node does not own a partition replica it cannot become the holder.
+        // It is ok for a node to lose ownership and remain the holder but a node
+        // must be the owner to hold it initially.
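+        // For illustration (hypothetical values): if the owners computed from
+        // the current token assignment for partition P are [8, 2, 5], then only
+        // node 2 may take holdership of replica 1 of P. Node 8 could later lose
+        // ownership of replica 0 in a token reshuffle yet remain its holder
+        // until the transfer to the new owner completes.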
+ return ENodeDoesNotOwnReplica + } + + if err := clusterController.State.AssignPartitionReplica(clusterCommand.Partition, clusterCommand.Replica, clusterCommand.NodeID); err != nil { + // Log Error + return err + } + + clusterController.localDiffPartitionReplicasAndNotify(localNodePartitionReplicaSnapshot) + + return nil +} + +func (clusterController *ClusterController) localNodePartitionReplicaSnapshot() map[uint64]map[uint64]bool { + nodeConfig, ok := clusterController.State.Nodes[clusterController.LocalNodeID] + + if !ok { + return map[uint64]map[uint64]bool{ } + } + + partitionReplicaSnapshot := make(map[uint64]map[uint64]bool, len(nodeConfig.PartitionReplicas)) + + for partition, replicas := range nodeConfig.PartitionReplicas { + partitionReplicaSnapshot[partition] = make(map[uint64]bool, len(replicas)) + + for replica, _ := range replicas { + partitionReplicaSnapshot[partition][replica] = true + } + } + + return partitionReplicaSnapshot +} + +func (clusterController *ClusterController) localDiffPartitionReplicasAndNotify(partitionReplicaSnapshot map[uint64]map[uint64]bool) { + nodeConfig, ok := clusterController.State.Nodes[clusterController.LocalNodeID] + + if !ok { + return + } + + // find out which partition replicas have been lost + for partition, replicas := range partitionReplicaSnapshot { + for replica, _ := range replicas { + if _, ok := nodeConfig.PartitionReplicas[partition]; !ok { + clusterController.notifyLocalNode(DeltaNodeLosePartitionReplica, NodeLosePartitionReplica{ NodeID: clusterController.LocalNodeID, Partition: partition, Replica: replica }) + + continue + } + + if _, ok := nodeConfig.PartitionReplicas[partition][replica]; !ok { + clusterController.notifyLocalNode(DeltaNodeLosePartitionReplica, NodeLosePartitionReplica{ NodeID: clusterController.LocalNodeID, Partition: partition, Replica: replica }) + } + } + } + + // find out which partition replicas have been gained + for partition, replicas := range nodeConfig.PartitionReplicas { + for replica, _ := range replicas { + if _, ok := partitionReplicaSnapshot[partition]; !ok { + clusterController.notifyLocalNode(DeltaNodeGainPartitionReplica, NodeGainPartitionReplica{ NodeID: clusterController.LocalNodeID, Partition: partition, Replica: replica }) + + continue + } + + if _, ok := partitionReplicaSnapshot[partition][replica]; !ok { + clusterController.notifyLocalNode(DeltaNodeGainPartitionReplica, NodeGainPartitionReplica{ NodeID: clusterController.LocalNodeID, Partition: partition, Replica: replica }) + } + } + } +} + +func (clusterController *ClusterController) Lock() { + clusterController.stateUpdateLock.Lock() +} + +func (clusterController *ClusterController) Unlock() { + clusterController.stateUpdateLock.Unlock() +} + +func (clusterController *ClusterController) SetReplicationFactor(clusterCommand ClusterSetReplicationFactorBody) error { + if clusterController.State.ClusterSettings.ReplicationFactor != 0 { + // The replication factor has already been set and cannot be changed + return nil + } + + clusterController.State.ClusterSettings.ReplicationFactor = clusterCommand.ReplicationFactor + clusterController.initializeClusterIfReady() + + return nil +} + +func (clusterController *ClusterController) SetPartitionCount(clusterCommand ClusterSetPartitionCountBody) error { + if clusterController.State.ClusterSettings.Partitions != 0 { + // The partition count has already been set and cannot be changed + return nil + } + + clusterController.State.ClusterSettings.Partitions = clusterCommand.Partitions + 
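+    // The partition count, like the replication factor, can only be set once.
+    // A minimal sketch of the initialization sequence (hypothetical values):
+    //
+    //   clusterController.SetReplicationFactor(ClusterSetReplicationFactorBody{ ReplicationFactor: 3 })
+    //   clusterController.SetPartitionCount(ClusterSetPartitionCountBody{ Partitions: 1024 })
+    //
+    // initializeClusterIfReady() below is a no-op until both settings are non-zero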
clusterController.initializeClusterIfReady() + + return nil +} + +func (clusterController *ClusterController) AddSite(clusterCommand ClusterAddSiteBody) error { + if clusterController.State.SiteExists(clusterCommand.SiteID) { + return nil + } + + clusterController.State.AddSite(clusterCommand.SiteID) + clusterController.notifyLocalNode(DeltaSiteAdded, SiteAdded{ SiteID: clusterCommand.SiteID }) + + return nil +} + +func (clusterController *ClusterController) RemoveSite(clusterCommand ClusterRemoveSiteBody) error { + if !clusterController.State.SiteExists(clusterCommand.SiteID) { + return nil + } + + clusterController.State.RemoveSite(clusterCommand.SiteID) + clusterController.notifyLocalNode(DeltaSiteRemoved, SiteRemoved{ SiteID: clusterCommand.SiteID }) + + return nil +} + +func (clusterController *ClusterController) AddRelay(clusterCommand ClusterAddRelayBody) error { + if _, ok := clusterController.State.Relays[clusterCommand.RelayID]; ok { + return nil + } + + clusterController.State.AddRelay(clusterCommand.RelayID) + clusterController.notifyLocalNode(DeltaRelayAdded, RelayAdded{ RelayID: clusterCommand.RelayID }) + + return nil +} + +func (clusterController *ClusterController) RemoveRelay(clusterCommand ClusterRemoveRelayBody) error { + if _, ok := clusterController.State.Relays[clusterCommand.RelayID]; !ok { + return nil + } + + clusterController.State.RemoveRelay(clusterCommand.RelayID) + clusterController.notifyLocalNode(DeltaRelayRemoved, RelayRemoved{ RelayID: clusterCommand.RelayID }) + + return nil +} + +func (clusterController *ClusterController) MoveRelay(clusterCommand ClusterMoveRelayBody) error { + if !clusterController.State.SiteExists(clusterCommand.SiteID) && clusterCommand.SiteID != "" { + return ENoSuchSite + } + + if _, ok := clusterController.State.Relays[clusterCommand.RelayID]; !ok { + return ENoSuchRelay + } + + if clusterController.State.Relays[clusterCommand.RelayID] == clusterCommand.SiteID { + return nil + } + + clusterController.State.MoveRelay(clusterCommand.RelayID, clusterCommand.SiteID) + clusterController.notifyLocalNode(DeltaRelayMoved, RelayMoved{ RelayID: clusterCommand.RelayID, SiteID: clusterCommand.SiteID }) + + return nil +} + +func (clusterController *ClusterController) RelaySite(relayID string) string { + clusterController.stateUpdateLock.Lock() + defer clusterController.stateUpdateLock.Unlock() + + return clusterController.State.Relays[relayID] +} + +func (clusterController *ClusterController) ClusterIsInitialized() bool { + clusterController.stateUpdateLock.Lock() + defer clusterController.stateUpdateLock.Unlock() + + return clusterController.State.ClusterSettings.AreInitialized() +} + +func (clusterController *ClusterController) ClusterMemberAddress(nodeID uint64) raft.PeerAddress { + clusterController.stateUpdateLock.Lock() + defer clusterController.stateUpdateLock.Unlock() + + nodeConfig, ok := clusterController.State.Nodes[nodeID] + + if !ok { + return raft.PeerAddress{} + } + + return nodeConfig.Address +} + +func (clusterController *ClusterController) PartitionOwners(partition uint64) []uint64 { + clusterController.stateUpdateLock.Lock() + defer clusterController.stateUpdateLock.Unlock() + + if partition >= uint64(len(clusterController.State.Partitions)) || !clusterController.State.ClusterSettings.AreInitialized() { + return []uint64{ } + } + + return clusterController.partitionOwners(partition) +} + +func (clusterController *ClusterController) LocalNodeHoldsPartition(partition uint64) bool { + clusterController.stateUpdateLock.Lock() + 
defer clusterController.stateUpdateLock.Unlock() + + return len(clusterController.State.Nodes[clusterController.LocalNodeID].PartitionReplicas[partition]) > 0 +} + +func (clusterController *ClusterController) PartitionHolders(partition uint64) []uint64 { + clusterController.stateUpdateLock.Lock() + defer clusterController.stateUpdateLock.Unlock() + + if partition >= uint64(len(clusterController.State.Partitions)) { + return []uint64{ } + } + + replicas := clusterController.State.Partitions[partition] + holders := make([]uint64, 0, len(replicas)) + holdersMap := make(map[uint64]bool, len(replicas)) + + for _, partitionReplica := range replicas { + if partitionReplica.Holder != 0 { + if _, ok := holdersMap[partitionReplica.Holder]; !ok { + holdersMap[partitionReplica.Holder] = true + holders = append(holders, partitionReplica.Holder) + } + } + } + + return holders +} + +func (clusterController *ClusterController) SiteExists(siteID string) bool { + clusterController.stateUpdateLock.Lock() + defer clusterController.stateUpdateLock.Unlock() + + _, ok := clusterController.State.Sites[siteID] + + return ok +} + +func (clusterController *ClusterController) LocalNodeHeldPartitionReplicas() []PartitionReplica { + clusterController.stateUpdateLock.Lock() + defer clusterController.stateUpdateLock.Unlock() + + partitionReplicas := make([]PartitionReplica, 0) + + if !clusterController.State.ClusterSettings.AreInitialized() { + return partitionReplicas + } + + if clusterController.State.Nodes[clusterController.LocalNodeID] == nil { + return partitionReplicas + } + + for partition, replicas := range clusterController.State.Nodes[clusterController.LocalNodeID].PartitionReplicas { + for replica, _ := range replicas { + partitionReplicas = append(partitionReplicas, PartitionReplica{ Partition: partition, Replica: replica }) + } + } + + return partitionReplicas +} + +func (clusterController *ClusterController) LocalNodeOwnedPartitionReplicas() []PartitionReplica { + clusterController.stateUpdateLock.Lock() + defer clusterController.stateUpdateLock.Unlock() + + partitionReplicas := make([]PartitionReplica, 0) + + if !clusterController.State.ClusterSettings.AreInitialized() { + return partitionReplicas + } + + if clusterController.State.Nodes[clusterController.LocalNodeID] == nil { + return partitionReplicas + } + + for partition, replicas := range clusterController.localNodeOwnedPartitionReplicas() { + for replica, _ := range replicas { + partitionReplicas = append(partitionReplicas, PartitionReplica{ Partition: partition, Replica: replica }) + } + } + + return partitionReplicas +} + +func (clusterController *ClusterController) localNodeOwnedPartitionReplicas() map[uint64]map[uint64]bool { + if !clusterController.State.ClusterSettings.AreInitialized() { + return map[uint64]map[uint64]bool{ } + } + + if clusterController.localNodeOwnedPartitionReplicaCache != nil { + return clusterController.localNodeOwnedPartitionReplicaCache + } + + partitionReplicas := make(map[uint64]map[uint64]bool) + + for i := 0; i < len(clusterController.State.Tokens); i++ { + partitionOwners := clusterController.partitionOwners(uint64(i)) + + for replica, nodeID := range partitionOwners { + if nodeID == clusterController.LocalNodeID { + if _, ok := partitionReplicas[uint64(i)]; !ok { + partitionReplicas[uint64(i)] = make(map[uint64]bool) + } + + partitionReplicas[uint64(i)][uint64(replica)] = true + } + } + } + + clusterController.localNodeOwnedPartitionReplicaCache = partitionReplicas + + //Log.Criticalf("Node %d owns partitions %v", 
clusterController.LocalNodeID, partitionReplicas) + + return partitionReplicas +} + +func (clusterController *ClusterController) partitionOwners(partition uint64) []uint64 { + if clusterController.partitionOwnersCache == nil { + clusterController.partitionOwnersCache = make(map[uint64][]uint64) + } + + if owners, ok := clusterController.partitionOwnersCache[partition]; ok { + return owners + } + + owners := clusterController.PartitioningStrategy.Owners(clusterController.State.Tokens, partition, clusterController.State.ClusterSettings.ReplicationFactor) + + clusterController.partitionOwnersCache[partition] = owners + + return owners +} + +func (clusterController *ClusterController) clearPartitionOwnersCache() { + clusterController.partitionOwnersCache = nil +} + +func (clusterController *ClusterController) localDiffOwnedPartitionReplicasAndNotify(partitionReplicaSnapshot map[uint64]map[uint64]bool) { + currentPartitionReplicas := clusterController.localNodeOwnedPartitionReplicas() + + // find out which partition replicas have been lost + for partition, replicas := range partitionReplicaSnapshot { + for replica, _ := range replicas { + if _, ok := currentPartitionReplicas[partition]; !ok { + clusterController.notifyLocalNode(DeltaNodeLosePartitionReplicaOwnership, NodeLosePartitionReplicaOwnership{ NodeID: clusterController.LocalNodeID, Partition: partition, Replica: replica }) + + continue + } + + if _, ok := currentPartitionReplicas[partition][replica]; !ok { + clusterController.notifyLocalNode(DeltaNodeLosePartitionReplicaOwnership, NodeLosePartitionReplicaOwnership{ NodeID: clusterController.LocalNodeID, Partition: partition, Replica: replica }) + } + } + } + + // find out which partition replicas have been gained + for partition, replicas := range currentPartitionReplicas { + for replica, _ := range replicas { + if _, ok := partitionReplicaSnapshot[partition]; !ok { + clusterController.notifyLocalNode(DeltaNodeGainPartitionReplicaOwnership, NodeGainPartitionReplicaOwnership{ NodeID: clusterController.LocalNodeID, Partition: partition, Replica: replica }) + + continue + } + + if _, ok := partitionReplicaSnapshot[partition][replica]; !ok { + clusterController.notifyLocalNode(DeltaNodeGainPartitionReplicaOwnership, NodeGainPartitionReplicaOwnership{ NodeID: clusterController.LocalNodeID, Partition: partition, Replica: replica }) + } + } + } +} + +func (clusterController *ClusterController) LocalNodeConfig() *NodeConfig { + clusterController.stateUpdateLock.Lock() + defer clusterController.stateUpdateLock.Unlock() + + return clusterController.State.Nodes[clusterController.LocalNodeID] +} + +func (clusterController *ClusterController) LocalPartitionReplicasCount() int { + clusterController.stateUpdateLock.Lock() + defer clusterController.stateUpdateLock.Unlock() + + replicaCount := 0 + localNodeConfig := clusterController.State.Nodes[clusterController.LocalNodeID] + + if localNodeConfig == nil { + return replicaCount + } + + for _, partitions := range localNodeConfig.PartitionReplicas { + for _, _ = range partitions { + replicaCount += 1 + } + } + + return replicaCount +} + +func (clusterController *ClusterController) LocalNodeIsInCluster() bool { + clusterController.stateUpdateLock.Lock() + defer clusterController.stateUpdateLock.Unlock() + + _, ok := clusterController.State.Nodes[clusterController.LocalNodeID] + + return ok +} + +func (clusterController *ClusterController) NodeIsInCluster(nodeID uint64) bool { + clusterController.stateUpdateLock.Lock() + defer 
clusterController.stateUpdateLock.Unlock() + + _, ok := clusterController.State.Nodes[nodeID] + + return ok +} + +func (clusterController *ClusterController) LocalNodeWasRemovedFromCluster() bool { + clusterController.stateUpdateLock.Lock() + defer clusterController.stateUpdateLock.Unlock() + + _, ok := clusterController.State.RemovedNodes[clusterController.LocalNodeID] + + return ok +} + +func (clusterController *ClusterController) ClusterNodes() map[uint64]bool { + clusterController.stateUpdateLock.Lock() + defer clusterController.stateUpdateLock.Unlock() + + nodeMap := make(map[uint64]bool) + + for node, _ := range clusterController.State.Nodes { + nodeMap[node] = true + } + + return nodeMap +} + +func (clusterController *ClusterController) ClusterNodeConfigs() []NodeConfig { + clusterController.stateUpdateLock.Lock() + defer clusterController.stateUpdateLock.Unlock() + + nodeConfigs := make([]NodeConfig, 0, len(clusterController.State.Nodes)) + + for _, config := range clusterController.State.Nodes { + nodeConfigs = append(nodeConfigs, *config) + } + + return nodeConfigs +} + +func (clusterController *ClusterController) initializeClusterIfReady() { + if !clusterController.State.ClusterSettings.AreInitialized() { + // the cluster settings have not been finalized so the cluster cannot yet be initialized + return + } + + clusterController.State.Initialize() + clusterController.assignTokens() +} + +func (clusterController *ClusterController) reassignTokens(oldOwnerID, newOwnerID uint64) { + clusterController.localNodeOwnedPartitionReplicaCache = nil + clusterController.clearPartitionOwnersCache() + localNodeOwnedPartitionReplicas := clusterController.localNodeOwnedPartitionReplicas() + localNodeTokenSnapshot := clusterController.localNodeTokenSnapshot() + clusterController.localNodeOwnedPartitionReplicaCache = nil + clusterController.clearPartitionOwnersCache() + + // make new owner match old owners capacity + clusterController.State.Nodes[newOwnerID].Capacity = clusterController.State.Nodes[oldOwnerID].Capacity + + // move tokens from old owner to new owner + for token, _ := range clusterController.State.Nodes[oldOwnerID].Tokens { + clusterController.State.AssignToken(newOwnerID, token) + } + + // perform diff between original token assignment and new token assignment to build deltas to place into update channel + clusterController.localDiffTokensAndNotify(localNodeTokenSnapshot) + clusterController.localDiffOwnedPartitionReplicasAndNotify(localNodeOwnedPartitionReplicas) +} + +func (clusterController *ClusterController) assignTokens() { + nodes := make([]NodeConfig, 0, len(clusterController.State.Nodes)) + + for _, nodeConfig := range clusterController.State.Nodes { + nodes = append(nodes, *nodeConfig) + } + + sort.Sort(NodeConfigList(nodes)) + newTokenAssignment, _ := clusterController.PartitioningStrategy.AssignTokens(nodes, clusterController.State.Tokens, clusterController.State.ClusterSettings.Partitions) + + clusterController.localNodeOwnedPartitionReplicaCache = nil + clusterController.clearPartitionOwnersCache() + localNodeOwnedPartitionReplicas := clusterController.localNodeOwnedPartitionReplicas() + localNodeTokenSnapshot := clusterController.localNodeTokenSnapshot() + clusterController.localNodeOwnedPartitionReplicaCache = nil + clusterController.clearPartitionOwnersCache() + + for token, owner := range newTokenAssignment { + clusterController.State.AssignToken(owner, uint64(token)) + } + + // perform diff between original token assignment and new token assignment to build 
deltas to place into update channel + clusterController.localDiffTokensAndNotify(localNodeTokenSnapshot) + clusterController.localDiffOwnedPartitionReplicasAndNotify(localNodeOwnedPartitionReplicas) +} + +func (clusterController *ClusterController) localNodeTokenSnapshot() map[uint64]bool { + nodeConfig, ok := clusterController.State.Nodes[clusterController.LocalNodeID] + + if !ok { + return map[uint64]bool{ } + } + + tokenSnapshot := make(map[uint64]bool, len(nodeConfig.Tokens)) + + for token, _ := range nodeConfig.Tokens { + tokenSnapshot[token] = true + } + + return tokenSnapshot +} + +func (clusterController *ClusterController) localDiffTokensAndNotify(tokenSnapshot map[uint64]bool) { + nodeConfig, ok := clusterController.State.Nodes[clusterController.LocalNodeID] + + if !ok { + return + } + + // find out which tokens have been lost + for token, _ := range tokenSnapshot { + if _, ok := nodeConfig.Tokens[token]; !ok { + // this token was present in the original snapshot but is not there now + clusterController.notifyLocalNode(DeltaNodeLoseToken, NodeLoseToken{ NodeID: clusterController.LocalNodeID, Token: token }) + } + } + + // find out which tokens have been gained + for token, _ := range nodeConfig.Tokens { + if _, ok := tokenSnapshot[token]; !ok { + // this token wasn't present in the original snapshot but is there now + clusterController.notifyLocalNode(DeltaNodeGainToken, NodeGainToken{ NodeID: clusterController.LocalNodeID, Token: token }) + } + } +} + +// A channel that provides notifications for updates to configuration affecting the local node +// This includes gaining or losing ownership of tokens, gaining or losing ownership of partition +// replicas, becoming part of a cluster or being removed from a cluster +func (clusterController *ClusterController) notifyLocalNode(deltaType ClusterStateDeltaType, delta interface{ }) { + clusterController.nextDeltaSet = append(clusterController.nextDeltaSet, ClusterStateDelta{ Type: deltaType, Delta: delta }) +} diff --git a/vendor/github.com/armPelionEdge/devicedb/cluster/delta.go b/vendor/github.com/armPelionEdge/devicedb/cluster/delta.go new file mode 100644 index 0000000..d73e154 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/cluster/delta.go @@ -0,0 +1,212 @@ +package cluster +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. 
+ // + + +import ( + ddbRaft "github.com/armPelionEdge/devicedb/raft" +) + +type ClusterStateDeltaType int + +const ( + DeltaNodeAdd ClusterStateDeltaType = iota + DeltaNodeRemove ClusterStateDeltaType = iota + DeltaNodeLoseToken ClusterStateDeltaType = iota + DeltaNodeGainToken ClusterStateDeltaType = iota + DeltaNodeGainPartitionReplicaOwnership ClusterStateDeltaType = iota + DeltaNodeLosePartitionReplicaOwnership ClusterStateDeltaType = iota + DeltaNodeLosePartitionReplica ClusterStateDeltaType = iota + DeltaNodeGainPartitionReplica ClusterStateDeltaType = iota + DeltaSiteAdded ClusterStateDeltaType = iota + DeltaSiteRemoved ClusterStateDeltaType = iota + DeltaRelayAdded ClusterStateDeltaType = iota + DeltaRelayRemoved ClusterStateDeltaType = iota + DeltaRelayMoved ClusterStateDeltaType = iota +) + +type ClusterStateDeltaRange []ClusterStateDelta + +func (r ClusterStateDeltaRange) Len() int { + return len(r) +} + +func (r ClusterStateDeltaRange) Swap(i, j int) { + r[i], r[j] = r[j], r[i] +} + +func (r ClusterStateDeltaRange) Less(i, j int) bool { + if r[i].Type != r[j].Type { + return r[i].Type < r[j].Type + } + + switch r[i].Type { + case DeltaNodeAdd: + return r[i].Delta.(NodeAdd).NodeID < r[j].Delta.(NodeAdd).NodeID + case DeltaNodeRemove: + return r[i].Delta.(NodeRemove).NodeID < r[j].Delta.(NodeRemove).NodeID + case DeltaNodeLoseToken: + if r[i].Delta.(NodeLoseToken).NodeID != r[j].Delta.(NodeLoseToken).NodeID { + return r[i].Delta.(NodeLoseToken).NodeID < r[j].Delta.(NodeLoseToken).NodeID + } + + return r[i].Delta.(NodeLoseToken).Token < r[j].Delta.(NodeLoseToken).Token + case DeltaNodeGainToken: + if r[i].Delta.(NodeGainToken).NodeID != r[j].Delta.(NodeGainToken).NodeID { + return r[i].Delta.(NodeGainToken).NodeID < r[j].Delta.(NodeGainToken).NodeID + } + + return r[i].Delta.(NodeGainToken).Token < r[j].Delta.(NodeGainToken).Token + case DeltaNodeGainPartitionReplicaOwnership: + if r[i].Delta.(NodeGainPartitionReplicaOwnership).NodeID != r[j].Delta.(NodeGainPartitionReplicaOwnership).NodeID { + return r[i].Delta.(NodeGainPartitionReplicaOwnership).NodeID < r[j].Delta.(NodeGainPartitionReplicaOwnership).NodeID + } + + if r[i].Delta.(NodeGainPartitionReplicaOwnership).Partition != r[j].Delta.(NodeGainPartitionReplicaOwnership).Partition { + return r[i].Delta.(NodeGainPartitionReplicaOwnership).Partition < r[j].Delta.(NodeGainPartitionReplicaOwnership).Partition + } + + return r[i].Delta.(NodeGainPartitionReplicaOwnership).Replica < r[j].Delta.(NodeGainPartitionReplicaOwnership).Replica + case DeltaNodeLosePartitionReplicaOwnership: + if r[i].Delta.(NodeLosePartitionReplicaOwnership).NodeID != r[j].Delta.(NodeLosePartitionReplicaOwnership).NodeID { + return r[i].Delta.(NodeLosePartitionReplicaOwnership).NodeID < r[j].Delta.(NodeLosePartitionReplicaOwnership).NodeID + } + + if r[i].Delta.(NodeLosePartitionReplicaOwnership).Partition != r[j].Delta.(NodeLosePartitionReplicaOwnership).Partition { + return r[i].Delta.(NodeLosePartitionReplicaOwnership).Partition < r[j].Delta.(NodeLosePartitionReplicaOwnership).Partition + } + + return r[i].Delta.(NodeLosePartitionReplicaOwnership).Replica < r[j].Delta.(NodeLosePartitionReplicaOwnership).Replica + case DeltaNodeGainPartitionReplica: + if r[i].Delta.(NodeGainPartitionReplica).NodeID != r[j].Delta.(NodeGainPartitionReplica).NodeID { + return r[i].Delta.(NodeGainPartitionReplica).NodeID < r[j].Delta.(NodeGainPartitionReplica).NodeID + } + + if r[i].Delta.(NodeGainPartitionReplica).Partition != r[j].Delta.(NodeGainPartitionReplica).Partition 
{ + return r[i].Delta.(NodeGainPartitionReplica).Partition < r[j].Delta.(NodeGainPartitionReplica).Partition + } + + return r[i].Delta.(NodeGainPartitionReplica).Replica < r[j].Delta.(NodeGainPartitionReplica).Replica + case DeltaNodeLosePartitionReplica: + if r[i].Delta.(NodeLosePartitionReplica).NodeID != r[j].Delta.(NodeLosePartitionReplica).NodeID { + return r[i].Delta.(NodeLosePartitionReplica).NodeID < r[j].Delta.(NodeLosePartitionReplica).NodeID + } + + if r[i].Delta.(NodeLosePartitionReplica).Partition != r[j].Delta.(NodeLosePartitionReplica).Partition { + return r[i].Delta.(NodeLosePartitionReplica).Partition < r[j].Delta.(NodeLosePartitionReplica).Partition + } + + return r[i].Delta.(NodeLosePartitionReplica).Replica < r[j].Delta.(NodeLosePartitionReplica).Replica + case DeltaSiteAdded: + return r[i].Delta.(SiteAdded).SiteID < r[j].Delta.(SiteAdded).SiteID + case DeltaSiteRemoved: + return r[i].Delta.(SiteRemoved).SiteID < r[j].Delta.(SiteRemoved).SiteID + case DeltaRelayAdded: + return r[i].Delta.(RelayAdded).RelayID < r[j].Delta.(RelayAdded).RelayID + case DeltaRelayRemoved: + return r[i].Delta.(RelayRemoved).RelayID < r[j].Delta.(RelayRemoved).RelayID + case DeltaRelayMoved: + if r[i].Delta.(RelayMoved).RelayID != r[j].Delta.(RelayMoved).RelayID { + return r[i].Delta.(RelayMoved).RelayID < r[j].Delta.(RelayMoved).RelayID + } + + return r[i].Delta.(RelayMoved).SiteID < r[j].Delta.(RelayMoved).SiteID + } + + return false +} + +type ClusterStateDelta struct { + Type ClusterStateDeltaType + Delta interface{} +} + +type NodeAdd struct { + NodeID uint64 + NodeConfig NodeConfig +} + +type NodeRemove struct { + NodeID uint64 +} + +type NodeAddress struct { + NodeID uint64 + Address ddbRaft.PeerAddress +} + +type NodeGainToken struct { + NodeID uint64 + Token uint64 +} + +type NodeLoseToken struct { + NodeID uint64 + Token uint64 +} + +type NodeGainPartitionReplicaOwnership struct { + NodeID uint64 + Partition uint64 + Replica uint64 +} + +type NodeLosePartitionReplicaOwnership struct { + NodeID uint64 + Partition uint64 + Replica uint64 +} + +type NodeGainPartitionReplica struct { + NodeID uint64 + Partition uint64 + Replica uint64 +} + +type NodeLosePartitionReplica struct { + NodeID uint64 + Partition uint64 + Replica uint64 +} + +type SiteAdded struct { + SiteID string +} + +type SiteRemoved struct { + SiteID string +} + +type RelayAdded struct { + RelayID string +} + +type RelayRemoved struct { + RelayID string +} + +type RelayMoved struct { + RelayID string + SiteID string +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/cluster/partitioner.go b/vendor/github.com/armPelionEdge/devicedb/cluster/partitioner.go new file mode 100644 index 0000000..92965a0 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/cluster/partitioner.go @@ -0,0 +1,438 @@ +package cluster +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. 
+ //
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ // SOFTWARE.
+ //
+
+
+import (
+    "errors"
+    "sort"
+    "sync"
+
+    . "github.com/armPelionEdge/devicedb/data"
+)
+
+type NodeTokenCount struct {
+    NodeID uint64
+    TokenCount int
+}
+
+type NodeTokenCountHeap []NodeTokenCount
+
+func (nodeTokenCountHeap NodeTokenCountHeap) Len() int {
+    return len(nodeTokenCountHeap)
+}
+
+func (nodeTokenCountHeap NodeTokenCountHeap) Swap(i, j int) {
+    nodeTokenCountHeap[i], nodeTokenCountHeap[j] = nodeTokenCountHeap[j], nodeTokenCountHeap[i]
+}
+
+func (nodeTokenCountHeap NodeTokenCountHeap) Less(i, j int) bool {
+    if nodeTokenCountHeap[i].TokenCount == nodeTokenCountHeap[j].TokenCount {
+        return nodeTokenCountHeap[i].NodeID < nodeTokenCountHeap[j].NodeID
+    }
+
+    return nodeTokenCountHeap[i].TokenCount > nodeTokenCountHeap[j].TokenCount
+}
+
+func (nodeTokenCountHeap *NodeTokenCountHeap) Push(x interface{}) {
+    *nodeTokenCountHeap = append(*nodeTokenCountHeap, x.(NodeTokenCount))
+}
+
+func (nodeTokenCountHeap *NodeTokenCountHeap) Pop() interface{} {
+    old := *nodeTokenCountHeap
+    n := len(old)
+    x := old[n - 1]
+    *nodeTokenCountHeap = old[0 : n - 1]
+
+    return x
+}
+
+const MaxPartitionCount uint64 = 65536
+const DefaultPartitionCount uint64 = 1024
+const MinPartitionCount uint64 = 64
+
+var EPreconditionFailed = errors.New("Unable to validate precondition")
+var ENoNodesAvailable = errors.New("Unable to assign tokens because there are no available nodes in the cluster")
+
+type PartitioningStrategy interface {
+    AssignTokens(nodes []NodeConfig, currentTokenAssignment []uint64, partitions uint64) ([]uint64, error)
+    AssignPartitions(nodes []NodeConfig, currentPartitionAssignment [][]uint64)
+    Owners(tokenAssignment []uint64, partition uint64, replicationFactor uint64) []uint64
+    Partition(key string, partitionCount uint64) uint64
+}
+
+// Simple partitioning strategy that does not account for capacity other than treating
+// nodes that are marked as having 0 capacity as decommissioned. Other than that it just
+// tries to assign as close to an even number of tokens to each node as possible
+type SimplePartitioningStrategy struct {
+    // cached partition count
+    partitionCount uint64
+    // cached shift amount so it doesn't have to be recalculated every time
+    shiftAmount int
+    lock sync.Mutex
+}
+
+func (ps *SimplePartitioningStrategy) countAvailableNodes(nodes []NodeConfig) int {
+    availableNodes := 0
+
+    for _, node := range nodes {
+        if node.Capacity == 0 {
+            continue
+        }
+
+        availableNodes++
+    }
+
+    return availableNodes
+}
+
+func (ps *SimplePartitioningStrategy) countTokens(nodes []NodeConfig) []uint64 {
+    tokens := make([]uint64, len(nodes))
+
+    for i, node := range nodes {
+        tokens[i] = uint64(len(node.Tokens))
+    }
+
+    return tokens
+}
+
+func (ps *SimplePartitioningStrategy) checkPreconditions(nodes []NodeConfig, currentAssignments []uint64, partitions uint64) error {
+    // Precondition 1: nodes must be non-nil
+    if nodes == nil {
+        return EPreconditionFailed
+    }
+
+    // Precondition 2: nodes must be sorted in order of ascending node id and all node ids are unique
+    if !ps.nodesAreSortedAndUnique(nodes) {
+        return EPreconditionFailed
+    }
+
+    // Precondition 3: The length of currentAssignments must be equal to partitions
+    if uint64(len(currentAssignments)) != partitions {
+        return EPreconditionFailed
+    }
+
+    // Precondition 4: partitions must be non-zero
+    if partitions == 0 {
+        return EPreconditionFailed
+    }
+
+    // Precondition 5: For all assignments in currentAssignments, the node a token is assigned to must exist in nodes[]
+    // unless it is set to zero, which indicates that the node does not exist
+    for _, owner := range currentAssignments {
+        if owner == 0 {
+            continue
+        }
+
+        ownerExists := false
+
+        for _, node := range nodes {
+            if node.Address.NodeID == owner {
+                ownerExists = true
+
+                break
+            }
+        }
+
+        if !ownerExists {
+            return EPreconditionFailed
+        }
+    }
+
+    return nil
+}
+
+func (ps *SimplePartitioningStrategy) nodesAreSortedAndUnique(nodes []NodeConfig) bool {
+    var lastNodeID uint64 = 0
+
+    for _, node := range nodes {
+        if lastNodeID >= node.Address.NodeID {
+            return false
+        }
+
+        lastNodeID = node.Address.NodeID
+    }
+
+    return true
+}
+
+func (ps *SimplePartitioningStrategy) AssignTokens(nodes []NodeConfig, currentAssignments []uint64, partitions uint64) ([]uint64, error) {
+    if err := ps.checkPreconditions(nodes, currentAssignments, partitions); err != nil {
+        return nil, err
+    }
+
+    // Precondition 6: The number of nodes must be <= partitions
+    if uint64(len(nodes)) > partitions {
+        // In this case return a valid assignment using just a subset of nodes. Limit the number of nodes
+        // that are assigned tokens to be <= partitions
+        nodes = nodes[:partitions]
+    }
+
+    assignments := make([]uint64, partitions)
+    tokenCounts := ps.countTokens(nodes)
+    availableNodes := ps.countAvailableNodes(nodes)
+
+    if availableNodes == 0 {
+        return assignments, nil
+    }
+
+    tokenCountFloor := partitions / uint64(availableNodes)
+    tokenCountCeil := tokenCountFloor
+
+    if partitions % uint64(availableNodes) != 0 {
+        tokenCountCeil += 1
+    }
+
+    copy(assignments, currentAssignments)
+
+    // unassign any token owned by a decommissioned node
+    for i, node := range nodes {
+        if node.Capacity != 0 {
+            continue
+        }
+
+        // release tokens owned by this decommissioned node
+        for token, _ := range node.Tokens {
+            assignments[token] = 0
+            tokenCounts[i]--
+        }
+    }
+
+    var nextNode int
+
+    // Find an owner for unplaced tokens. Tokens may be unplaced due to an uninitialized cluster,
+    // removed nodes, or decommissioned nodes
+    for token, owner := range assignments {
+        if owner != 0 {
+            continue
+        }
+
+        // Token is unassigned. Need to find a home for it
+        for i := 0; i < len(nodes); i++ {
+            nodeIndex := (nextNode + i) % len(nodes)
+            node := nodes[nodeIndex]
+
+            if node.Capacity == 0 {
+                // This node is decommissioning. It is effectively removed from the cluster
+                continue
+            }
+
+            if tokenCounts[nodeIndex] < tokenCountCeil {
+                assignments[token] = node.Address.NodeID
+                tokenCounts[nodeIndex]++
+                nextNode = (nodeIndex + 1) % len(nodes)
+
+                break
+            }
+        }
+    }
+
+    // invariant: all tokens should be placed at some non-decommissioned node
+
+    for i, _ := range nodes {
+        if nodes[i].Capacity == 0 {
+            // The ith node is decommissioning. It should receive none of the tokens
+            continue
+        }
+
+        // Pass 1: Each time after giving a token to this node,
+        // skip some tokens in an attempt to avoid adjacencies.
+        // Space out the placement evenly throughout the range using this skip amount
+        skipAmount := len(assignments) / int(tokenCountFloor)
+
+        if skipAmount > 1 {
+            skipAmount -= 1
+        }
+
+        for token := 0; token < len(assignments) && tokenCounts[i] < tokenCountFloor; token++ {
+            owner := assignments[token]
+            ownerIndex := 0
+
+            for j := 0; j < len(nodes); j++ {
+                if nodes[j].Address.NodeID == owner {
+                    ownerIndex = j
+                    break
+                }
+            }
+
+            // take this token
+            if tokenCounts[ownerIndex] > tokenCountFloor {
+                assignments[token] = nodes[i].Address.NodeID
+                tokenCounts[i]++
+                tokenCounts[ownerIndex]--
+
+                // Increment so a space is skipped
+                token += skipAmount
+            }
+        }
+
+        // Pass 2: Don't skip any spaces. Just make sure the node
+        // gets enough tokens assigned to it.
+        // Should evenly space tokens throughout the ring for this node for even
+        // partition distributions
+        for j := 0; tokenCounts[i] < tokenCountFloor && j < len(tokenCounts); j++ {
+            if j == i || tokenCounts[j] <= tokenCountFloor {
+                // a node can't steal a token from itself and it can't steal a token
+                // from a node that doesn't have surplus tokens
+                continue
+            }
+
+            // steal a token from the jth node
+            for token, owner := range assignments {
+                if owner == nodes[j].Address.NodeID {
+                    assignments[token] = nodes[i].Address.NodeID
+                    tokenCounts[i]++
+                    tokenCounts[j]--
+
+                    // We have taken all the tokens that we can take from this node;
need to move on + if tokenCounts[j] == tokenCountFloor { + break + } + } + } + } + + // loop invariant: all nodes in nodes[:i+1] that have positive capacity have been assigned at least tokenCountFloor tokens and at most tokenCountCeil tokens + } + + return assignments, nil +} + +func (ps *SimplePartitioningStrategy) AssignPartitions(nodes []NodeConfig, currentPartitionAssignment [][]uint64) { + sort.Sort(NodeConfigList(nodes)) + + var nodeOwnershipHistogram map[uint64]int = make(map[uint64]int) + + for _, node := range nodes { + if node.Capacity > 0 { + nodeOwnershipHistogram[node.Address.NodeID] = 0 + } + } + + // build histogram and mark any partition replicas in the assignment that were assigned to nodes + // that are no longer in the cluster + for partition := 0; partition < len(currentPartitionAssignment); partition++ { + for replica := 0; replica < len(currentPartitionAssignment[partition]); replica++ { + ownerID := currentPartitionAssignment[partition][replica] + + if _, ok := nodeOwnershipHistogram[ownerID]; !ok { + currentPartitionAssignment[partition][replica] = 0 + continue + } + + nodeOwnershipHistogram[ownerID]++ + } + } + + if len(nodeOwnershipHistogram) == 0 { + return + } + + var minimumPartitionReplicas int = (len(currentPartitionAssignment) * len(currentPartitionAssignment[0])) / len(nodeOwnershipHistogram) + + for _, node := range nodes { + var nodeID uint64 = node.Address.NodeID + + if _, ok := nodeOwnershipHistogram[nodeID]; !ok { + continue + } + + if nodeOwnershipHistogram[nodeID] < minimumPartitionReplicas { + for partition := 0; partition < len(currentPartitionAssignment); partition++ { + for replica := 0; replica < len(currentPartitionAssignment[partition]); replica++ { + ownerID := currentPartitionAssignment[partition][replica] + + if ownerID == 0 || nodeOwnershipHistogram[ownerID] > minimumPartitionReplicas { + currentPartitionAssignment[partition][replica] = nodeID + nodeOwnershipHistogram[nodeID]++ + + if ownerID != 0 { + nodeOwnershipHistogram[ownerID]-- + } + } + } + } + } + } +} + +func (ps *SimplePartitioningStrategy) Owners(tokenAssignment []uint64, partition uint64, replicationFactor uint64) []uint64 { + if tokenAssignment == nil { + return []uint64{} + } + + if partition >= uint64(len(tokenAssignment)) { + return []uint64{} + } + + ownersSet := make(map[uint64]bool, int(replicationFactor)) + owners := make([]uint64, 0, int(replicationFactor)) + + for i := 0; i < len(tokenAssignment) && len(ownersSet) < int(replicationFactor); i++ { + realIndex := (i + int(partition)) % len(tokenAssignment) + + if _, ok := ownersSet[tokenAssignment[realIndex]]; !ok { + ownersSet[tokenAssignment[realIndex]] = true + owners = append(owners, tokenAssignment[realIndex]) + } + } + + if uint64(len(owners)) > 0 && replicationFactor > uint64(len(owners)) { + originalOwnersList := owners + + for i := 0; uint64(i) < replicationFactor - uint64(len(originalOwnersList)); i++ { + owners = append(owners, originalOwnersList[i % len(originalOwnersList)]) + } + } + + return owners +} + +func (ps *SimplePartitioningStrategy) Partition(key string, partitionCount uint64) uint64 { + hash := NewHash([]byte(key)).High() + + if ps.shiftAmount == 0 { + ps.CalculateShiftAmount(partitionCount) + } + + return hash >> uint(ps.shiftAmount) +} + +func (ps *SimplePartitioningStrategy) CalculateShiftAmount(partitionCount uint64) int { + ps.lock.Lock() + defer ps.lock.Unlock() + + if ps.shiftAmount != 0 { + return ps.shiftAmount + } + + ps.shiftAmount = 65 + + for partitionCount > 0 { + ps.shiftAmount-- + 
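+        // Each pass strips one bit from partitionCount, so for a partition count
+        // of 2^k the loop leaves shiftAmount = 64 - k, and Partition() keeps the
+        // top k bits of the 64-bit key hash. For example (hypothetical values),
+        // 1024 partitions => shiftAmount = 54 => partition = hash >> 54.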
partitionCount = partitionCount >> 1 + } + + return ps.shiftAmount +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/cluster/state.go b/vendor/github.com/armPelionEdge/devicedb/cluster/state.go new file mode 100644 index 0000000..59e3f73 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/cluster/state.go @@ -0,0 +1,405 @@ +package cluster +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "errors" + "encoding/json" + + ddbRaft "github.com/armPelionEdge/devicedb/raft" +) + +var ENoSuchPartition = errors.New("The specified partition does not exist") +var ENoSuchToken = errors.New("The specified token does not exist") +var ENoSuchReplica = errors.New("The specified partition replica does not exist") + +type PartitionReplica struct { + // The partition number. The partition number combined with the total number of partitions and the range of the hash + // space define a contiguous range in the hash space for which this partition is responsible + Partition uint64 + // The index of this partition replica. If the replication factor is set to 3 this number will range from 0 to 2 + // The 0th partition replica represents the primary replica for that partition. The owner of the primary replica + // will be the only node able to accept writes for this partition. The other replicas for this partition serve + // only as backups + Replica uint64 + // The ID of the node that holds this partition replica. The holder can differ from the owner if the cluster is in + // a transitional state and the partition replica is being transferred to a new node. The owner is based only on + // the current token assignments + Holder uint64 + // The ID of the node that owns this partition + Owner uint64 +} + +type NodeConfig struct { + // The network address of the node + Address ddbRaft.PeerAddress + // Node capacity in bytes + Capacity uint64 + // The tokens owned by this node + Tokens map[uint64]bool + // a set of partition replicas owned by this node + OwnedPartitionReplicas map[uint64]map[uint64]bool + // a set of partition replicas held by this node. This is derived from the cluster state and is used + // only internally for quick lookup. 
It is not stored or transferred as part of a node's configuration
+    PartitionReplicas map[uint64]map[uint64]bool
+}
+
+func (nodeConfig *NodeConfig) takePartitionReplica(partition, replica uint64) {
+    if _, ok := nodeConfig.PartitionReplicas[partition]; !ok {
+        nodeConfig.PartitionReplicas[partition] = make(map[uint64]bool)
+    }
+
+    nodeConfig.PartitionReplicas[partition][replica] = true
+}
+
+func (nodeConfig *NodeConfig) relinquishPartitionReplica(partition, replica uint64) {
+    replicas, ok := nodeConfig.PartitionReplicas[partition]
+
+    if !ok {
+        return
+    }
+
+    delete(replicas, replica)
+
+    if len(replicas) == 0 {
+        delete(nodeConfig.PartitionReplicas, partition)
+    }
+}
+
+func (nodeConfig *NodeConfig) takePartitionReplicaOwnership(partition, replica uint64) {
+    if _, ok := nodeConfig.OwnedPartitionReplicas[partition]; !ok {
+        nodeConfig.OwnedPartitionReplicas[partition] = make(map[uint64]bool)
+    }
+
+    nodeConfig.OwnedPartitionReplicas[partition][replica] = true
+}
+
+func (nodeConfig *NodeConfig) relinquishPartitionReplicaOwnership(partition, replica uint64) {
+    replicas, ok := nodeConfig.OwnedPartitionReplicas[partition]
+
+    if !ok {
+        return
+    }
+
+    delete(replicas, replica)
+
+    if len(replicas) == 0 {
+        delete(nodeConfig.OwnedPartitionReplicas, partition)
+    }
+}
+
+func (nodeConfig *NodeConfig) relinquishToken(token uint64) {
+    delete(nodeConfig.Tokens, token)
+}
+
+func (nodeConfig *NodeConfig) takeToken(token uint64) {
+    nodeConfig.Tokens[token] = true
+}
+
+type ClusterState struct {
+    // A set of node IDs for nodes that were previously cluster members
+    // but have since been removed
+    RemovedNodes map[uint64]bool
+    // Ring members and their configuration
+    Nodes map[uint64]*NodeConfig
+    // A mapping between tokens and the node that owns them
+    Tokens []uint64
+    // The partition replicas in this cluster
+    Partitions [][]*PartitionReplica
+    // Global cluster settings that must be initialized before the cluster is
+    // initialized
+    ClusterSettings ClusterSettings
+    Sites map[string]bool
+    Relays map[string]string
+}
+
+func (clusterState *ClusterState) SiteExists(siteID string) bool {
+    if clusterState.Sites == nil {
+        return false
+    }
+
+    _, ok := clusterState.Sites[siteID]
+
+    return ok
+}
+
+func (clusterState *ClusterState) AddSite(siteID string) {
+    if clusterState.Sites == nil {
+        clusterState.Sites = make(map[string]bool)
+    }
+
+    clusterState.Sites[siteID] = true
+}
+
+func (clusterState *ClusterState) RemoveSite(siteID string) {
+    if clusterState.Sites == nil {
+        return
+    }
+
+    for relayID, relaysSiteID := range clusterState.Relays {
+        if siteID == relaysSiteID {
+            clusterState.Relays[relayID] = ""
+        }
+    }
+
+    delete(clusterState.Sites, siteID)
+}
+
+func (clusterState *ClusterState) AddRelay(relayID string) {
+    if clusterState.Relays == nil {
+        clusterState.Relays = make(map[string]string)
+    }
+
+    clusterState.Relays[relayID] = ""
+}
+
+func (clusterState *ClusterState) RemoveRelay(relayID string) {
+    if clusterState.Relays == nil {
+        return
+    }
+
+    delete(clusterState.Relays, relayID)
+}
+
+func (clusterState *ClusterState) MoveRelay(relayID, siteID string) {
+    if clusterState.Relays == nil || clusterState.Sites == nil {
+        return
+    }
+
+    if _, ok := clusterState.Relays[relayID]; !ok {
+        return
+    }
+
+    if _, ok := clusterState.Sites[siteID]; !ok && siteID != "" {
+        return
+    }
+
+    clusterState.Relays[relayID] = siteID
+}
+
+func (clusterState *ClusterState) AddNode(nodeConfig NodeConfig) {
+    if clusterState.Nodes == nil {
+        // lazy initialization of nodes map
+        clusterState.Nodes = make(map[uint64]*NodeConfig)
+    }
+
+    // node ID must be non-zero
+    if nodeConfig.Address.NodeID == 0 {
+        return
+    }
+
+    // ignore if this node is already added to the cluster
+    if _, ok := clusterState.Nodes[nodeConfig.Address.NodeID]; ok {
+        return
+    }
+
+    clusterState.Nodes[nodeConfig.Address.NodeID] = &nodeConfig
+}
+
+func (clusterState *ClusterState) RemoveNode(node uint64) {
+    // ignore if this node doesn't exist in the cluster
+    if _, ok := clusterState.Nodes[node]; !ok {
+        return
+    }
+
+    // any partition replica that was held by this node is now held by nobody
+    for partition, replicas := range clusterState.Nodes[node].PartitionReplicas {
+        for replica, _ := range replicas {
+            clusterState.Nodes[node].relinquishPartitionReplica(partition, replica)
+            clusterState.Partitions[partition][replica].Holder = 0
+        }
+    }
+
+    // any partition replica that was owned by this node is now owned by nobody
+    for partition, replicas := range clusterState.Nodes[node].OwnedPartitionReplicas {
+        for replica, _ := range replicas {
+            clusterState.Nodes[node].relinquishPartitionReplicaOwnership(partition, replica)
+            clusterState.Partitions[partition][replica].Owner = 0
+        }
+    }
+
+    // any token that was owned by this node is now owned by nobody
+    for token, _ := range clusterState.Nodes[node].Tokens {
+        clusterState.Nodes[node].relinquishToken(token)
+        clusterState.Tokens[token] = 0
+    }
+
+    delete(clusterState.Nodes, node)
+
+    if clusterState.RemovedNodes == nil {
+        clusterState.RemovedNodes = make(map[uint64]bool)
+    }
+
+    clusterState.RemovedNodes[node] = true
+}
+
+// change the owner of a token
+func (clusterState *ClusterState) AssignToken(node, token uint64) error {
+    if token >= uint64(len(clusterState.Tokens)) {
+        return ENoSuchToken
+    }
+
+    if _, ok := clusterState.Nodes[node]; !ok && node != 0 {
+        return ENoSuchNode
+    }
+
+    currentOwner := clusterState.Tokens[token]
+
+    if currentOwner != 0 {
+        clusterState.Nodes[currentOwner].relinquishToken(token)
+    }
+
+    // invariant should be maintained that a token is owned by exactly one node at a time
+    clusterState.Tokens[token] = node
+
+    // Need to allow the node to be set to zero for a token
+    if node != 0 {
+        clusterState.Nodes[node].takeToken(token)
+    }
+
+    return nil
+}
+
+// change the owner of a partition replica
+func (clusterState *ClusterState) AssignPartitionReplicaOwnership(partition, replica, node uint64) error {
+    if partition >= uint64(len(clusterState.Partitions)) {
+        return ENoSuchPartition
+    }
+
+    replicas := clusterState.Partitions[partition]
+    _, okNode := clusterState.Nodes[node]
+
+    if !okNode {
+        return ENoSuchNode
+    }
+
+    if replica >= uint64(len(replicas)) {
+        return ENoSuchReplica
+    }
+
+    currentOwner := replicas[replica].Owner
+
+    if currentOwner != 0 {
+        // invariant should be maintained that a partition replica is owned by exactly one node at a time
+        clusterState.Nodes[currentOwner].relinquishPartitionReplicaOwnership(partition, replica)
+    }
+
+    replicas[replica].Owner = node
+    clusterState.Nodes[node].takePartitionReplicaOwnership(partition, replica)
+
+    return nil
+}
+
+// change the holder of a partition replica
+func (clusterState *ClusterState) AssignPartitionReplica(partition, replica, node uint64) error {
+    if partition >= uint64(len(clusterState.Partitions)) {
+        return ENoSuchPartition
+    }
+
+    replicas := clusterState.Partitions[partition]
+    _, okNode := clusterState.Nodes[node]
+
+    if !okNode {
+        return ENoSuchNode
+    }
+
+    if replica >= uint64(len(replicas)) {
+        return ENoSuchReplica
+    }
+
+    currentHolder := replicas[replica].Holder
+
+    if currentHolder != 0 {
+        // invariant should be maintained that a partition replica is held by exactly one node at a time
+        clusterState.Nodes[currentHolder].relinquishPartitionReplica(partition, replica)
+    }
+
+    replicas[replica].Holder = node
+    clusterState.Nodes[node].takePartitionReplica(partition, replica)
+
+    return nil
+}
+
+func (clusterState *ClusterState) Initialize() {
+    if !clusterState.ClusterSettings.AreInitialized() {
+        return
+    }
+
+    clusterState.Tokens = make([]uint64, clusterState.ClusterSettings.Partitions)
+    clusterState.Partitions = make([][]*PartitionReplica, clusterState.ClusterSettings.Partitions)
+
+    for partition := 0; uint64(partition) < clusterState.ClusterSettings.Partitions; partition++ {
+        clusterState.Partitions[partition] = make([]*PartitionReplica, clusterState.ClusterSettings.ReplicationFactor)
+
+        for replica := 0; uint64(replica) < clusterState.ClusterSettings.ReplicationFactor; replica++ {
+            clusterState.Partitions[partition][replica] = &PartitionReplica{
+                Partition: uint64(partition),
+                Replica: uint64(replica),
+            }
+        }
+    }
+}
+
+func (clusterState *ClusterState) Snapshot() ([]byte, error) {
+    return json.Marshal(clusterState)
+}
+
+func (clusterState *ClusterState) Recover(snapshot []byte) error {
+    var cs ClusterState
+    err := json.Unmarshal(snapshot, &cs)
+
+    if err != nil {
+        return err
+    }
+
+    *clusterState = cs
+
+    return nil
+}
+
+type ClusterSettings struct {
+    // The replication factor of this cluster
+    ReplicationFactor uint64
+    // The number of partitions in the hash space
+    Partitions uint64
+}
+
+func (clusterSettings *ClusterSettings) AreInitialized() bool {
+    return clusterSettings.ReplicationFactor != 0 && clusterSettings.Partitions != 0
+}
+
+type NodeConfigList []NodeConfig
+
+func (nodeConfigList NodeConfigList) Len() int {
+    return len(nodeConfigList)
+}
+
+func (nodeConfigList NodeConfigList) Swap(i, j int) {
+    nodeConfigList[i], nodeConfigList[j] = nodeConfigList[j], nodeConfigList[i]
+}
+
+func (nodeConfigList NodeConfigList) Less(i, j int) bool {
+    return nodeConfigList[i].Address.NodeID < nodeConfigList[j].Address.NodeID
+}
diff --git a/vendor/github.com/armPelionEdge/devicedb/clusterio/agent.go b/vendor/github.com/armPelionEdge/devicedb/clusterio/agent.go
new file mode 100644
index 0000000..93536c7
--- /dev/null
+++ b/vendor/github.com/armPelionEdge/devicedb/clusterio/agent.go
@@ -0,0 +1,602 @@
+package clusterio
+//
+ // Copyright (c) 2019 ARM Limited.
+ //
+ // SPDX-License-Identifier: MIT
+ //
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
+ // of this software and associated documentation files (the "Software"), to
+ // deal in the Software without restriction, including without limitation the
+ // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ // sell copies of the Software, and to permit persons to whom the Software is
+ // furnished to do so, subject to the following conditions:
+ //
+ // The above copyright notice and this permission notice shall be included in all
+ // copies or substantial portions of the Software.
+ //
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ // SOFTWARE.
+ //
+
+
+import (
+    "context"
+    "fmt"
+    "github.com/prometheus/client_golang/prometheus"
+    "sync"
+    "time"
+
+    . "github.com/armPelionEdge/devicedb/bucket"
+    . "github.com/armPelionEdge/devicedb/data"
+    . "github.com/armPelionEdge/devicedb/error"
+    . "github.com/armPelionEdge/devicedb/logging"
+    . "github.com/armPelionEdge/devicedb/routes"
+)
+
+var (
+    prometheusRequestCounts = prometheus.NewCounterVec(prometheus.CounterOpts{
+        Namespace: "sites",
+        Subsystem: "devicedb_internal",
+        Name: "request_counts",
+        Help: "The number of requests",
+    }, []string{
+        "type",
+        "source_node",
+        "endpoint_node",
+    })
+
+    prometheusRequestFailures = prometheus.NewCounterVec(prometheus.CounterOpts{
+        Namespace: "sites",
+        Subsystem: "devicedb_internal",
+        Name: "request_failures",
+        Help: "The number of request failures",
+    }, []string{
+        "type",
+        "source_node",
+        "endpoint_node",
+    })
+
+    prometheusReachabilityStatus = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+        Name: "devicedb_peer_reachability",
+        Help: "A binary gauge indicating the reachability status of peer nodes",
+    }, []string{
+        "node",
+    })
+)
+
+func init() {
+    prometheus.MustRegister(prometheusRequestCounts, prometheusRequestFailures, prometheusReachabilityStatus)
+}
+
+var DefaultTimeout time.Duration = time.Second * 20
+
+type getResult struct {
+    nodeID uint64
+    siblingSets []*SiblingSet
+}
+
+type getMatchesResult struct {
+    nodeID uint64
+    siblingSetIterator SiblingSetIterator
+}
+
+type Agent struct {
+    PartitionResolver PartitionResolver
+    NodeClient NodeClient
+    NodeReadRepairer NodeReadRepairer
+    Timeout time.Duration
+    mu sync.Mutex
+    nextOperationID uint64
+    operationCancellers map[uint64]func()
+}
+
+func NewAgent(nodeClient NodeClient, partitionResolver PartitionResolver) *Agent {
+    readRepairer := NewReadRepairer(nodeClient)
+    readRepairer.Timeout = DefaultTimeout
+
+    return &Agent{
+        Timeout: DefaultTimeout,
+        PartitionResolver: partitionResolver,
+        NodeClient: nodeClient,
+        NodeReadRepairer: readRepairer,
+        operationCancellers: make(map[uint64]func(), 0),
+    }
+}
+
+func (agent *Agent) recordRequestMetrics(requestType string, destinationNode uint64, err error) {
+    var labels = prometheus.Labels{
+        "type": requestType,
+        "source_node": fmt.Sprintf("%d", agent.NodeClient.LocalNodeID()),
+        "endpoint_node": fmt.Sprintf("%d", destinationNode),
+    }
+
+    prometheusRequestCounts.With(labels).Inc()
+
+    var connectivityStatus float64 = 1
+
+    if err != nil {
+        prometheusRequestFailures.With(labels).Inc()
+
+        // The connectivity status should only be set to 0 if the error was connectivity based, not a request-related error produced
+        // by the destination node.
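+        // A DBerror is an application-level error returned by the peer itself
+        // (for example EBucketDoesNotExist or ESiteDoesNotExist), which implies
+        // that the request reached the node; any other error type (such as a
+        // timeout or transport failure) is assumed here to mean the peer was
+        // unreachable.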
+ if _, ok := err.(DBerror); !ok { + connectivityStatus = 0 + } + } + + prometheusReachabilityStatus.With(prometheus.Labels{ "node": labels["endpoint_node"] }).Set(connectivityStatus) +} + +func (agent *Agent) Merge(ctx context.Context, siteID string, bucket string, patch map[string]*SiblingSet) (int, int, error) { + var partitionNumber uint64 = agent.PartitionResolver.Partition(siteID) + var replicaNodes []uint64 = agent.PartitionResolver.ReplicaNodes(partitionNumber) + var resultError error = ENoQuorum + + opID, ctxDeadline := agent.newOperation(ctx) + + var remainingNodes map[uint64]bool = make(map[uint64]bool, len(replicaNodes)) + + for _, nodeID := range replicaNodes { + remainingNodes[nodeID] = true + } + + nTotal := len(remainingNodes) + nMerged, err := agent.merge(ctxDeadline, opID, remainingNodes, agent.NQuorum(nTotal), partitionNumber, siteID, bucket, patch, false) + + if err == ENoQuorum { + // If a specific error occurred before this overrides ENoQuorum + err = resultError + } + + return nTotal, nMerged, err +} + +func (agent *Agent) Batch(ctx context.Context, siteID string, bucket string, updateBatch *UpdateBatch) (int, int, error) { + var partitionNumber uint64 = agent.PartitionResolver.Partition(siteID) + var replicaNodes []uint64 = agent.PartitionResolver.ReplicaNodes(partitionNumber) + var resultError error = ENoQuorum + var nFailed int + + opID, ctxDeadline := agent.newOperation(ctx) + + var remainingNodes map[uint64]bool = make(map[uint64]bool, len(replicaNodes)) + + for _, nodeID := range replicaNodes { + remainingNodes[nodeID] = true + } + + var nTotal int = len(remainingNodes) + + // If local node is included in remainingNodes it should be attempted first + if remainingNodes[agent.NodeClient.LocalNodeID()] { + nodeID := agent.NodeClient.LocalNodeID() + patch, err := agent.NodeClient.Batch(ctxDeadline, nodeID, partitionNumber, siteID, bucket, updateBatch) + + delete(remainingNodes, nodeID) + + agent.recordRequestMetrics("batch", nodeID, err) + + if err != nil { + Log.Errorf("Unable to execute batch update to bucket %s at site %s at node %d: %v", bucket, siteID, nodeID, err.Error()) + + if err == EBucketDoesNotExist || err == ESiteDoesNotExist { + resultError = err + } + + nFailed++ + } else { + // passing in NQuorum() - 1 since one node was already successful in applying the update so + // we require one less node to achieve write quorum + nMerged, err := agent.merge(ctxDeadline, opID, remainingNodes, agent.NQuorum(nTotal) - 1, partitionNumber, siteID, bucket, patch, true) + + if err == ENoQuorum { + // If a specific error occurred before this overrides ENoQuorum + err = resultError + } + + return nTotal, nMerged + 1, err + } + } + + for nodeID, _ := range remainingNodes { + patch, err := agent.NodeClient.Batch(ctxDeadline, nodeID, partitionNumber, siteID, bucket, updateBatch) + + delete(remainingNodes, nodeID) + + agent.recordRequestMetrics("batch", nodeID, err) + + if err != nil { + Log.Errorf("Unable to execute batch update to bucket %s at site %s at node %d: %v", bucket, siteID, nodeID, err.Error()) + + if err == EBucketDoesNotExist || err == ESiteDoesNotExist { + resultError = err + } + + nFailed++ + + continue + } + + // passing in NQuorum() - 1 since one node was already successful in applying the update so + // we require one less node to achieve write quorum + nMerged, err := agent.merge(ctxDeadline, opID, remainingNodes, agent.NQuorum(nTotal) - 1, partitionNumber, siteID, bucket, patch, true) + + if err == ENoQuorum { + // If a specific error occurred before 
this overrides ENoQuorum
+                err = resultError
+            }
+
+            return nTotal, nMerged + 1, err
+        }
+    }
+
+    for nodeID, _ := range remainingNodes {
+        patch, err := agent.NodeClient.Batch(ctxDeadline, nodeID, partitionNumber, siteID, bucket, updateBatch)
+
+        delete(remainingNodes, nodeID)
+
+        agent.recordRequestMetrics("batch", nodeID, err)
+
+        if err != nil {
+            Log.Errorf("Unable to execute batch update to bucket %s at site %s at node %d: %v", bucket, siteID, nodeID, err.Error())
+
+            if err == EBucketDoesNotExist || err == ESiteDoesNotExist {
+                resultError = err
+            }
+
+            nFailed++
+
+            continue
+        }
+
+        // passing in NQuorum() - 1 since one node was already successful in applying the update so
+        // we require one less node to achieve write quorum
+        nMerged, err := agent.merge(ctxDeadline, opID, remainingNodes, agent.NQuorum(nTotal) - 1, partitionNumber, siteID, bucket, patch, true)
+
+        if err == ENoQuorum {
+            // If a specific error occurred before this overrides ENoQuorum
+            err = resultError
+        }
+
+        return nTotal, nMerged + 1, err
+    }
+
+    return nTotal, 0, resultError
+}
+
+func (agent *Agent) merge(ctx context.Context, opID uint64, nodes map[uint64]bool, nQuorum int, partitionNumber uint64, siteID string, bucket string, patch map[string]*SiblingSet, broadcastToRelays bool) (int, error) {
+    var nApplied int = 0
+    var nFailed int = 0
+    var applied chan int = make(chan int, len(nodes))
+    var failed chan error = make(chan error, len(nodes))
+    var resultError error = ENoQuorum
+
+    for nodeID, _ := range nodes {
+        go func(nodeID uint64) {
+            err := agent.NodeClient.Merge(ctx, nodeID, partitionNumber, siteID, bucket, patch, broadcastToRelays)
+
+            agent.recordRequestMetrics("merge", nodeID, err)
+
+            if err != nil {
+                Log.Errorf("Unable to merge patch into bucket %s at site %s at node %d: %v", bucket, siteID, nodeID, err.Error())
+
+                failed <- err
+
+                return
+            }
+
+            applied <- 1
+        }(nodeID)
+    }
+
+    var quorumReached chan int = make(chan int)
+    var allAttemptsMade chan int = make(chan int, 1)
+
+    go func() {
+        // All calls to NodeClient.Merge() will succeed, receive a failure response from the specified peer, or time out eventually
+        for nApplied + nFailed < len(nodes) {
+            select {
+            case err := <-failed:
+                // indicates that a call to NodeClient.Merge() either received an error response from the specified node or timed out
+                nFailed++
+
+                if err == EBucketDoesNotExist || err == ESiteDoesNotExist {
+                    resultError = err
+                }
+            case <-applied:
+                // indicates that a call to NodeClient.Merge() received a success response from the specified node
+                nApplied++
+
+                if nApplied == nQuorum {
+                    // if a quorum of nodes have successfully been written to then the Merge() function should return successfully
+                    // without waiting for the rest of the responses to come in. However, this function should continue to run
+                    // until the deadline is reached or a response (whether it be failure or success) is received for each call
+                    // to Merge() to allow this update to propagate to all replicas
+                    quorumReached <- nApplied
+                }
+            }
+        }
+
+        // Once the deadline is reached or all calls to Merge() have received a response this channel should be written
+        // to so that if Merge() is still waiting to return (since quorum was never reached) it will return an error response
+        // indicating that quorum was not established and the update was not successfully applied
+        allAttemptsMade <- nApplied
+        // Do this to remove the canceller from the map even though the operation is already done
+        agent.cancelOperation(opID)
+    }()
+
+    if len(nodes) == 0 {
+        if nQuorum == 0 {
+            return 0, nil
+        }
+
+        return 0, ENoQuorum
+    }
+
+    select {
+    case n := <-allAttemptsMade:
+        return n, resultError
+    case n := <-quorumReached:
+        return n, nil
+    }
+}
+
+func (agent *Agent) Get(ctx context.Context, siteID string, bucket string, keys [][]byte) ([]*SiblingSet, error) {
+    var partitionNumber uint64 = agent.PartitionResolver.Partition(siteID)
+    var replicaNodes []uint64 = agent.PartitionResolver.ReplicaNodes(partitionNumber)
+    var readMerger *ReadMerger = NewReadMerger(bucket)
+    var readResults chan getResult = make(chan getResult, len(replicaNodes))
+    var failed chan error = make(chan error, len(replicaNodes))
+    var nRead int = 0
+    var nFailed int = 0
+    var resultError error = ENoQuorum
+
+    opID, ctxDeadline := agent.newOperation(ctx)
+
+    var appliedNodes map[uint64]bool = make(map[uint64]bool, len(replicaNodes))
+
+    for _, nodeID := range replicaNodes {
+        if appliedNodes[nodeID] {
+            continue
+        }
+
+        appliedNodes[nodeID] = true
+
+        go func(nodeID uint64) {
+            siblingSets, err := agent.NodeClient.Get(ctxDeadline, nodeID, partitionNumber, siteID, bucket, keys)
+
+            agent.recordRequestMetrics("get", nodeID, err)
+
+            if err != nil {
+                Log.Errorf("Unable to get keys from bucket %s at site %s at node %d: %v", bucket, siteID, nodeID, err.Error())
+
+                failed <- err
+
+                return
+            }
+
+            readResults <- getResult{ nodeID: nodeID, siblingSets: siblingSets }
+        }(nodeID)
+    }
+
+    var mergedResult chan []*SiblingSet = make(chan []*SiblingSet)
+    var allAttemptsMade chan int = make(chan int, 1)
+
+    go func() {
+        for nRead + nFailed < len(appliedNodes) {
+            select {
+            case err := <-failed:
+                nFailed++
+
+                if err == EBucketDoesNotExist || err == ESiteDoesNotExist {
+                    resultError = err
+                }
+            case r := <-readResults:
+                nRead++
+
+                for i, key := range keys {
+                    readMerger.InsertKeyReplica(r.nodeID, string(key), r.siblingSets[i])
+                }
+
+                if nRead == agent.NQuorum(len(appliedNodes)) {
+                    // calculate result set
+                    var resultSet []*SiblingSet = make([]*SiblingSet, len(keys))
+
+                    for i, key := range keys {
+                        resultSet[i] = readMerger.Get(string(key))
+                    }
+
+                    mergedResult <- resultSet
+                }
+            }
+        }
+
+        agent.NodeReadRepairer.BeginRepair(partitionNumber, siteID, bucket, readMerger)
+        allAttemptsMade <- 1
+        agent.cancelOperation(opID)
+    }()
+
+    select {
+    case result := <-mergedResult:
+        return result, nil
+    case <-allAttemptsMade:
+        return nil, resultError
+    }
+}
+
+func (agent *Agent) GetMatches(ctx context.Context, siteID string, bucket string, keys [][]byte) (SiblingSetIterator, error) {
+    var partitionNumber uint64 = agent.PartitionResolver.Partition(siteID)
+    var replicaNodes []uint64 = agent.PartitionResolver.ReplicaNodes(partitionNumber)
+    var readMerger *ReadMerger = NewReadMerger(bucket)
+    var mergeIterator
*SiblingSetMergeIterator = NewSiblingSetMergeIterator(readMerger) + var readResults chan getMatchesResult = make(chan getMatchesResult, len(replicaNodes)) + var failed chan error = make(chan error, len(replicaNodes)) + var nRead int = 0 + var nFailed int = 0 + var resultError error = ENoQuorum + + opID, ctxDeadline := agent.newOperation(ctx) + + var appliedNodes map[uint64]bool = make(map[uint64]bool, len(replicaNodes)) + + for _, nodeID := range replicaNodes { + if appliedNodes[nodeID] { + continue + } + + appliedNodes[nodeID] = true + + go func(nodeID uint64) { + ssIterator, err := agent.NodeClient.GetMatches(ctxDeadline, nodeID, partitionNumber, siteID, bucket, keys) + + agent.recordRequestMetrics("get_matches", nodeID, err) + + if err != nil { + Log.Errorf("Unable to get matches from bucket %s at site %s at node %d: %v", bucket, siteID, nodeID, err.Error()) + + failed <- err + + return + } + + readResults <- getMatchesResult{ nodeID: nodeID, siblingSetIterator: ssIterator } + }(nodeID) + } + + var quorumReached chan int = make(chan int) + var allAttemptsMade chan int = make(chan int, 1) + + go func() { + for nFailed + nRead < len(appliedNodes) { + select { + case err := <-failed: + nFailed++ + + if err == EBucketDoesNotExist || err == ESiteDoesNotExist { + resultError = err + } + case result := <-readResults: + for result.siblingSetIterator.Next() { + readMerger.InsertKeyReplica(result.nodeID, string(result.siblingSetIterator.Key()), result.siblingSetIterator.Value()) + mergeIterator.AddKey(string(result.siblingSetIterator.Prefix()), string(result.siblingSetIterator.Key())) + } + + if result.siblingSetIterator.Error() != nil { + nFailed++ + } else { + nRead++ + } + + if nRead == agent.NQuorum(len(appliedNodes)) { + quorumReached <- 1 + } + } + } + + agent.NodeReadRepairer.BeginRepair(partitionNumber, siteID, bucket, readMerger) + allAttemptsMade <- 1 + agent.cancelOperation(opID) + }() + + select { + case <-allAttemptsMade: + return nil, resultError + case <-quorumReached: + mergeIterator.SortKeys() + return mergeIterator, nil + } +} + +func (agent *Agent) RelayStatus(ctx context.Context, siteID string, relayID string) (RelayStatus, error) { + var partitionNumber uint64 = agent.PartitionResolver.Partition(siteID) + var replicaNodes []uint64 = agent.PartitionResolver.ReplicaNodes(partitionNumber) + var results chan RelayStatus = make(chan RelayStatus, len(replicaNodes)) + var failed chan error = make(chan error, len(replicaNodes)) + var nRead int = 0 + var nFailed int = 0 + var resultError error + + opID, ctxDeadline := agent.newOperation(ctx) + + var appliedNodes map[uint64]bool = make(map[uint64]bool, len(replicaNodes)) + + for _, nodeID := range replicaNodes { + if appliedNodes[nodeID] { + continue + } + + appliedNodes[nodeID] = true + + go func(nodeID uint64) { + relayStatus, err := agent.NodeClient.RelayStatus(ctxDeadline, nodeID, siteID, relayID) + + agent.recordRequestMetrics("relay_status", nodeID, err) + + if err != nil { + Log.Errorf("Unable to get relay status for relay %s at node %d: %v", relayID, nodeID, err.Error()) + + failed <- err + + return + } + + results <- relayStatus + }(nodeID) + } + + var allAttemptsMade chan int = make(chan int, 1) + var mergedResult chan RelayStatus = make(chan RelayStatus) + + go func() { + for nRead + nFailed < len(appliedNodes) { + select { + case err := <-failed: + nFailed++ + resultError = err + case r := <-results: + nRead++ + + // If it's connected to this node return right away and cancel any + // ongoing requests to get the status + if 
r.Connected { + mergedResult <- r + agent.cancelOperation(opID) + } + } + } + + if resultError != nil { + allAttemptsMade <- 1 + } else { + mergedResult <- RelayStatus{ + Connected: false, + ConnectedTo: 0, + Ping: 0, + Site: "", + } + } + + agent.cancelOperation(opID) + }() + + select { + case result := <-mergedResult: + return result, nil + case <-allAttemptsMade: + return RelayStatus{}, resultError + } +} + +func (agent *Agent) newOperation(ctx context.Context) (uint64, context.Context) { + agent.mu.Lock() + defer agent.mu.Unlock() + + var id uint64 = agent.nextOperationID + agent.nextOperationID++ + + ctxDeadline, cancel := context.WithTimeout(ctx, agent.Timeout) + + agent.operationCancellers[id] = cancel + + return id, ctxDeadline +} + +func (agent *Agent) cancelOperation(id uint64) { + agent.mu.Lock() + defer agent.mu.Unlock() + + if cancel, ok := agent.operationCancellers[id]; ok { + cancel() + delete(agent.operationCancellers, id) + } +} + +func (agent *Agent) NQuorum(replicas int) int { + return (replicas / 2) + 1 +} + +func (agent *Agent) CancelAll() { + agent.mu.Lock() + defer agent.mu.Unlock() + + agent.NodeReadRepairer.StopRepairs() + + for id, cancel := range agent.operationCancellers { + cancel() + delete(agent.operationCancellers, id) + } +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/clusterio/clusterio.go b/vendor/github.com/armPelionEdge/devicedb/clusterio/clusterio.go new file mode 100644 index 0000000..aba93af --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/clusterio/clusterio.go @@ -0,0 +1,73 @@ +package clusterio +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "context" + + . "github.com/armPelionEdge/devicedb/bucket" + . "github.com/armPelionEdge/devicedb/data" + . 
"github.com/armPelionEdge/devicedb/routes" +) + +type ClusterIOAgent interface { + Merge(ctx context.Context, siteID string, bucket string, patch map[string]*SiblingSet) (replicas int, nApplied int, err error) + Batch(ctx context.Context, siteID string, bucket string, updateBatch *UpdateBatch) (replicas int, nApplied int, err error) + Get(ctx context.Context, siteID string, bucket string, keys [][]byte) ([]*SiblingSet, error) + GetMatches(ctx context.Context, siteID string, bucket string, keys [][]byte) (SiblingSetIterator, error) + RelayStatus(ctx context.Context, siteID string, relayID string) (RelayStatus, error) + CancelAll() +} + +type PartitionResolver interface { + Partition(partitioningKey string) uint64 + ReplicaNodes(partition uint64) []uint64 +} + +type NodeClient interface { + Merge(ctx context.Context, nodeID uint64, partition uint64, siteID string, bucket string, patch map[string]*SiblingSet, broadcastToRelays bool) error + Batch(ctx context.Context, nodeID uint64, partition uint64, siteID string, bucket string, updateBatch *UpdateBatch) (map[string]*SiblingSet, error) + Get(ctx context.Context, nodeID uint64, partition uint64, siteID string, bucket string, keys [][]byte) ([]*SiblingSet, error) + GetMatches(ctx context.Context, nodeID uint64, partition uint64, siteID string, bucket string, keys [][]byte) (SiblingSetIterator, error) + RelayStatus(ctx context.Context, nodeID uint64, siteID string, relayID string) (RelayStatus, error) + LocalNodeID() uint64 +} + +type NodeReadMerger interface { + // Add to the pool of replicas for this key + InsertKeyReplica(nodeID uint64, key string, siblingSet *SiblingSet) + // Get the merged set for this key + Get(key string) *SiblingSet + // Obtain a patch that needs to be merged into the specified node to bring it up to date + // for any keys for which there are updates that it has not received + Patch(nodeID uint64) map[string]*SiblingSet + // Get a set of nodes involved in the read merger + Nodes() map[uint64]bool +} + +type NodeReadRepairer interface { + BeginRepair(partition uint64, siteID string, bucket string, readMerger NodeReadMerger) + StopRepairs() +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/clusterio/read_merger.go b/vendor/github.com/armPelionEdge/devicedb/clusterio/read_merger.go new file mode 100644 index 0000000..4baa359 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/clusterio/read_merger.go @@ -0,0 +1,126 @@ +package clusterio +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "sync" + + . "github.com/armPelionEdge/devicedb/data" + "github.com/armPelionEdge/devicedb/resolver" + "github.com/armPelionEdge/devicedb/resolver/strategies" +) + +type ReadMerger struct { + keyVersions map[string]map[uint64]*SiblingSet + mergedKeys map[string]*SiblingSet + conflictResolver resolver.ConflictResolver + mu sync.Mutex +} + +func NewReadMerger(bucket string) *ReadMerger { + var conflictResolver resolver.ConflictResolver + + switch bucket { + case "lww": + conflictResolver = &strategies.LastWriterWins{} + default: + conflictResolver = &strategies.MultiValue{} + } + + return &ReadMerger{ + keyVersions: make(map[string]map[uint64]*SiblingSet), + mergedKeys: make(map[string]*SiblingSet), + conflictResolver: conflictResolver, + } +} + +func (readMerger *ReadMerger) InsertKeyReplica(nodeID uint64, key string, siblingSet *SiblingSet) { + readMerger.mu.Lock() + defer readMerger.mu.Unlock() + + if _, ok := readMerger.keyVersions[key]; !ok { + readMerger.keyVersions[key] = make(map[uint64]*SiblingSet) + } + + if siblingSet == nil { + siblingSet = NewSiblingSet(map[*Sibling]bool{ }) + } + + readMerger.keyVersions[key][nodeID] = siblingSet + + if readMerger.mergedKeys[key] == nil { + readMerger.mergedKeys[key] = siblingSet + } else { + readMerger.mergedKeys[key] = readMerger.mergedKeys[key].Sync(siblingSet) + } +} + +func (readMerger *ReadMerger) Get(key string) *SiblingSet { + readMerger.mu.Lock() + defer readMerger.mu.Unlock() + + if readMerger.mergedKeys[key] == nil { + return nil + } + + if readMerger.mergedKeys[key].Size() == 0 { + return nil + } + + return readMerger.conflictResolver.ResolveConflicts(readMerger.mergedKeys[key]) +} + +func (readMerger *ReadMerger) Patch(nodeID uint64) map[string]*SiblingSet { + readMerger.mu.Lock() + defer readMerger.mu.Unlock() + + var patch map[string]*SiblingSet = make(map[string]*SiblingSet, len(readMerger.keyVersions)) + + for key, versions := range readMerger.keyVersions { + if version, ok := versions[nodeID]; ok { + patch[key] = version.Diff(readMerger.mergedKeys[key]) + } else { + patch[key] = readMerger.mergedKeys[key] + } + } + + return patch +} + + +func (readMerger *ReadMerger) Nodes() map[uint64]bool { + readMerger.mu.Lock() + defer readMerger.mu.Unlock() + + var nodes map[uint64]bool = make(map[uint64]bool) + + for _, keyHolders := range readMerger.keyVersions { + for nodeID, _ := range keyHolders { + nodes[nodeID] = true + } + } + + return nodes +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/clusterio/read_repairer.go b/vendor/github.com/armPelionEdge/devicedb/clusterio/read_repairer.go new file mode 100644 index 0000000..4521f77 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/clusterio/read_repairer.go @@ -0,0 +1,133 @@ +package clusterio +// + // Copyright (c) 2019 ARM Limited. 
+ // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "context" + "sync" + "time" + + . "github.com/armPelionEdge/devicedb/data" + . "github.com/armPelionEdge/devicedb/logging" +) + +type ReadRepairer struct { + NodeClient NodeClient + Timeout time.Duration + mu sync.Mutex + nextOperationID uint64 + operationCancellers map[uint64]func() + stopped bool +} + +func NewReadRepairer(nodeClient NodeClient) *ReadRepairer { + return &ReadRepairer{ + NodeClient: nodeClient, + operationCancellers: make(map[uint64]func()), + } +} + +func (readRepairer *ReadRepairer) BeginRepair(partition uint64, siteID string, bucket string, readMerger NodeReadMerger) { + readRepairer.mu.Lock() + defer readRepairer.mu.Unlock() + + if readRepairer.stopped { + return + } + + var wg sync.WaitGroup + opID, ctxDeadline := readRepairer.newOperation(context.Background()) + + for nodeID, _ := range readMerger.Nodes() { + patch := readMerger.Patch(nodeID) + + for key, siblingSet := range patch { + if siblingSet.Size() == 0 { + // Filter out keys that don't need patch + delete(patch, key) + + continue + } + + Log.Infof("Repairing key %s in bucket %s at site %s at node %d", key, bucket, siteID, nodeID) + } + + if len(patch) == 0 { + continue + } + + wg.Add(1) + + go func(nodeID uint64, patch map[string]*SiblingSet) { + defer wg.Done() + + if err := readRepairer.NodeClient.Merge(ctxDeadline, nodeID, partition, siteID, bucket, patch, true); err != nil { + Log.Errorf("Unable to perform read repair on bucket %s at site %s at node %d: %v", bucket, siteID, nodeID, err.Error()) + } + }(nodeID, patch) + } + + go func() { + wg.Wait() + readRepairer.mu.Lock() + defer readRepairer.mu.Unlock() + + readRepairer.cancelOperation(opID) + }() +} + +func (readRepairer *ReadRepairer) newOperation(ctx context.Context) (uint64, context.Context) { + var id uint64 = readRepairer.nextOperationID + readRepairer.nextOperationID++ + + ctxDeadline, cancel := context.WithTimeout(ctx, readRepairer.Timeout) + + readRepairer.operationCancellers[id] = cancel + + return id, ctxDeadline +} + +func (readRepairer *ReadRepairer) cancelOperation(id uint64) { + if cancel, ok := readRepairer.operationCancellers[id]; ok { + cancel() + delete(readRepairer.operationCancellers, id) + } +} + +func (readRepairer *ReadRepairer) StopRepairs() { + readRepairer.mu.Lock() + defer readRepairer.mu.Unlock() + + if readRepairer.stopped { + return + } + + for opID, cancel := range 
readRepairer.operationCancellers { + cancel() + delete(readRepairer.operationCancellers, opID) + } + + readRepairer.stopped = true +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/clusterio/sibling_set_merge_iterator.go b/vendor/github.com/armPelionEdge/devicedb/clusterio/sibling_set_merge_iterator.go new file mode 100644 index 0000000..2247225 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/clusterio/sibling_set_merge_iterator.go @@ -0,0 +1,140 @@ +package clusterio +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "sort" + + . "github.com/armPelionEdge/devicedb/data" +) + +type SiblingSetMergeIterator struct { + readMerger NodeReadMerger + keys [][]string + prefixes []string + keySet map[string]bool + prefixIndexes map[string]int + currentPrefixIndex int + currentKeyIndex int +} + +func NewSiblingSetMergeIterator(readMerger NodeReadMerger) *SiblingSetMergeIterator { + return &SiblingSetMergeIterator{ + readMerger: readMerger, + keys: make([][]string, 0), + prefixes: make([]string, 0), + keySet: make(map[string]bool), + prefixIndexes: make(map[string]int), + currentKeyIndex: -1, + currentPrefixIndex: -1, + } +} + +func (iter *SiblingSetMergeIterator) AddKey(prefix string, key string) { + if _, ok := iter.keySet[key]; ok { + // Ignore this request. This key was already added before + return + } + + iter.keySet[key] = true + + if _, ok := iter.prefixIndexes[prefix]; !ok { + // this is a prefix that hasn't been seen before. 
insert a new key list for this prefix + iter.prefixIndexes[prefix] = len(iter.keys) + iter.keys = append(iter.keys, []string{ }) + iter.prefixes = append(iter.prefixes, prefix) + } + + prefixIndex := iter.prefixIndexes[prefix] + iter.keys[prefixIndex] = append(iter.keys[prefixIndex], key) +} + +func (iter *SiblingSetMergeIterator) SortKeys() { + for _, keys := range iter.keys { + sort.Strings(keys) + } +} + +func (iter *SiblingSetMergeIterator) Next() bool { + if iter.currentPrefixIndex < 0 { + iter.currentPrefixIndex = 0 + } + + if !(iter.currentPrefixIndex < len(iter.keys)) { + return false + } + + iter.currentKeyIndex++ + + if iter.currentKeyIndex >= len(iter.keys[iter.currentPrefixIndex]) { + iter.currentPrefixIndex++ + iter.currentKeyIndex = 0 + } + + return iter.currentPrefixIndex < len(iter.keys) && iter.currentKeyIndex < len(iter.keys[iter.currentPrefixIndex]) +} + +func (iter *SiblingSetMergeIterator) Prefix() []byte { + if iter.currentPrefixIndex < 0 || iter.currentPrefixIndex >= len(iter.keys) || len(iter.keys) == 0 { + return nil + } + + return []byte(iter.prefixes[iter.currentPrefixIndex]) +} + +func (iter *SiblingSetMergeIterator) Key() []byte { + if iter.currentPrefixIndex < 0 || iter.currentPrefixIndex >= len(iter.keys) || len(iter.keys) == 0 { + return nil + } + + if iter.currentKeyIndex >= len(iter.keys[iter.currentPrefixIndex]) { + return nil + } + + return []byte(iter.keys[iter.currentPrefixIndex][iter.currentKeyIndex]) +} + +func (iter *SiblingSetMergeIterator) Value() *SiblingSet { + if iter.currentPrefixIndex < 0 || iter.currentPrefixIndex >= len(iter.keys) || len(iter.keys) == 0 { + return nil + } + + if iter.currentKeyIndex >= len(iter.keys[iter.currentPrefixIndex]) { + return nil + } + + return iter.readMerger.Get(iter.keys[iter.currentPrefixIndex][iter.currentKeyIndex]) +} + +func (iter *SiblingSetMergeIterator) LocalVersion() uint64 { + return 0 +} + +func (iter *SiblingSetMergeIterator) Release() { +} + +func (iter *SiblingSetMergeIterator) Error() error { + return nil +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/compatibility/compatibility.go b/vendor/github.com/armPelionEdge/devicedb/compatibility/compatibility.go new file mode 100644 index 0000000..a65cd06 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/compatibility/compatibility.go @@ -0,0 +1,322 @@ +// This file provides utility functions that make backwards compatibility with original DeviceDB (written in NodeJS) +// easier +package compatibility +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "encoding/json" + "sort" + "strings" + "errors" + "fmt" + + . "github.com/armPelionEdge/devicedb/shared" + . "github.com/armPelionEdge/devicedb/storage" + . "github.com/armPelionEdge/devicedb/logging" + . "github.com/armPelionEdge/devicedb/data" + . "github.com/armPelionEdge/devicedb/bucket" + . "github.com/armPelionEdge/devicedb/merkle" + . "github.com/armPelionEdge/devicedb/bucket/builtin" +) + +func SiblingToNormalizedJSON(sibling *Sibling) string { + return valueToJSON(sibling.Value()) + dotToJSON(sibling.Clock().Dot()) + contextToJSON(sibling.Clock().Context()) +} + +func valueToJSON(value []byte) string { + if value == nil { + return "null" + } + + j, _ := json.Marshal(string(value)) + + return string(j) +} + +func dotToJSON(dot Dot) string { + nodeIDJSON, _ := json.Marshal(dot.NodeID) + countJSON, _ := json.Marshal(dot.Count) + + return "[" + string(nodeIDJSON) + "," + string(countJSON) + "]" +} + +func contextToJSON(context map[string]uint64) string { + keys := make([]string, 0, len(context)) + + for k, _ := range context { + keys = append(keys, k) + } + + sort.Strings(keys) + + j := "[" + + for i := 0; i < len(keys); i += 1 { + nodeIDJSON, _ := json.Marshal(keys[i]) + countJSON, _ := json.Marshal(context[keys[i]]) + + j += "[[" + string(nodeIDJSON) + "," + string(countJSON) + "],null]" + + if i != len(keys) - 1 { + j += "," + } + } + + j += "]" + + return j +} + +func HashSiblingSet(key string, siblingSet *SiblingSet) Hash { + siblingsJSON := make([]string, 0, siblingSet.Size()) + + for sibling := range siblingSet.Iter() { + siblingsJSON = append(siblingsJSON, SiblingToNormalizedJSON(sibling)) + } + + sort.Strings(siblingsJSON) + + j, _ := json.Marshal(siblingsJSON) + + return NewHash([]byte(key + string(j))) +} + +func UpgradeLegacyDatabase(legacyDatabasePath string, serverConfig YAMLServerConfig) error { + bucketDataPrefix := "cache." 
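+    // Legacy keys are laid out as "cache.<legacyBucketName>.<key>" (hence the
+    // strings.Split on "." below). bucketNameMapping translates the bucket names
+    // used by the original NodeJS implementation into their new equivalents, so,
+    // for example, an object stored at "cache.shared.foo" is migrated into the
+    // "default" bucket under the key "foo".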
+    bucketNameMapping := map[string]string {
+        "shared": "default",
+        "lww": "lww",
+        "cloud": "cloud",
+        "local": "local",
+    }
+
+    legacyDatabaseDriver := NewLevelDBStorageDriver(legacyDatabasePath, nil)
+    newDBStorageDriver := NewLevelDBStorageDriver(serverConfig.DBFile, nil)
+    bucketList := NewBucketList()
+
+    err := newDBStorageDriver.Open()
+
+    if err != nil {
+        return err
+    }
+
+    defer newDBStorageDriver.Close()
+
+    err = legacyDatabaseDriver.Open()
+
+    if err != nil {
+        return err
+    }
+
+    defer legacyDatabaseDriver.Close()
+
+    defaultBucket, _ := NewDefaultBucket("", NewPrefixedStorageDriver([]byte{ 0 }, newDBStorageDriver), serverConfig.MerkleDepth)
+    cloudBucket, _ := NewCloudBucket("", NewPrefixedStorageDriver([]byte{ 1 }, newDBStorageDriver), serverConfig.MerkleDepth, RelayMode)
+    lwwBucket, _ := NewLWWBucket("", NewPrefixedStorageDriver([]byte{ 2 }, newDBStorageDriver), serverConfig.MerkleDepth)
+    localBucket, _ := NewLocalBucket("", NewPrefixedStorageDriver([]byte{ 3 }, newDBStorageDriver), MerkleMinDepth)
+
+    bucketList.AddBucket(defaultBucket)
+    bucketList.AddBucket(cloudBucket)
+    bucketList.AddBucket(lwwBucket)
+    bucketList.AddBucket(localBucket)
+
+    iter, err := legacyDatabaseDriver.GetMatches([][]byte{ []byte(bucketDataPrefix) })
+
+    if err != nil {
+        return err
+    }
+
+    for iter.Next() {
+        key := iter.Key()[len(iter.Prefix()):]
+        value := iter.Value()
+        keyParts := strings.Split(string(key), ".")
+
+        if len(keyParts) < 2 {
+            iter.Release()
+
+            return errors.New(fmt.Sprintf("Key was invalid: %s", key))
+        }
+
+        legacyBucketName := keyParts[0]
+        newBucketName, ok := bucketNameMapping[legacyBucketName]
+
+        if !ok {
+            Log.Warningf("Cannot translate object at %s because %s is not a recognized bucket name", key, legacyBucketName)
+
+            continue
+        }
+
+        if !bucketList.HasBucket(newBucketName) {
+            Log.Warningf("Cannot translate object at %s because %s is not a recognized bucket name", key, newBucketName)
+
+            continue
+        }
+
+        bucket := bucketList.Get(newBucketName)
+        siblingSet, err := DecodeLegacySiblingSet(value, legacyBucketName == "lww")
+
+        if err != nil {
+            Log.Warningf("Unable to decode object at %s (%s): %v", key, string(value), err)
+
+            continue
+        }
+
+        nonPrefixedKey := string(key)[len(legacyBucketName) + 1:]
+        err = bucket.Merge(map[string]*SiblingSet{
+            nonPrefixedKey: siblingSet,
+        })
+
+        if err != nil {
+            Log.Warningf("Unable to migrate object at %s (%s): %v", key, string(value), err)
+        } else {
+            //Log.Debugf("Migrated object in legacy bucket %s at key %s", legacyBucketName, nonPrefixedKey)
+        }
+    }
+
+    if iter.Error() != nil {
+        Log.Errorf("An error occurred while scanning through the legacy database: %v", iter.Error())
+    }
+
+    iter.Release()
+
+    return nil
+}
+
+func DecodeLegacySiblingSet(data []byte, lww bool) (*SiblingSet, error) {
+    var lss legacySiblingSet
+
+    err := json.Unmarshal(data, &lss)
+
+    if err != nil {
+        return nil, err
+    }
+
+    return lss.ToSiblingSet(lww), nil
+}
+
+type legacySiblingSet []legacySibling
+
+func (lss *legacySiblingSet) ToSiblingSet(lww bool) *SiblingSet {
+    var siblings map[*Sibling]bool = make(map[*Sibling]bool, len(*lss))
+
+    for _, ls := range *lss {
+        siblings[ls.ToSibling(lww)] = true
+    }
+
+    return NewSiblingSet(siblings)
+}
+
+type legacySibling struct {
+    Value *string `json:"value"`
+    Clock legacyDVV `json:"clock"`
+    CreationTime uint64 `json:"creationTime"`
+}
+
+type legacyLWWValue struct {
+    Value *string `json:"value"`
+    Timestamp *uint64 `json:"timestamp"`
+}
+
+func (ls *legacySibling) ToSibling(lww bool) *Sibling {
+    var value []byte
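+    // Legacy lww bucket entries wrap the stored value in a JSON envelope of the
+    // form {"value": ..., "timestamp": ...} (see legacyLWWValue above). When that
+    // envelope parses, only the inner value is kept; a nil inner value is carried
+    // through as a nil sibling value, effectively a delete marker.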
+ if ls.Value != nil { + var lwwValue legacyLWWValue + + value = []byte(*ls.Value) + + if lww { + err := json.Unmarshal(value, &lwwValue) + + if err == nil && lwwValue.Timestamp != nil { + value = nil + + if lwwValue.Value != nil { + value = []byte(*lwwValue.Value) + } + } + } + } + + return NewSibling(ls.Clock.ToDVV(), value, ls.CreationTime) +} + +type legacyDVV struct { + Dot legacyDot `json:"dot"` + Context []legacyDot `json:"context"` +} + +func (ldvv *legacyDVV) ToDVV() *DVV { + var context map[string]uint64 = make(map[string]uint64, len(ldvv.Context)) + + for _, ld := range ldvv.Context { + context[ld.node] = ld.count + } + + return NewDVV(ldvv.Dot.ToDot(), context) +} + +type legacyDot struct { + node string + count uint64 +} + +func (ld *legacyDot) ToDot() *Dot { + return NewDot(ld.node, ld.count) +} + +func (ld *legacyDot) MarshalJSON() ([]byte, error) { + var a [2]interface{ } + + a[0] = ld.node + a[1] = ld.count + + return json.Marshal(a) +} + +func (ld *legacyDot) UnmarshalJSON(data []byte) error { + var a [2]json.RawMessage + + err := json.Unmarshal(data, &a) + + if err != nil { + return err + } + + err = json.Unmarshal(a[0], &ld.node) + + if err != nil { + return err + } + + err = json.Unmarshal(a[1], &ld.count) + + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/armPelionEdge/devicedb/data/dvv.go b/vendor/github.com/armPelionEdge/devicedb/data/dvv.go new file mode 100644 index 0000000..09426e3 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/data/dvv.go @@ -0,0 +1,170 @@ +package data +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. 
+ // + + +import ( + "encoding/binary" + "encoding/gob" + "bytes" +) + +type Dot struct { + NodeID string `json:"node"` + Count uint64 `json:"count"` +} + +type DVV struct { + VVDot Dot `json:"dot"` + VV map[string]uint64 `json:"vv"` +} + +func NewDot(nodeID string, count uint64) *Dot { + return &Dot{nodeID, count} +} + +func NewDVV(dot *Dot, vv map[string]uint64) *DVV { + return &DVV{*dot, vv} +} + +func (dvv *DVV) Dot() Dot { + return dvv.VVDot +} + +func (dvv *DVV) Context() map[string]uint64 { + return dvv.VV +} + +func (dvv *DVV) HappenedBefore(otherDVV *DVV) bool { + if _, ok := otherDVV.Context()[dvv.Dot().NodeID]; ok { + return dvv.Dot().Count <= otherDVV.Context()[dvv.Dot().NodeID] + } + + return false +} + +func (dvv *DVV) Replicas() []string { + replicas := make([]string, 0, len(dvv.Context()) + 1) + dotNodeID := dvv.Dot().NodeID + + for nodeID, _ := range dvv.Context() { + replicas = append(replicas, nodeID) + + if nodeID == dotNodeID { + dotNodeID = "" + } + } + + if len(dotNodeID) > 0 { + replicas = append(replicas, dotNodeID) + } + + return replicas +} + +func (dvv *DVV) MaxDot(nodeID string) uint64 { + var maxDot uint64 + + if dvv.Dot().NodeID == nodeID { + if dvv.Dot().Count > maxDot { + maxDot = dvv.Dot().Count + } + } + + if _, ok := dvv.Context()[nodeID]; ok { + if dvv.Context()[nodeID] > maxDot { + maxDot = dvv.Context()[nodeID] + } + } + + return maxDot +} + +func (dvv *DVV) Equals(otherDVV *DVV) bool { + if dvv.Dot().NodeID != otherDVV.Dot().NodeID || dvv.Dot().Count != otherDVV.Dot().Count { + return false + } + + if len(dvv.Context()) != len(otherDVV.Context()) { + return false + } + + for nodeID, count := range dvv.Context() { + if _, ok := otherDVV.Context()[nodeID]; !ok { + return false + } + + if count != otherDVV.Context()[nodeID] { + return false + } + } + + return true +} + +func (dvv *DVV) Hash() Hash { + var hash Hash + + for nodeID, count := range dvv.Context() { + countBuffer := make([]byte, 8) + binary.BigEndian.PutUint64(countBuffer, count) + + hash = hash.Xor(NewHash([]byte(nodeID))).Xor(NewHash(countBuffer)) + } + + countBuffer := make([]byte, 8) + binary.BigEndian.PutUint64(countBuffer, dvv.Dot().Count) + + hash = hash.Xor(NewHash([]byte(dvv.Dot().NodeID))).Xor(NewHash(countBuffer)) + + return hash +} + +func (dvv *DVV) MarshalBinary() ([]byte, error) { + var encoding bytes.Buffer + encoder := gob.NewEncoder(&encoding) + + encoder.Encode(dvv.Context()) + encoder.Encode(dvv.Dot().NodeID) + encoder.Encode(dvv.Dot().Count) + + return encoding.Bytes(), nil +} + +func (dvv *DVV) UnmarshalBinary(data []byte) error { + var dot Dot + var versionVector map[string]uint64 + + encoding := bytes.NewBuffer(data) + decoder := gob.NewDecoder(encoding) + + decoder.Decode(&versionVector) + decoder.Decode(&dot.NodeID) + decoder.Decode(&dot.Count) + + dvv.VVDot = dot + dvv.VV = versionVector + + return nil +} diff --git a/vendor/github.com/armPelionEdge/devicedb/data/hash.go b/vendor/github.com/armPelionEdge/devicedb/data/hash.go new file mode 100644 index 0000000..f60cfa6 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/data/hash.go @@ -0,0 +1,82 @@ +package data +// + // Copyright (c) 2019 ARM Limited. 
+ //
+ // SPDX-License-Identifier: MIT
+ //
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
+ // of this software and associated documentation files (the "Software"), to
+ // deal in the Software without restriction, including without limitation the
+ // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ // sell copies of the Software, and to permit persons to whom the Software is
+ // furnished to do so, subject to the following conditions:
+ //
+ // The above copyright notice and this permission notice shall be included in all
+ // copies or substantial portions of the Software.
+ //
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ // SOFTWARE.
+ //
+
+
+import (
+    "crypto/md5"
+    "encoding/binary"
+)
+
+const (
+    HASH_SIZE_BYTES = 16
+)
+
+type Hash struct {
+    Hash [2]uint64
+}
+
+func NewHash(input []byte) Hash {
+    var newHash Hash
+
+    sum := md5.Sum(input)
+
+    newHash.Hash[1] = binary.BigEndian.Uint64(sum[0:8])
+    newHash.Hash[0] = binary.BigEndian.Uint64(sum[8:16])
+
+    return newHash
+}
+
+func (hash Hash) Xor(otherHash Hash) Hash {
+    return Hash{[2]uint64{ hash.Hash[0] ^ otherHash.Hash[0], hash.Hash[1] ^ otherHash.Hash[1] }}
+}
+
+func (hash Hash) Bytes() [16]byte {
+    var result [16]byte
+
+    binary.BigEndian.PutUint64(result[0:8], hash.High())
+    binary.BigEndian.PutUint64(result[8:16], hash.Low())
+
+    return result
+}
+
+func (hash Hash) Low() uint64 {
+    return hash.Hash[0]
+}
+
+func (hash Hash) SetLow(l uint64) Hash {
+    hash.Hash[0] = l
+
+    return hash
+}
+
+func (hash Hash) High() uint64 {
+    return hash.Hash[1]
+}
+
+func (hash Hash) SetHigh(h uint64) Hash {
+    hash.Hash[1] = h
+
+    return hash
+}
diff --git a/vendor/github.com/armPelionEdge/devicedb/data/row.go b/vendor/github.com/armPelionEdge/devicedb/data/row.go
new file mode 100644
index 0000000..08a335d
--- /dev/null
+++ b/vendor/github.com/armPelionEdge/devicedb/data/row.go
@@ -0,0 +1,65 @@
+package data
+//
+ // Copyright (c) 2019 ARM Limited.
+ //
+ // SPDX-License-Identifier: MIT
+ //
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
+ // of this software and associated documentation files (the "Software"), to
+ // deal in the Software without restriction, including without limitation the
+ // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ // sell copies of the Software, and to permit persons to whom the Software is
+ // furnished to do so, subject to the following conditions:
+ //
+ // The above copyright notice and this permission notice shall be included in all
+ // copies or substantial portions of the Software.
+ //
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ // SOFTWARE.
+ //
+
+
+import (
+    "encoding/json"
+)
+
+type Row struct {
+    Key string `json:"key"`
+    LocalVersion uint64 `json:"localVersion"`
+    Siblings *SiblingSet `json:"siblings"`
+}
+
+func (row *Row) Encode() []byte {
+    result, _ := json.Marshal(row)
+
+    return result
+}
+
+func (row *Row) Decode(encodedRow []byte, formatVersion string) error {
+    if formatVersion == "0" {
+        var siblingSet SiblingSet
+
+        err := siblingSet.Decode(encodedRow)
+
+        if err == nil {
+            row.LocalVersion = 0
+            row.Siblings = &siblingSet
+
+            return nil
+        }
+
+        // if it fails to decode using format version
+        // 0, which predates the row type, the value might
+        // already have been converted to the new format
+        // version by a partially completed upgrade.
+        // in that case, try to decode it as a regular row
+        // before returning an error
+    }
+
+    return json.Unmarshal(encodedRow, row)
+}
\ No newline at end of file
diff --git a/vendor/github.com/armPelionEdge/devicedb/data/sibling.go b/vendor/github.com/armPelionEdge/devicedb/data/sibling.go
new file mode 100644
index 0000000..f4207a4
--- /dev/null
+++ b/vendor/github.com/armPelionEdge/devicedb/data/sibling.go
@@ -0,0 +1,116 @@
+package data
+//
+ // Copyright (c) 2019 ARM Limited.
+ //
+ // SPDX-License-Identifier: MIT
+ //
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
+ // of this software and associated documentation files (the "Software"), to
+ // deal in the Software without restriction, including without limitation the
+ // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ // sell copies of the Software, and to permit persons to whom the Software is
+ // furnished to do so, subject to the following conditions:
+ //
+ // The above copyright notice and this permission notice shall be included in all
+ // copies or substantial portions of the Software.
+ //
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ // SOFTWARE.
+ //
+
+
+import (
+    "encoding/gob"
+    "bytes"
+)
+
+type Sibling struct {
+    VectorClock *DVV `json:"clock"`
+    BinaryValue []byte `json:"value"`
+    PhysicalTimestamp uint64 `json:"timestamp"`
+}
+
+func NewSibling(clock *DVV, value []byte, timestamp uint64) *Sibling {
+    return &Sibling{clock, value, timestamp}
+}
+
+func (sibling *Sibling) Clock() *DVV {
+    return sibling.VectorClock
+}
+
+func (sibling *Sibling) Value() []byte {
+    return sibling.BinaryValue
+}
+
+func (sibling *Sibling) IsTombstone() bool {
+    return sibling.Value() == nil
+}
+
+func (sibling *Sibling) Timestamp() uint64 {
+    return sibling.PhysicalTimestamp
+}
+
+func (sibling *Sibling) Hash() Hash {
+    if sibling == nil || sibling.IsTombstone() {
+        return Hash{[2]uint64{ 0, 0 }}
+    }
+
+    return NewHash(sibling.Value()).Xor(sibling.Clock().Hash())
+}
+
+// provides an ordering between siblings in order to break
+// ties and decide which one to keep when two siblings have
+// the same clock value. Favors keeping a value instead of a
+// tombstone.
+func (sibling *Sibling) Compare(otherSibling *Sibling) int {
+    if sibling.IsTombstone() && !otherSibling.IsTombstone() {
+        return -1
+    } else if !sibling.IsTombstone() && otherSibling.IsTombstone() {
+        return 1
+    } else if sibling.IsTombstone() && otherSibling.IsTombstone() {
+        if sibling.Timestamp() < otherSibling.Timestamp() {
+            return -1
+        } else if sibling.Timestamp() > otherSibling.Timestamp() {
+            return 1
+        } else {
+            return 0
+        }
+    } else {
+        return bytes.Compare(sibling.Value(), otherSibling.Value())
+    }
+}
+
+func (sibling *Sibling) MarshalBinary() ([]byte, error) {
+    var encoding bytes.Buffer
+    encoder := gob.NewEncoder(&encoding)
+
+    encoder.Encode(sibling.Clock())
+    encoder.Encode(sibling.Timestamp())
+    encoder.Encode(sibling.Value())
+
+    return encoding.Bytes(), nil
+}
+
+func (sibling *Sibling) UnmarshalBinary(data []byte) error {
+    var clock DVV
+    var timestamp uint64
+    var value []byte
+
+    encoding := bytes.NewBuffer(data)
+    decoder := gob.NewDecoder(encoding)
+
+    decoder.Decode(&clock)
+    decoder.Decode(&timestamp)
+    decoder.Decode(&value)
+
+    sibling.VectorClock = &clock
+    sibling.PhysicalTimestamp = timestamp
+    sibling.BinaryValue = value
+
+    return nil
+}
diff --git a/vendor/github.com/armPelionEdge/devicedb/data/sibling_set.go b/vendor/github.com/armPelionEdge/devicedb/data/sibling_set.go
new file mode 100644
index 0000000..44691c1
--- /dev/null
+++ b/vendor/github.com/armPelionEdge/devicedb/data/sibling_set.go
@@ -0,0 +1,369 @@
+package data
+//
+ // Copyright (c) 2019 ARM Limited.
+ //
+ // SPDX-License-Identifier: MIT
+ //
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
+ // of this software and associated documentation files (the "Software"), to
+ // deal in the Software without restriction, including without limitation the
+ // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ // sell copies of the Software, and to permit persons to whom the Software is
+ // furnished to do so, subject to the following conditions:
+ //
+ // The above copyright notice and this permission notice shall be included in all
+ // copies or substantial portions of the Software.
+ //
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ // SOFTWARE.
+ //
+
+
+import (
+    "encoding/json"
+    "encoding/gob"
+    "bytes"
+)
+
+type SiblingSet struct {
+    siblings map[*Sibling]bool
+}
+
+func NewSiblingSet(siblings map[*Sibling]bool) *SiblingSet {
+    return &SiblingSet{siblings}
+}
+
+func (siblingSet *SiblingSet) Add(sibling *Sibling) *SiblingSet {
+    siblingSet.siblings[sibling] = true
+
+    return siblingSet
+}
+
+func (siblingSet *SiblingSet) Delete(sibling *Sibling) *SiblingSet {
+    delete(siblingSet.siblings, sibling)
+
+    return siblingSet
+}
+
+func (siblingSet *SiblingSet) Has(sibling *Sibling) bool {
+    _, ok := siblingSet.siblings[sibling]
+
+    return ok
+}
+
+func (siblingSet *SiblingSet) Size() int {
+    return len(siblingSet.siblings)
+}
+
+func (siblingSet *SiblingSet) Value() []byte {
+    if siblingSet.Size() != 1 || siblingSet.IsTombstoneSet() {
+        return nil
+    }
+
+    for sibling, _ := range siblingSet.siblings {
+        return sibling.Value()
+    }
+
+    return nil
+}
+
+func (siblingSet *SiblingSet) Sync(otherSiblingSet *SiblingSet) *SiblingSet {
+    newSiblingSet := NewSiblingSet(map[*Sibling]bool{ })
+
+    for mySibling, _ := range siblingSet.siblings {
+        newSiblingSet.Add(mySibling)
+
+        for theirSibling, _ := range otherSiblingSet.siblings {
+            if mySibling.Clock().HappenedBefore(theirSibling.Clock()) {
+                newSiblingSet.Delete(mySibling)
+            } else if mySibling.Clock().Equals(theirSibling.Clock()) {
+                // decide which one to keep. they may have the same clock
+                // but different values if the key was garbage collected
+                // at some node at some point
+                if mySibling.Compare(theirSibling) <= 0 {
+                    newSiblingSet.Delete(mySibling)
+                }
+            }
+        }
+    }
+
+    for theirSibling, _ := range otherSiblingSet.siblings {
+        newSiblingSet.Add(theirSibling)
+
+        for mySibling, _ := range siblingSet.siblings {
+            if theirSibling.Clock().HappenedBefore(mySibling.Clock()) {
+                newSiblingSet.Delete(theirSibling)
+            } else if theirSibling.Clock().Equals(mySibling.Clock()) {
+                // decide which one to keep. they may have the same clock
+                // but different values if the key was garbage collected
+                // at some node at some point
+                if theirSibling.Compare(mySibling) < 0 {
+                    newSiblingSet.Delete(theirSibling)
+                }
+            }
+        }
+    }
+
+    return newSiblingSet
+}
+
+func (siblingSet *SiblingSet) MergeSync(otherSiblingSet *SiblingSet, replica string) *SiblingSet {
+    // CLD-434
+    // Situation:
+    // A replica has forgotten the causal history of a certain key
+    // (through garbage collection or a data wipe). It may then receive
+    // a new update request for that key, which starts its causal
+    // history fresh. If it later syncs with a node that still holds
+    // the old causal history for the key, the new update gets
+    // overwritten. This is most prominent with garbage collected keys:
+    // an old tombstone comes back and overwrites the new value for the
+    // key, so to the client it looks like the value they just wrote
+    // disappeared.
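+    //
+    // Hypothetical illustration (the replica name and dot counts below
+    // are invented for this sketch, not taken from a real trace):
+    // replica "r" wrote a key and produced a sibling dotted (r, 5),
+    // then lost its store and wrote the key again, producing a fresh
+    // dot (r, 1). A peer still holding history up to (r, 5) would
+    // causally dominate the new (r, 1) sibling under a plain Sync and
+    // silently discard the new write. MergeSync detects the gap by
+    // comparing JoinOne(replica) on both sets and re-dots the affected
+    // siblings past the remembered maximum so they survive the merge.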
+    otherMaxReplicaDot := otherSiblingSet.JoinOne(replica)
+    myMaxReplicaDot := siblingSet.JoinOne(replica)
+    newSiblingSet := NewSiblingSet(map[*Sibling]bool{ })
+    maxReplicaDot := otherMaxReplicaDot
+
+    for mySibling, _ := range siblingSet.siblings {
+        newSiblingSet.Add(mySibling)
+
+        if myMaxReplicaDot < otherMaxReplicaDot {
+            // Since the other sibling set indicates there were events at
+            // the replica node that aren't known by the replica itself,
+            // the replica must have forgotten the history of this key at
+            // some point due to garbage collection or a data wipe. To
+            // prevent updates from being lost we generate new siblings
+            // with the same values.
+            for theirSibling, _ := range otherSiblingSet.siblings {
+                if mySibling.Clock().HappenedBefore(theirSibling.Clock()) && mySibling.Clock().MaxDot(replica) < theirSibling.Clock().MaxDot(replica) && mySibling.Clock().MaxDot(replica) != 0 {
+                    // mySibling will be overwritten by theirSibling, so replace it with a new sibling
+                    newSiblingSet.Delete(mySibling)
+                    newSiblingSet.Add(NewSibling(NewDVV(NewDot(replica, maxReplicaDot + 1), mySibling.Clock().Context()), mySibling.Value(), mySibling.Timestamp()))
+                    maxReplicaDot++
+                }
+            }
+        }
+    }
+
+    return newSiblingSet.Sync(otherSiblingSet)
+}
+
+func (siblingSet *SiblingSet) Diff(otherSiblingSet *SiblingSet) *SiblingSet {
+    diffSiblingSet := NewSiblingSet(map[*Sibling]bool{ })
+
+    for theirSibling, _ := range otherSiblingSet.siblings {
+        diffSiblingSet.Add(theirSibling)
+
+        for mySibling, _ := range siblingSet.siblings {
+            if theirSibling.Clock().HappenedBefore(mySibling.Clock()) || theirSibling.Clock().Equals(mySibling.Clock()) {
+                diffSiblingSet.Delete(theirSibling)
+            }
+        }
+    }
+
+    return diffSiblingSet
+}
+
+func (siblingSet *SiblingSet) Join() map[string]uint64 {
+    collectiveClock := make(map[string]uint64)
+
+    for sibling, _ := range siblingSet.siblings {
+        for _, replica := range sibling.Clock().Replicas() {
+            maxDot := sibling.Clock().MaxDot(replica)
+
+            if count, ok := collectiveClock[replica]; !ok || count < maxDot {
+                collectiveClock[replica] = maxDot
+            }
+        }
+    }
+
+    return collectiveClock
+}
+
+func (siblingSet *SiblingSet) JoinOne(replica string) uint64 {
+    var s uint64
+
+    for sibling, _ := range siblingSet.siblings {
+        maxDot := sibling.Clock().MaxDot(replica)
+
+        if s < maxDot {
+            s = maxDot
+        }
+    }
+
+    return s
+}
+
+func (siblingSet *SiblingSet) Discard(clock *DVV) *SiblingSet {
+    newSiblingSet := NewSiblingSet(map[*Sibling]bool{})
+
+    for sibling, _ := range siblingSet.siblings {
+        if !sibling.Clock().HappenedBefore(clock) {
+            newSiblingSet.Add(sibling)
+        }
+    }
+
+    return newSiblingSet
+}
+
+func (siblingSet *SiblingSet) Event(contextClock map[string]uint64, replica string) *DVV {
+    var s uint64
+
+    if count, ok := contextClock[replica]; ok {
+        s = count
+    }
+
+    for sibling, _ := range siblingSet.siblings {
+        if maxDot := sibling.Clock().MaxDot(replica); s < maxDot {
+            s = maxDot
+        }
+    }
+
+    return NewDVV(NewDot(replica, s+1), contextClock)
+}
+
+func (siblingSet *SiblingSet) IsTombstoneSet() bool {
+    for sibling, _ := range siblingSet.siblings {
+        if !sibling.IsTombstone() {
+            return false
+        }
+    }
+
+    return true
+}
+
+func (siblingSet *SiblingSet) CanPurge(timestampCutoff uint64) bool {
+    for sibling := range siblingSet.Iter() {
+        if !sibling.IsTombstone() || sibling.Timestamp() >= timestampCutoff {
+            return false
+        }
+    }
+
+    return true
+}
+
+func (siblingSet *SiblingSet) GetOldestTombstone() *Sibling {
+    var oldestTombstone *Sibling
+
+    for sibling, _ := range
siblingSet.siblings {
+        if sibling.IsTombstone() {
+            if oldestTombstone == nil {
+                oldestTombstone = sibling
+            } else if oldestTombstone.Timestamp() > sibling.Timestamp() {
+                oldestTombstone = sibling
+            }
+        }
+    }
+
+    return oldestTombstone
+}
+
+func (siblingSet *SiblingSet) Iter() <-chan *Sibling {
+    ch := make(chan *Sibling)
+
+    go func() {
+        for sibling, _ := range siblingSet.siblings {
+            ch <- sibling
+        }
+
+        close(ch)
+    } ()
+
+    return ch
+}
+
+func (siblingSet *SiblingSet) Hash(key []byte) Hash {
+    if siblingSet == nil {
+        return Hash{[2]uint64{ 0, 0 }}
+    }
+
+    var result Hash
+
+    for sibling := range siblingSet.Iter() {
+        result = result.Xor(sibling.Hash())
+    }
+
+    if result.Low() != 0 && result.High() != 0 {
+        result = result.Xor(NewHash(key))
+    }
+
+    return result
+}
+
+func (siblingSet *SiblingSet) MarshalBinary() ([]byte, error) {
+    var encoding bytes.Buffer
+
+    encoder := gob.NewEncoder(&encoding)
+
+    err := encoder.Encode(siblingSet.siblings)
+
+    return encoding.Bytes(), err
+}
+
+func (siblingSet *SiblingSet) UnmarshalBinary(data []byte) error {
+    var siblings map[*Sibling]bool
+
+    encodedBuffer := bytes.NewBuffer(data)
+    decoder := gob.NewDecoder(encodedBuffer)
+
+    err := decoder.Decode(&siblings)
+
+    siblingSet.siblings = siblings
+
+    return err
+}
+
+func (siblingSet *SiblingSet) Encode() []byte {
+    b, _ := siblingSet.MarshalJSON()
+
+    return b
+}
+
+func (siblingSet *SiblingSet) Decode(encodedSiblingSet []byte) error {
+    return siblingSet.UnmarshalJSON(encodedSiblingSet)
+}
+
+func (siblingSet *SiblingSet) MarshalJSON() ([]byte, error) {
+    siblingList := make([]*Sibling, 0, len(siblingSet.siblings))
+
+    for ss, _ := range siblingSet.siblings {
+        siblingList = append(siblingList, ss)
+    }
+
+    return json.Marshal(siblingList)
+}
+
+func (siblingSet *SiblingSet) UnmarshalJSON(data []byte) error {
+    siblingList := make([]*Sibling, 0)
+    err := json.Unmarshal(data, &siblingList)
+
+    if err != nil {
+        return err
+    }
+
+    siblingSet.siblings = make(map[*Sibling]bool, len(siblingList))
+
+    for _, ss := range siblingList {
+        siblingSet.siblings[ss] = true
+    }
+
+    return nil
+}
diff --git a/vendor/github.com/armPelionEdge/devicedb/data/sibling_set_iterator.go b/vendor/github.com/armPelionEdge/devicedb/data/sibling_set_iterator.go
new file mode 100644
index 0000000..b1e7994
--- /dev/null
+++ b/vendor/github.com/armPelionEdge/devicedb/data/sibling_set_iterator.go
@@ -0,0 +1,35 @@
+package data
+//
+ // Copyright (c) 2019 ARM Limited.
+ //
+ // SPDX-License-Identifier: MIT
+ //
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
+ // of this software and associated documentation files (the "Software"), to
+ // deal in the Software without restriction, including without limitation the
+ // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ // sell copies of the Software, and to permit persons to whom the Software is
+ // furnished to do so, subject to the following conditions:
+ //
+ // The above copyright notice and this permission notice shall be included in all
+ // copies or substantial portions of the Software.
+ // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +type SiblingSetIterator interface { + Next() bool + Prefix() []byte + Key() []byte + Value() *SiblingSet + LocalVersion() uint64 + Release() + Error() error +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/data/update.go b/vendor/github.com/armPelionEdge/devicedb/data/update.go new file mode 100644 index 0000000..b956d28 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/data/update.go @@ -0,0 +1,75 @@ +package data +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +type Diff struct { + key string + oldSiblingSet *SiblingSet + newSiblingSet *SiblingSet +} + +func (diff Diff) OldSiblingSet() *SiblingSet { + return diff.oldSiblingSet +} + +func (diff Diff) NewSiblingSet() *SiblingSet { + return diff.newSiblingSet +} + +func (diff Diff) Key() string { + return diff.key +} + +type Update struct { + diffs map[string]Diff +} + +func NewUpdate() *Update { + return &Update{ map[string]Diff{ } } +} + +func (update *Update) AddDiff(key string, oldSiblingSet *SiblingSet, newSiblingSet *SiblingSet) *Update { + update.diffs[key] = Diff{key, oldSiblingSet, newSiblingSet} + + return update +} + +func (update *Update) Iter() <-chan Diff { + ch := make(chan Diff) + + go func() { + for _, diff := range update.diffs { + ch <- diff + } + + close(ch) + } () + + return ch +} + +func (update *Update) Size() int { + return len(update.diffs) +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/error/errors.go b/vendor/github.com/armPelionEdge/devicedb/error/errors.go new file mode 100644 index 0000000..c830e7e --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/error/errors.go @@ -0,0 +1,120 @@ +package error +// + // Copyright (c) 2019 ARM Limited. 
+ // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "encoding/json" +) + +type DBerror struct { + Msg string `json:"message"` + ErrorCode int `json:"code"` +} + +func (dbError DBerror) Error() string { + return dbError.Msg +} + +func (dbError DBerror) Code() int { + return dbError.ErrorCode +} + +func (dbError DBerror) JSON() []byte { + json, _ := json.Marshal(dbError) + + return json +} + +const ( + eEMPTY = iota + eLENGTH = iota + eNO_VNODE = iota + eSTORAGE = iota + eCORRUPTED = iota + eINVALID_KEY = iota + eINVALID_BUCKET = iota + eINVALID_BATCH = iota + eMERKLE_RANGE = iota + eINVALID_OP = iota + eINVALID_CONTEXT = iota + eUNAUTHORIZED = iota + eINVALID_PEER = iota + eREAD_BODY = iota + eREQUEST_QUERY = iota + eALERT_BODY = iota + eNODE_CONFIG_BODY = iota + eNODE_DECOMMISSIONING = iota + ePROPOSAL_ERROR = iota + eDUPLICATE_NODE_ID = iota + eNO_SUCH_SITE = iota + eNO_SUCH_RELAY = iota + eNO_SUCH_BUCKET = iota + eNO_QUORUM = iota + eOPERATION_LOCKED = iota + eSNAPSHOT_IN_PROGRESS = iota + eSNAPSHOT_OPEN_FAILED = iota + eSNAPSHOT_READ_FAILED = iota +) + +var ( + EEmpty = DBerror{ "Parameter was empty or nil", eEMPTY } + ELength = DBerror{ "Parameter is too long", eLENGTH } + ENoVNode = DBerror{ "This node does not contain keys in this partition", eNO_VNODE } + EStorage = DBerror{ "The storage driver experienced an error", eSTORAGE } + ECorrupted = DBerror{ "The storage medium is corrupted", eCORRUPTED } + EInvalidKey = DBerror{ "A key was misformatted", eINVALID_KEY } + EInvalidBucket = DBerror{ "An invalid bucket was specified", eINVALID_BUCKET } + EInvalidBatch = DBerror{ "An invalid batch was specified", eINVALID_BATCH } + EMerkleRange = DBerror{ "An invalid merkle node was requested", eMERKLE_RANGE } + EInvalidOp = DBerror{ "An invalid operation was specified", eINVALID_OP } + EInvalidContext = DBerror{ "An invalid context was provided in an update", eINVALID_CONTEXT } + EUnauthorized = DBerror{ "Operation not permitted", eUNAUTHORIZED } + EInvalidPeer = DBerror{ "The specified peer is invalid", eINVALID_PEER } + EReadBody = DBerror{ "Unable to read request body", eREAD_BODY } + ERequestQuery = DBerror{ "Invalid query parameter format", eREQUEST_QUERY } + EAlertBody = DBerror{ "Invalid alert body. 
Body must be true or false", eALERT_BODY } + ENodeConfigBody = DBerror{ "Invalid node config body.", eNODE_CONFIG_BODY } + ENodeDecommissioning = DBerror{ "This node is in the process of leaving the cluster.", eNODE_DECOMMISSIONING } + EDuplicateNodeID = DBerror{ "The ID the node is using was already used by a cluster member at some point.", eDUPLICATE_NODE_ID } + EProposalError = DBerror{ "An error occurred while proposing cluster configuration change.", ePROPOSAL_ERROR } + ESiteDoesNotExist = DBerror{ "The specified site does not exist at this node.", eNO_SUCH_SITE } + ERelayDoesNotExist = DBerror{ "The specified relay does not exist at this node.", eNO_SUCH_RELAY } + EBucketDoesNotExist = DBerror{ "The site does not contain the specified bucket.", eNO_SUCH_BUCKET } + ENoQuorum = DBerror{ "The database operation was not able to achieve participation from the necessary number of replicas.", eNO_QUORUM } + EOperationLocked = DBerror{ "The attempted operation is currently locked on the partition that the specified data belongs to.", eOPERATION_LOCKED } + ESnapshotInProgress = DBerror{ "The specified snapshot is still in progress", eSNAPSHOT_IN_PROGRESS } + ESnapshotOpenFailed = DBerror{ "The snapshot could not be opened.", eSNAPSHOT_OPEN_FAILED } + ESnapshotReadFailed = DBerror{ "The snapshot could be opened, but it appears to be incomplete or invalid.", eSNAPSHOT_READ_FAILED } +) + +func DBErrorFromJSON(encodedError []byte) (DBerror, error) { + var dbError DBerror + + if err := json.Unmarshal(encodedError, &dbError); err != nil { + return DBerror{}, err + } + + return dbError, nil +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/historian/historian.go b/vendor/github.com/armPelionEdge/devicedb/historian/historian.go new file mode 100644 index 0000000..9e31fae --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/historian/historian.go @@ -0,0 +1,638 @@ +package historian +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "encoding/json" + "encoding/binary" + "encoding/base64" + "crypto/rand" + "fmt" + "math" + "sort" + "sync" + + . "github.com/armPelionEdge/devicedb/storage" + . "github.com/armPelionEdge/devicedb/error" + . 
"github.com/armPelionEdge/devicedb/logging" +) + +var ( + BY_TIME_PREFIX = []byte{ 0 } + BY_SOURCE_AND_TIME_PREFIX = []byte{ 1 } + BY_DATA_SOURCE_AND_TIME_PREFIX = []byte{ 2 } + BY_SERIAL_NUMBER_PREFIX = []byte{ 3 } + SEQUENTIAL_COUNTER_PREFIX = []byte{ 4 } + CURRENT_SIZE_COUNTER_PREFIX = []byte{ 5 } + HIGHEST_FORWARDED_INDEX_PREFIX = []byte{ 6 } + DELIMETER = []byte(".") +) + +func randomString() string { + randomBytes := make([]byte, 16) + rand.Read(randomBytes) + + high := binary.BigEndian.Uint64(randomBytes[:8]) + low := binary.BigEndian.Uint64(randomBytes[8:]) + + return fmt.Sprintf("%05x%05x", high, low) +} + +func timestampBytes(ts uint64) []byte { + bytes := make([]byte, 8) + + binary.BigEndian.PutUint64(bytes, ts) + + return bytes +} + +type HistoryQuery struct { + MinSerial *uint64 + MaxSerial *uint64 + Sources []string + Data *string + Order string + Before uint64 + After uint64 + Limit int +} + +type Event struct { + Timestamp uint64 `json:"timestamp"` + SourceID string `json:"source"` + Type string `json:"type"` + Data string `json:"data"` + UUID string `json:"uuid"` + Serial uint64 `json:"serial"` + Groups []string `json:"groups"` +} + +func (event *Event) indexBySerial() []byte { + sEncoding := timestampBytes(event.Serial) + result := make([]byte, 0, len(BY_SERIAL_NUMBER_PREFIX) + len(sEncoding)) + + result = append(result, BY_SERIAL_NUMBER_PREFIX...) + result = append(result, sEncoding...) + + return result +} + +func (event *Event) prefixBySerial() []byte { + return event.indexBySerial() +} + +func (event *Event) indexByTime() []byte { + timestampEncoding := timestampBytes(event.Timestamp) + uuid := []byte(event.UUID) + result := make([]byte, 0, len(BY_TIME_PREFIX) + len(timestampEncoding) + len(DELIMETER) + len(uuid)) + + result = append(result, BY_TIME_PREFIX...) + result = append(result, timestampEncoding...) + result = append(result, DELIMETER...) + result = append(result, uuid...) + + return result +} + +func (event *Event) prefixByTime() []byte { + timestampEncoding := timestampBytes(event.Timestamp) + result := make([]byte, 0, len(BY_TIME_PREFIX) + len(timestampEncoding) + len(DELIMETER)) + + result = append(result, BY_TIME_PREFIX...) + result = append(result, timestampEncoding...) + result = append(result, DELIMETER...) + + return result +} + +func (event *Event) indexBySourceAndTime() []byte { + sourceEncoding := []byte(base64.StdEncoding.EncodeToString([]byte(event.SourceID))) + timestampEncoding := timestampBytes(event.Timestamp) + uuid := []byte(event.UUID) + result := make([]byte, 0, len(BY_SOURCE_AND_TIME_PREFIX) + len(sourceEncoding) + len(DELIMETER) + len(timestampEncoding) + len(DELIMETER) + len(uuid)) + + result = append(result, BY_SOURCE_AND_TIME_PREFIX...) + result = append(result, sourceEncoding...) + result = append(result, DELIMETER...) + result = append(result, timestampEncoding...) + result = append(result, DELIMETER...) + result = append(result, uuid...) + + return result +} + +func (event *Event) prefixBySourceAndTime() []byte { + sourceEncoding := []byte(base64.StdEncoding.EncodeToString([]byte(event.SourceID))) + timestampEncoding := timestampBytes(event.Timestamp) + result := make([]byte, 0, len(BY_SOURCE_AND_TIME_PREFIX) + len(sourceEncoding) + len(DELIMETER) + len(timestampEncoding) + len(DELIMETER)) + + result = append(result, BY_SOURCE_AND_TIME_PREFIX...) + result = append(result, sourceEncoding...) + result = append(result, DELIMETER...) + result = append(result, timestampEncoding...) + result = append(result, DELIMETER...) 
+ + return result +} + +func (event *Event) indexByDataSourceAndTime() []byte { + sourceEncoding := []byte(base64.StdEncoding.EncodeToString([]byte(event.SourceID))) + dataEncoding := []byte(base64.StdEncoding.EncodeToString([]byte(event.Data))) + timestampEncoding := timestampBytes(event.Timestamp) + uuid := []byte(event.UUID) + result := make([]byte, 0, len(BY_SOURCE_AND_TIME_PREFIX) + len(dataEncoding) + len(DELIMETER) + len(sourceEncoding) + len(DELIMETER) + len(timestampEncoding) + len(DELIMETER) + len(uuid)) + + result = append(result, BY_DATA_SOURCE_AND_TIME_PREFIX...) + result = append(result, dataEncoding...) + result = append(result, DELIMETER...) + result = append(result, sourceEncoding...) + result = append(result, DELIMETER...) + result = append(result, timestampEncoding...) + result = append(result, DELIMETER...) + result = append(result, uuid...) + + return result +} + +func (event *Event) prefixByDataSourceAndTime() []byte { + sourceEncoding := []byte(base64.StdEncoding.EncodeToString([]byte(event.SourceID))) + dataEncoding := []byte(base64.StdEncoding.EncodeToString([]byte(event.Data))) + timestampEncoding := timestampBytes(event.Timestamp) + result := make([]byte, 0, len(BY_SOURCE_AND_TIME_PREFIX) + len(dataEncoding) + len(DELIMETER) + len(sourceEncoding) + len(DELIMETER) + len(timestampEncoding) + len(DELIMETER)) + + result = append(result, BY_DATA_SOURCE_AND_TIME_PREFIX...) + result = append(result, dataEncoding...) + result = append(result, DELIMETER...) + result = append(result, sourceEncoding...) + result = append(result, DELIMETER...) + result = append(result, timestampEncoding...) + result = append(result, DELIMETER...) + + return result +} + +type Historian struct { + storageDriver StorageDriver + nextID uint64 + currentSize uint64 + forwardIndex uint64 + logLock sync.Mutex + // event limit describes the maximum number of events + // allowed to be stored in the history log + // before old events must be purged. + eventLimit uint64 + // event floor describes the minimum number of events + // that must be remembered after log rotation occurs + eventFloor uint64 + // when events are purged from the history log this + // value describes how many events are deleted + // per batch. In other words, if 100 events are + // being purged and the purge batch size is 20 + // then 5 batches will be applied to the data store. 
+ purgeBatchSize int +} + +func NewHistorian(storageDriver StorageDriver, eventLimit uint64, eventFloor uint64, purgeBatchSize int) *Historian { + var nextID uint64 + var currentSize uint64 + var forwardIndex uint64 + + values, err := storageDriver.Get([][]byte{ SEQUENTIAL_COUNTER_PREFIX, CURRENT_SIZE_COUNTER_PREFIX, HIGHEST_FORWARDED_INDEX_PREFIX }) + + if err == nil && len(values[0]) == 8 { + nextID = binary.BigEndian.Uint64(values[0]) + } + + if err == nil && len(values[1]) == 8 { + currentSize = binary.BigEndian.Uint64(values[1]) + } + + if err == nil && len(values[2]) == 8 { + forwardIndex = binary.BigEndian.Uint64(values[2]) + } + + historian := &Historian{ + storageDriver: storageDriver, + nextID: nextID + 1, + forwardIndex: forwardIndex, + currentSize: currentSize, + eventLimit: eventLimit, + eventFloor: eventFloor, + purgeBatchSize: purgeBatchSize, + } + + historian.RotateLog() + + return historian +} + +func (historian *Historian) LogSize() uint64 { + return historian.currentSize +} + +func (historian *Historian) LogSerial() uint64 { + return historian.nextID +} + +func (historian *Historian) SetLogSerial(s uint64) error { + historian.logLock.Lock() + defer historian.logLock.Unlock() + + if s < historian.nextID - 1 { + return nil + } + + batch := NewBatch() + batch.Put(SEQUENTIAL_COUNTER_PREFIX, timestampBytes(s)) + + err := historian.storageDriver.Batch(batch) + + if err != nil { + Log.Errorf("Storage driver error in SetLogSerial(%v): %s", s, err.Error()) + + return EStorage + } + + historian.nextID = s + 1 + + return nil +} + +func (historian *Historian) ForwardIndex() uint64 { + return historian.forwardIndex +} + +func (historian *Historian) SetForwardIndex(i uint64) error { + historian.logLock.Lock() + defer historian.logLock.Unlock() + + batch := NewBatch() + batch.Put(HIGHEST_FORWARDED_INDEX_PREFIX, timestampBytes(i)) + + err := historian.storageDriver.Batch(batch) + + if err != nil { + Log.Errorf("Storage driver error in SetForwardIndex(%v): %s", i, err.Error()) + + return EStorage + } + + historian.forwardIndex = i + + return nil +} + +func (historian *Historian) LogEvent(event *Event) error { + // events must be logged sequentially to preserve the invariant that when a batch is written to + // disk no other batch that has been written before it has a serial number greater than itself + historian.logLock.Lock() + defer historian.logLock.Unlock() + + // indexed by time + // indexed by resourceid + time + // indexed by eventdata + resourceID + time + event.UUID = randomString() + event.Serial = historian.nextID + + batch := NewBatch() + marshaledEvent, err := json.Marshal(event) + + if err != nil { + Log.Errorf("Could not marshal event to JSON: %v", err.Error()) + + return EStorage + } + + batch.Put(event.indexByTime(), []byte(marshaledEvent)) + batch.Put(event.indexBySourceAndTime(), []byte(marshaledEvent)) + batch.Put(event.indexByDataSourceAndTime(), []byte(marshaledEvent)) + batch.Put(event.indexBySerial(), []byte(marshaledEvent)) + batch.Put(SEQUENTIAL_COUNTER_PREFIX, timestampBytes(event.Serial)) + batch.Put(CURRENT_SIZE_COUNTER_PREFIX, timestampBytes(historian.currentSize + 1)) + + err = historian.storageDriver.Batch(batch) + + if err != nil { + Log.Errorf("Storage driver error in LogEvent(%v): %s", event, err.Error()) + + return EStorage + } + + historian.nextID += 1 + historian.currentSize += 1 + + err = historian.RotateLog() + + if err != nil { + return EStorage + } + + return nil +} + +func (historian *Historian) Query(query *HistoryQuery) (*EventIterator, error) 
{
+    var ranges [][2][]byte
+    var direction int
+
+    if query.Order == "desc" {
+        direction = BACKWARD
+    } else {
+        direction = FORWARD
+    }
+
+    // query.Before is unspecified so default to max range
+    if query.Before == 0 {
+        query.Before = math.MaxUint64
+    }
+
+    // ensure consistent ordering from multiple sources
+    sort.Strings(query.Sources)
+
+    if query.MinSerial != nil {
+        ranges = make([][2][]byte, 1)
+
+        ranges[0] = [2][]byte{
+            (&Event{ Serial: *query.MinSerial }).prefixBySerial(),
+            (&Event{ Serial: math.MaxUint64 }).prefixBySerial(),
+        }
+    } else if query.MaxSerial != nil {
+        ranges = make([][2][]byte, 1)
+
+        ranges[0] = [2][]byte{
+            (&Event{ Serial: 0 }).prefixBySerial(),
+            (&Event{ Serial: *query.MaxSerial }).prefixBySerial(),
+        }
+    } else if len(query.Sources) == 0 {
+        // time -> indexByTime
+        ranges = make([][2][]byte, 1)
+
+        ranges[0] = [2][]byte{
+            (&Event{ Timestamp: query.After }).prefixByTime(),
+            (&Event{ Timestamp: query.Before }).prefixByTime(),
+        }
+    } else if query.Data == nil {
+        // sources + time -> indexBySourceAndTime
+        ranges = make([][2][]byte, 0, len(query.Sources))
+
+        for _, source := range query.Sources {
+            ranges = append(ranges, [2][]byte{
+                (&Event{ SourceID: source, Timestamp: query.After }).prefixBySourceAndTime(),
+                (&Event{ SourceID: source, Timestamp: query.Before }).prefixBySourceAndTime(),
+            })
+        }
+    } else {
+        // data + sources + time -> indexByDataSourceAndTime
+        ranges = make([][2][]byte, 0, len(query.Sources))
+
+        for _, source := range query.Sources {
+            ranges = append(ranges, [2][]byte{
+                (&Event{ Data: *query.Data, SourceID: source, Timestamp: query.After }).prefixByDataSourceAndTime(),
+                (&Event{ Data: *query.Data, SourceID: source, Timestamp: query.Before }).prefixByDataSourceAndTime(),
+            })
+        }
+    }
+
+    iter, err := historian.storageDriver.GetRanges(ranges, direction)
+
+    if err != nil {
+        Log.Errorf("Storage driver error in Query(%v): %s", query, err.Error())
+
+        return nil, err
+    }
+
+    return NewEventIterator(iter, query.Limit), nil
+}
+
+func (historian *Historian) Purge(query *HistoryQuery) error {
+    historian.logLock.Lock()
+    defer historian.logLock.Unlock()
+
+    return historian.purge(query)
+}
+
+func (historian *Historian) purge(query *HistoryQuery) error {
+    eventIterator, err := historian.Query(query)
+
+    if err != nil {
+        return err
+    }
+
+    var eventBatch []*Event
+
+    if historian.purgeBatchSize <= 0 {
+        eventBatch = make([]*Event, 1)
+    } else {
+        eventBatch = make([]*Event, historian.purgeBatchSize)
+    }
+
+    var currentBatchSize int = 0
+
+    for eventIterator.Next() {
+        eventBatch[currentBatchSize] = eventIterator.Event()
+        currentBatchSize++
+
+        if currentBatchSize < len(eventBatch) {
+            continue
+        }
+
+        // Precondition at this point: currentBatchSize == len(eventBatch)
+        err := historian.purgeEvents(eventBatch)
+
+        if err != nil {
+            return err
+        }
+
+        // This needs to be reset since the next events belong to a new batch
+        currentBatchSize = 0
+    }
+
+    if eventIterator.Error() != nil {
+        Log.Errorf("Storage driver error in Purge(%v): %s", query, eventIterator.Error().Error())
+
+        return eventIterator.Error()
+    }
+
+    // This needs to be called after the main loop exits since
+    // there might have been a partial batch at the end of the
+    // list of events that were iterated through. Since in the
+    // loop above the batch only gets written once it is full
+    // there can be some leftover.
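+    //
+    // For example (illustrative numbers only, not from the original
+    // source): with purgeBatchSize = 20 and 47 matching events, the
+    // loop above flushes two full batches of 20 and the call below
+    // removes the remaining 7.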
+    err = historian.purgeEvents(eventBatch[:currentBatchSize])
+
+    if err != nil {
+        return err
+    }
+
+    return nil
+}
+
+func (historian *Historian) purgeEvents(events []*Event) error {
+    batch := NewBatch()
+
+    for _, event := range events {
+        batch.Delete(event.indexByTime())
+        batch.Delete(event.indexBySourceAndTime())
+        batch.Delete(event.indexByDataSourceAndTime())
+        batch.Delete(event.indexBySerial())
+    }
+
+    batch.Put(CURRENT_SIZE_COUNTER_PREFIX, timestampBytes(historian.currentSize - uint64(len(events))))
+
+    err := historian.storageDriver.Batch(batch)
+
+    if err != nil {
+        Log.Errorf("Storage driver error in purgeEvents(): %s", err.Error())
+
+        return EStorage
+    }
+
+    historian.currentSize -= uint64(len(events))
+
+    return nil
+}
+
+// The behavior of log rotation is determined by the
+// relationship between eventLimit, eventFloor, and
+// purgeBatchSize. Log rotation occurs when the log
+// contains eventLimit events. Log rotation will
+// delete old events until there are eventFloor
+// events remaining in the log. Configuring these
+// two values can adjust the performance characteristics
+// of the history log and how aggressively disk space is
+// conserved.
+//
+// Here are some example configurations and
+// their performance characteristics:
+//
+// eventLimit = 100000 eventFloor = 99999
+//     In this case the most events ever stored
+//     on disk will be 100000. However, after
+//     there are at least 99999 events written to
+//     the log every new log entry will require
+//     a log rotation operation to occur in order
+//     to keep the log size under the limit.
+// eventLimit = 200000 eventFloor = 100000
+//     In this case a purge operation only occurs
+//     once every 100000 writes instead of on every
+//     write.
+//
+// if eventFloor is greater than or equal to
+// eventLimit then eventFloor is ignored and
+// eventLimit is used as the event floor
+func (historian *Historian) RotateLog() error {
+    if historian.eventLimit != 0 && historian.currentSize > historian.eventLimit {
+        var minSerial uint64 = 0
+        var err error
+
+        if historian.eventFloor < historian.eventLimit {
+            Log.Debugf("Purging oldest %d items from history log (currentSize=%d eventFloor=%d)", int(historian.currentSize - historian.eventFloor), historian.currentSize, historian.eventFloor)
+            err = historian.purge(&HistoryQuery{ MinSerial: &minSerial, Limit: int(historian.currentSize - historian.eventFloor) })
+        } else {
+            Log.Debugf("Purging oldest %d items from history log (currentSize=%d eventLimit=%d)", int(historian.currentSize - historian.eventLimit), historian.currentSize, historian.eventLimit)
+            err = historian.purge(&HistoryQuery{ MinSerial: &minSerial, Limit: int(historian.currentSize - historian.eventLimit) })
+        }
+
+        if err != nil {
+            return err
+        }
+    }
+
+    return nil
+}
+
+type EventIterator struct {
+    dbIterator StorageIterator
+    parseError error
+    currentEvent *Event
+    limit uint64
+    eventsSeen uint64
+}
+
+func NewEventIterator(iterator StorageIterator, limit int) *EventIterator {
+    if limit <= 0 {
+        limit = 0
+    }
+
+    return &EventIterator{
+        dbIterator: iterator,
+        parseError: nil,
+        currentEvent: nil,
+        limit: uint64(limit),
+        eventsSeen: 0,
+    }
+}
+
+func (ei *EventIterator) Next() bool {
+    ei.currentEvent = nil
+
+    if !ei.dbIterator.Next() {
+        if ei.dbIterator.Error() != nil {
+            Log.Errorf("Storage driver error in Next(): %s", ei.dbIterator.Error())
+        }
+
+        return false
+    }
+
+    var event Event
+
+    ei.parseError = json.Unmarshal(ei.dbIterator.Value(), &event)
+
+    if ei.parseError != nil {
Log.Errorf("Storage driver error in Next() key = %v, value = %v: %s", ei.dbIterator.Key(), ei.dbIterator.Value(), ei.parseError.Error()) + + ei.Release() + + return false + } + + ei.currentEvent = &event + ei.eventsSeen += 1 + + if ei.limit != 0 && ei.eventsSeen == ei.limit { + ei.Release() + } + + return true +} + +func (ei *EventIterator) Event() *Event { + return ei.currentEvent +} + +func (ei *EventIterator) Release() { + ei.dbIterator.Release() +} + +func (ei *EventIterator) Error() error { + if ei.parseError != nil { + return EStorage + } + + if ei.dbIterator.Error() != nil { + return EStorage + } + + return nil +} diff --git a/vendor/github.com/armPelionEdge/devicedb/logging/logging.go b/vendor/github.com/armPelionEdge/devicedb/logging/logging.go new file mode 100644 index 0000000..b10d8d1 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/logging/logging.go @@ -0,0 +1,32 @@ +package logging +// +// Copyright (c) 2019 ARM Limited. +// +// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to +// deal in the Software without restriction, including without limitation the +// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +// sell copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. +// + + +import ( + "github.com/WigWagCo/wigwag-go-logger/logging" +) + +var Log = logging.Log +var SetLoggingLevel = logging.SetLoggingLevel \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/merkle/merkle.go b/vendor/github.com/armPelionEdge/devicedb/merkle/merkle.go new file mode 100644 index 0000000..75ad509 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/merkle/merkle.go @@ -0,0 +1,415 @@ +// depth | memory overhead +// 5 | 512 bytes (0.5 KiB) +// 10 | 16384 bytes (16 KiB) +// 15 | 524288 bytes (512 KiB) +// 19 | 8388608 bytes (8192 KiB) (8 MiB) +// 20 | 16777216 bytes (16384 KiB) (16 MiB) + +package merkle +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. 
+ // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "math" + "errors" + "unsafe" + "strconv" + "sync" + + . "github.com/armPelionEdge/devicedb/data" +) + +// MerkleMaxDepth should never exceed 32 +const MerkleMinDepth uint8 = 1 +const MerkleDefaultDepth uint8 = 19 +const MerkleMaxDepth uint8 = 28 // 4GB + +type MerkleTree struct { + depth uint8 + nodes []Hash + lock sync.Mutex +} + +func NewMerkleTree(depth uint8) (*MerkleTree, error) { + if depth < MerkleMinDepth || depth > MerkleMaxDepth { + return nil, errors.New("depth must be between " + strconv.Itoa(int(MerkleMinDepth)) + " and " + strconv.Itoa(int(MerkleMaxDepth))) + } + + nodes := make([]Hash, uint32(math.Pow(float64(2), float64(depth)))) + + return &MerkleTree{ + depth: depth, + nodes: nodes, + }, nil +} + +func NewDummyMerkleTree(depth uint8) (*MerkleTree, error) { + if depth < MerkleMinDepth || depth > MerkleMaxDepth { + return nil, errors.New("depth must be between " + strconv.Itoa(int(MerkleMinDepth)) + " and " + strconv.Itoa(int(MerkleMaxDepth))) + } + + return &MerkleTree{ + depth: depth, + }, nil +} + +func (tree *MerkleTree) RootHash() Hash { + tree.lock.Lock() + defer tree.lock.Unlock() + + return tree.nodes[1 << (tree.depth - 1)] +} + +func (tree *MerkleTree) RangeHash(rangeMin uint32, rangeMax uint32) Hash { + tree.lock.Lock() + defer tree.lock.Unlock() + + return tree.nodes[rangeMin + (rangeMax - rangeMin)/2] +} + +func (tree *MerkleTree) NodeHash(node uint32) Hash { + tree.lock.Lock() + defer tree.lock.Unlock() + + if node >= uint32(len(tree.nodes)) { + return Hash{} + } + + return tree.nodes[node] +} + +func (tree *MerkleTree) NodeLimit() uint32 { + if tree.nodes == nil { + return uint32(math.Pow(float64(2), float64(tree.depth))) + } + + return uint32(len(tree.nodes)) +} + +func (tree *MerkleTree) SubRangeMin(nodeID uint32) uint32 { + return nodeID - (1 << CountTrailingZeros(nodeID)) +} + +func (tree *MerkleTree) SubRangeMax(nodeID uint32) uint32 { + return nodeID + (1 << CountTrailingZeros(nodeID)) +} + +func (tree *MerkleTree) Level(nodeID uint32) uint8 { + return tree.Depth() - uint8(CountTrailingZeros(nodeID)) +} + +func (tree *MerkleTree) Depth() uint8 { + return tree.depth +} + +func (tree *MerkleTree) SetNodeHashes(nodeHashes map[uint32]Hash) { + for nodeID, hash := range nodeHashes { + tree.nodes[nodeID] = hash + } +} + +func (tree *MerkleTree) SetNodeHash(nodeID uint32, hash Hash) { + tree.nodes[nodeID] = hash +} + +func (tree *MerkleTree) TranslateNode(nodeID uint32, depth uint8) uint32 { + if tree.Depth() < depth { + return nodeID << (depth - tree.Depth()) + } + + return nodeID >> (tree.Depth() - depth) +} + +func (tree *MerkleTree) LeafNode(key []byte) uint32 { + keyHash := NewHash(key) + + return LeafNode(&keyHash, tree.depth) +} + +func (tree *MerkleTree) RootNode() uint32 { + return 1 << (tree.depth - 1) +} + +func (tree *MerkleTree) LeftChild(node uint32) uint32 { + if node & 0x1 == 0x1 { + return node + } + + return node - (1 << (CountTrailingZeros(node) - 1)) +} + +func (tree *MerkleTree) RightChild(node uint32) uint32 { + if node & 0x1 
== 0x1 {
+        return node
+    }
+
+    return node + (1 << (CountTrailingZeros(node) - 1))
+}
+
+// UpdateLeafHash sets the hash of a leaf node and then recomputes the
+// hashes of the internal nodes on the path from that leaf up to the root
+func (tree *MerkleTree) UpdateLeafHash(nodeID uint32, hash Hash) {
+    tree.lock.Lock()
+    defer tree.lock.Unlock()
+
+    if !tree.IsLeaf(nodeID) {
+        return
+    }
+
+    tree.SetNodeHash(nodeID, hash)
+    node := nodeID
+
+    for node != ParentNode(tree.RootNode()) {
+        left := tree.LeftChild(node)
+        right := tree.RightChild(node)
+
+        if node & 0x1 != 1 {
+            tree.nodes[node] = tree.nodes[left].Xor(tree.nodes[right])
+        }
+
+        node = ParentNode(node)
+    }
+}
+
+func (tree *MerkleTree) IsLeaf(nodeID uint32) bool {
+    return nodeID & 0x1 == 1 && nodeID < (1 << tree.Depth())
+}
+
+func (tree *MerkleTree) Update(update *Update) (map[uint32]bool, map[uint32]map[string]Hash) {
+    tree.lock.Lock()
+    defer tree.lock.Unlock()
+
+    modifiedNodes := make(map[uint32]bool)
+    objectHashes := make(map[uint32]map[string]Hash)
+    nodeQueue := NewQueue(uint32(update.Size()))
+
+    // should return a set of changes that should be persisted
+    for diff := range update.Iter() {
+        key := diff.Key()
+        keyHash := NewHash([]byte(key))
+        newObjectHash := diff.NewSiblingSet().Hash([]byte(key))
+        oldObjectHash := diff.OldSiblingSet().Hash([]byte(key))
+        leaf := LeafNode(&keyHash, tree.depth)
+
+        if _, ok := objectHashes[leaf]; !ok {
+            objectHashes[leaf] = make(map[string]Hash)
+        }
+
+        objectHashes[leaf][key] = newObjectHash
+        nodeQueue.Enqueue(leaf)
+
+        tree.nodes[leaf] = tree.nodes[leaf].Xor(oldObjectHash).Xor(newObjectHash)
+    }
+
+    for nodeQueue.Size() > 0 {
+        node := nodeQueue.Dequeue()
+        shift := CountTrailingZeros(node) - 1
+        left := node - (1 << shift)
+        right := node + (1 << shift)
+
+        modifiedNodes[node] = true
+
+        if node != tree.RootNode() {
+            nodeQueue.Enqueue(ParentNode(node))
+        }
+
+        if node & 0x1 != 1 {
+            tree.nodes[node] = tree.nodes[left].Xor(tree.nodes[right])
+        }
+    }
+
+    return modifiedNodes, objectHashes
+}
+
+func (tree *MerkleTree) UndoUpdate(update *Update) {
+    tree.lock.Lock()
+    defer tree.lock.Unlock()
+
+    nodeQueue := NewQueue(uint32(update.Size()))
+
+    // should return a set of changes that should be persisted
+    for diff := range update.Iter() {
+        key := diff.Key()
+        keyHash := NewHash([]byte(key))
+        newObjectHash := diff.NewSiblingSet().Hash([]byte(key))
+        oldObjectHash := diff.OldSiblingSet().Hash([]byte(key))
+        leaf := LeafNode(&keyHash, tree.depth)
+
+        nodeQueue.Enqueue(leaf)
+
+        tree.nodes[leaf] = tree.nodes[leaf].Xor(oldObjectHash).Xor(newObjectHash)
+    }
+
+    for nodeQueue.Size() > 0 {
+        node := nodeQueue.Dequeue()
+        shift := CountTrailingZeros(node) - 1
+        left := node - (1 << shift)
+        right := node + (1 << shift)
+
+        if node != tree.RootNode() {
+            nodeQueue.Enqueue(ParentNode(node))
+        }
+
+        if node & 0x1 != 1 {
+            tree.nodes[node] = tree.nodes[left].Xor(tree.nodes[right])
+        }
+    }
+}
+
+func (tree *MerkleTree) PreviewUpdate(update *Update) (map[uint32]Hash, map[uint32]map[string]Hash) {
+    tree.lock.Lock()
+    defer tree.lock.Unlock()
+
+    leafHashes := make(map[uint32]Hash)
+    objectHashes := make(map[uint32]map[string]Hash)
+
+    for diff := range update.Iter() {
+        key := diff.Key()
+        keyHash := NewHash([]byte(key))
+        newObjectHash := diff.NewSiblingSet().Hash([]byte(key))
+        oldObjectHash := diff.OldSiblingSet().Hash([]byte(key))
+        leaf := LeafNode(&keyHash, tree.depth)
+
+        if _, ok := objectHashes[leaf]; !ok {
+            objectHashes[leaf] = make(map[string]Hash)
+        }
+
+        if _, ok := leafHashes[leaf]; !ok {
+            leafHashes[leaf] = tree.nodes[leaf]
+        }
+
+        objectHashes[leaf][key] = newObjectHash
+        leafHashes[leaf] =
leafHashes[leaf].Xor(oldObjectHash).Xor(newObjectHash) + } + + return leafHashes, objectHashes +} + +func LeafNode(keyHash *Hash, depth uint8) uint32 { + // need to force hash value into depth bytes + // max: 64 - 1: 63 -> normalizedHash: [0, 1] + // min: 64 - 28: 36 -> normalizedHash: [0, 268435455] + var shiftAmount uint8 = uint8(unsafe.Sizeof(keyHash.High()))*8 - depth + var normalizedHash uint32 = uint32(keyHash.High() >> shiftAmount) + + return normalizedHash | 0x1 +} + +func ParentNode(node uint32) uint32 { + shift := CountTrailingZeros(node) + var parentOffset uint32 = 1 << shift + var direction uint32 = (node >> (shift + 1)) & 0x1 + + if direction == 1 { + return node - parentOffset + } + + return node + parentOffset +} + +func CountTrailingZeros(n uint32) uint32 { + var c uint32 = 0 + + if n & 0x1 == 0 { + c = 1 + + if (n & 0xffff) == 0 { + n >>= 16 + c += 16 + } + + if (n & 0xff) == 0 { + n >>= 8 + c += 8 + } + + if (n & 0xf) == 0 { + n >>= 4 + c += 4 + } + + if (n & 0x3) == 0 { + n >>= 2 + c += 2 + } + + c -= n & 0x1 + } + + return c +} + +func abs(v int32) uint32 { + if v < 0 { + return uint32(v*-1) + } + + return uint32(v) +} + +type queue struct { + q []uint32 + head uint32 + size uint32 +} + +func NewQueue(capacity uint32) *queue { + return &queue{ make([]uint32, capacity), 0, 0 } +} + +func (q *queue) Size() uint32 { + return q.size +} + +func (q *queue) Enqueue(n uint32) { + if q.size == uint32(len(q.q)) { + return + } + + i := q.head + q.size + + if i >= uint32(len(q.q)) { + i = i - uint32(len(q.q)) + } + + q.q[i] = n + q.size += 1 +} + +func (q *queue) Dequeue() uint32 { + if q.size == 0 { + return 0 + } + + n := q.q[q.head] + + q.head += 1 + q.size -= 1 + + if q.head >= uint32(len(q.q)) { + q.head = 0 + } + + return n +} diff --git a/vendor/github.com/armPelionEdge/devicedb/node/cluster_node.go b/vendor/github.com/armPelionEdge/devicedb/node/cluster_node.go new file mode 100644 index 0000000..86263f0 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/node/cluster_node.go @@ -0,0 +1,1235 @@ +package node +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "context" + "crypto/tls" + "encoding/binary" + "errors" + "fmt" + "io" + "math/rand" + "net/http" + "sync" + "time" + + . "github.com/armPelionEdge/devicedb/bucket" + "github.com/armPelionEdge/devicedb/client" + . 
"github.com/armPelionEdge/devicedb/cluster" + "github.com/armPelionEdge/devicedb/clusterio" + . "github.com/armPelionEdge/devicedb/data" + . "github.com/armPelionEdge/devicedb/error" + . "github.com/armPelionEdge/devicedb/logging" + . "github.com/armPelionEdge/devicedb/merkle" + . "github.com/armPelionEdge/devicedb/partition" + . "github.com/armPelionEdge/devicedb/raft" + . "github.com/armPelionEdge/devicedb/routes" + . "github.com/armPelionEdge/devicedb/server" + . "github.com/armPelionEdge/devicedb/site" + . "github.com/armPelionEdge/devicedb/storage" + ddbSync "github.com/armPelionEdge/devicedb/sync" + . "github.com/armPelionEdge/devicedb/transfer" + . "github.com/armPelionEdge/devicedb/util" + + "github.com/gorilla/websocket" + "github.com/coreos/etcd/raft" + "github.com/coreos/etcd/raft/raftpb" +) + +const ( + RaftStoreStoragePrefix = iota + SiteStoreStoragePrefix = iota + SnapshotMetadataPrefix = iota +) + +const SnapshotUUIDKey string = "UUID" + +const ClusterJoinRetryTimeout = 5 + +type ClusterNodeConfig struct { + StorageDriver StorageDriver + CloudServer *CloudServer + MerkleDepth uint8 + Capacity uint64 + NoValidate bool +} + +type ClusterNode struct { + interClusterClient *client.Client + configController ClusterConfigController + configControllerBuilder ClusterConfigControllerBuilder + cloudServer *CloudServer + raftTransport *TransportHub + raftStore RaftNodeStorage + transferAgent PartitionTransferAgent + clusterioAgent clusterio.ClusterIOAgent + storageDriver StorageDriver + partitionFactory PartitionFactory + partitionPool PartitionPool + joinedCluster chan int + leftCluster chan int + leftClusterResult chan error + isRunning bool + shutdown chan int + empty chan int + initializedCB func() + merkleDepth uint8 + capacity uint64 + shutdownDecommissioner func() + lock sync.Mutex + emptyMu sync.Mutex + relayConnectionsMu sync.Mutex + hub *Hub + noValidate bool + snapshotsDirectory string + snapshotter *Snapshotter +} + +func New(config ClusterNodeConfig) *ClusterNode { + if config.MerkleDepth < MerkleMinDepth { + config.MerkleDepth = MerkleDefaultDepth + } + + clusterNode := &ClusterNode{ + storageDriver: config.StorageDriver, + cloudServer: config.CloudServer, + raftStore: NewRaftStorage(NewPrefixedStorageDriver([]byte{ RaftStoreStoragePrefix }, config.StorageDriver)), + raftTransport: NewTransportHub(0), + configControllerBuilder: &ConfigControllerBuilder{ }, + interClusterClient: client.NewClient(client.ClientConfig{ }), + merkleDepth: config.MerkleDepth, + capacity: config.Capacity, + partitionFactory: NewDefaultPartitionFactory(), + partitionPool: NewDefaultPartitionPool(), + noValidate: config.NoValidate, + } + + if clusterNode.noValidate { + Log.Criticalf("!!! Starting node with NoValidate set to true. This option should not be used in production as it allows connecting relays to determine their own ID based on an HTTP header. 
This is for use in testing only and should not be active in a production cluster !!!") + } + + return clusterNode +} + +func (node *ClusterNode) UseRaftStore(raftStore RaftNodeStorage) { + node.raftStore = raftStore +} + +func (node *ClusterNode) getNodeID() (uint64, error) { + if err := node.raftStore.Open(); err != nil { + Log.Criticalf("Local node unable to open raft store: %v", err.Error()) + + return 0, err + } + + nodeID, err := node.raftStore.NodeID() + + if err != nil { + Log.Criticalf("Local node unable to obtain node ID from raft store: %v", err.Error()) + + return 0, err + } + + if nodeID == 0 { + nodeID = UUID64() + + Log.Infof("Local node initializing with ID %d", nodeID) + + if err := node.raftStore.SetNodeID(nodeID); err != nil { + Log.Criticalf("Local node unable to store new node ID: %v", err.Error()) + + return 0, err + } + } + + return nodeID, nil +} + +func (node *ClusterNode) Start(options NodeInitializationOptions) error { + node.isRunning = true + node.shutdown = make(chan int) + node.joinedCluster = make(chan int, 1) + node.leftCluster = make(chan int, 1) + node.snapshotsDirectory = options.SnapshotDirectory + + if err := node.openStorageDriver(); err != nil { + return err + } + + nodeID, err := node.getNodeID() + + if err != nil { + return err + } + + node.snapshotter = &Snapshotter{ + nodeID: nodeID, + snapshotsDirectory: node.snapshotsDirectory, + storageDriver: node.storageDriver, + } + + Log.Infof("Local node (id = %d) starting up...", nodeID) + + node.raftTransport.SetLocalPeerID(nodeID) + + clusterHost, clusterPort := options.ClusterAddress() + node.configControllerBuilder.SetLocalNodeAddress(PeerAddress{ NodeID: nodeID, Host: clusterHost, Port: clusterPort }) + node.configControllerBuilder.SetRaftNodeStorage(node.raftStore) + node.configControllerBuilder.SetRaftNodeTransport(node.raftTransport) + node.configControllerBuilder.SetCreateNewCluster(options.ShouldStartCluster()) + node.configController = node.configControllerBuilder.Create() + + stateCoordinator := NewClusterNodeStateCoordinator(&NodeCoordinatorFacade{ node: node }, nil) + node.configController.OnLocalUpdates(func(deltas []ClusterStateDelta) { + stateCoordinator.ProcessClusterUpdates(deltas) + }) + + node.configController.OnClusterSnapshot(func(snapshotIndex uint64, snapshotId string) { + node.localSnapshot(snapshotIndex, snapshotId) + }) + + node.configController.Start() + defer node.Stop() + + if node.configController.ClusterController().LocalNodeWasRemovedFromCluster() { + Log.Errorf("Local node (id = %d) unable to start because it was removed from the cluster", nodeID) + + return ERemoved + } + + // It is important to initialize the node before networking starts + // to ensure no cluster config state changes occur while initialize is being called. 
+ // Initialize needs to set up transfers and partitions with the node's last known + // state before changes to its partition ownership and partition transfers + // occur + node.transferAgent = NewDefaultHTTPTransferAgent(node.configController, node.partitionPool) + node.clusterioAgent = clusterio.NewAgent(NewNodeClient(node, node.configController), NewPartitionResolver(node.configController)) + + if options.SyncPeriod < 1000 { + options.SyncPeriod = 1000 + } + + bucketProxyFactory := &ddbSync.CloudBucketProxyFactory{ + Client: *node.interClusterClient, + ClusterController: node.configController.ClusterController(), + PartitionPool: node.partitionPool, + ClusterIOAgent: node.clusterioAgent, + } + syncController := NewSyncController(options.SyncMaxSessions, bucketProxyFactory, ddbSync.NewMultiSyncScheduler(time.Millisecond * time.Duration(options.SyncPeriod)), options.SyncPathLimit) + node.hub = NewHub("", syncController, nil) + + stateCoordinator.InitializeNodeState() + + node.hub.SyncController().Start() + serverStopResult := node.startNetworking() + decommission, err := node.raftStore.IsDecommissioning() + + if err != nil { + Log.Criticalf("Local node (id = %d) unable to start up since it could not check the decommissioning flag: %v", nodeID, err.Error()) + + return err + } + + if decommission { + Log.Infof("Local node (id = %d) will resume decommissioning process", nodeID) + + err, result := node.LeaveCluster() + + if err != nil { + Log.Criticalf("Local node (id = %d) unable to resume decommissioning process: %v", nodeID, err.Error()) + + return err + } + + return <-result + } + + if !node.configController.ClusterController().LocalNodeIsInCluster() || !node.configController.ClusterController().State.ClusterSettings.AreInitialized() { + if options.ShouldJoinCluster() { + seedHost, seedPort := options.SeedNode() + + Log.Infof("Local node (id = %d) joining existing cluster. Seed node at %s:%d", nodeID, seedHost, seedPort) + + if err := node.joinCluster(seedHost, seedPort); err != nil { + Log.Criticalf("Local node (id = %d) unable to join cluster: %v", nodeID, err.Error()) + + return err + } + } else { + Log.Infof("Local node (id = %d) creating new cluster...", nodeID) + + if err := node.initializeCluster(options.ClusterSettings); err != nil { + Log.Criticalf("Local node (id = %d) unable to create new cluster: %v", nodeID, err.Error()) + + return err + } + } + } + + node.notifyInitialized() + + select { + case <-node.leftCluster: + Log.Infof("Local node (id = %d) shutting down...", nodeID) + return ERemoved + case err := <-serverStopResult: + Log.Errorf("Local node (id = %d) stopped with error: %v", nodeID, err.Error()) + return err + case <-node.shutdown: + return nil + } +} + +func (node *ClusterNode) notifyInitialized() { + if node.initializedCB != nil { + node.initializedCB() + } +} + +func (node *ClusterNode) OnInitialized(cb func()) { + node.initializedCB = cb +} + +func (node *ClusterNode) ClusterConfigController() ClusterConfigController { + return node.configController +} + +func (node *ClusterNode) openStorageDriver() error { + if err := node.storageDriver.Open(); err != nil { + if err != ECorrupted { + Log.Criticalf("Error opening storage driver: %v", err.Error()) + + return EStorage + } + + Log.Error("Database is corrupted. Attempting automatic recovery now...") + + recoverError := node.recover() + + if recoverError != nil { + Log.Criticalf("Unable to recover corrupted database. 
Reason: %v", recoverError.Error()) + Log.Critical("Database daemon will now exit") + + return EStorage + } + } + + return nil +} + +func (node *ClusterNode) recover() error { + recoverError := node.storageDriver.Recover() + + if recoverError != nil { + Log.Criticalf("Unable to recover corrupted database. Reason: %v", recoverError.Error()) + + return EStorage + } + + return nil +} + +func (node *ClusterNode) startNetworking() <-chan error { + router := node.cloudServer.Router() + clusterEndpoint := &ClusterEndpoint{ ClusterFacade: &ClusterNodeFacade{ node: node } } + partitionsEndpoint := &PartitionsEndpoint{ ClusterFacade: &ClusterNodeFacade{ node: node } } + relaysEndpoint := &RelaysEndpoint{ ClusterFacade: &ClusterNodeFacade{ node: node } } + sitesEndpoint := &SitesEndpoint{ ClusterFacade: &ClusterNodeFacade{ node: node } } + syncEndpoint := &SyncEndpoint{ ClusterFacade: &ClusterNodeFacade{ node: node }, Upgrader: websocket.Upgrader{ ReadBufferSize: 1024, WriteBufferSize: 1024 } } + logDumEndpoint := &LogDumpEndpoint{ ClusterFacade: &ClusterNodeFacade{ node: node } } + snapshotEndpoint := &SnapshotEndpoint{ ClusterFacade: &ClusterNodeFacade{ node: node } } + profileEndpoint := &ProfilerEndpoint{ } + prometheusEndpoint := &PrometheusEndpoint{ } + merkleSyncEndpoint := &ddbSync.BucketSyncHTTP{ PartitionPool: node.partitionPool, ClusterConfigController: node.configController } + kubernetesEndpoint := &KubernetesEndpoint{ } + + node.raftTransport.Attach(router) + node.transferAgent.(*HTTPTransferAgent).Attach(router) + clusterEndpoint.Attach(router) + partitionsEndpoint.Attach(router) + relaysEndpoint.Attach(router) + // Note: Need to have merkleSyncEndpoint before sitesEndpoint + // since sitesEndpoint sets up a PrefixPath route for /sites/ + // which is a prefix the merkleSyncEndpoints share. 
+ merkleSyncEndpoint.Attach(router) + sitesEndpoint.Attach(router) + syncEndpoint.Attach(router) + logDumpEndpoint.Attach(router) + snapshotEndpoint.Attach(router) + profileEndpoint.Attach(router) + prometheusEndpoint.Attach(router) + kubernetesEndpoint.Attach(router) + + startResult := make(chan error) + + go func() { + startResult <- node.cloudServer.Start() + }() + + return startResult +} + +func (node *ClusterNode) sitePool(partitionNumber uint64) SitePool { + storageDriver := NewPrefixedStorageDriver(node.sitePoolStorePrefix(partitionNumber), node.storageDriver) + siteFactory := &CloudSiteFactory{ NodeID: node.Name(), MerkleDepth: node.merkleDepth, StorageDriver: storageDriver } + + return &CloudNodeSitePool{ SiteFactory: siteFactory } +} + +func (node *ClusterNode) sitePoolStorePrefix(partitionNumber uint64) []byte { + prefix := make([]byte, 9) + + prefix[0] = SiteStoreStoragePrefix + binary.BigEndian.PutUint64(prefix[1:], partitionNumber) + + return prefix +} + +func (node *ClusterNode) Stop() { + node.lock.Lock() + defer node.lock.Unlock() + + node.stop() +} + +func (node *ClusterNode) stop() { + node.storageDriver.Close() + node.configController.Stop() + node.cloudServer.Stop() + + if node.shutdownDecommissioner != nil { + node.shutdownDecommissioner() + } + + if node.isRunning { + node.isRunning = false + close(node.shutdown) + } +} + +func (node *ClusterNode) ID() uint64 { + return node.configController.ClusterController().LocalNodeID +} + +func (node *ClusterNode) Name() string { + return "cloud-" + fmt.Sprintf("%d", node.ID()) +} + +func (node *ClusterNode) initializeCluster(settings ClusterSettings) error { + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + select { + case <-ctx.Done(): + return + case <-node.shutdown: + cancel() + return + } + }() + + Log.Infof("Local node (id = %d) initializing cluster settings (replication_factor = %d, partitions = %d)", node.ID(), settings.ReplicationFactor, settings.Partitions) + + if err := node.configController.ClusterCommand(ctx, ClusterSetReplicationFactorBody{ ReplicationFactor: settings.ReplicationFactor }); err != nil { + Log.Criticalf("Local node (id = %d) was unable to initialize the replication factor of the new cluster: %v", node.ID(), err.Error()) + + return err + } + + if err := node.configController.ClusterCommand(ctx, ClusterSetPartitionCountBody{ Partitions: settings.Partitions }); err != nil { + Log.Criticalf("Local node (id = %d) was unable to initialize the partition count of the new cluster: %v", node.ID(), err.Error()) + + return err + } + + Log.Infof("Cluster initialization complete!") + + return nil +} + +func (node *ClusterNode) joinCluster(seedHost string, seedPort int) error { + node.raftTransport.SetDefaultRoute(seedHost, seedPort) + + memberAddress := PeerAddress{ + Host: seedHost, + Port: seedPort, + } + + newMemberConfig := NodeConfig{ + Capacity: node.capacity, + Address: PeerAddress{ + NodeID: node.ID(), + Host: node.cloudServer.InternalHost(), + Port: node.cloudServer.InternalPort(), + }, + } + + for { + ctx, cancel := context.WithCancel(context.Background()) + wasAdded := false + stopped := make(chan int) + + // run a goroutine in the background to + // cancel the running add node request when + // this node is shut down + go func() { + defer func() { stopped <- 1 }() + + for { + select { + case <-node.joinedCluster: + wasAdded = true + cancel() + return + case <-ctx.Done(): + return + case <-node.shutdown: + cancel() + return + } + } + }() + + Log.Infof("Local node (id = %d) 
is trying to join a cluster through an existing cluster member at %s:%d", node.ID(), seedHost, seedPort) + err := node.interClusterClient.AddNode(ctx, memberAddress, newMemberConfig) + + // Cancel to ensure the goroutine gets cleaned up + cancel() + + // Ensure that the above goroutine has exited and there are no new updates to consume + <-stopped + + if wasAdded { + return nil + } + + if _, ok := err.(DBerror); ok { + if err.(DBerror) == EDuplicateNodeID { + Log.Criticalf("Local node (id = %d) request to join the cluster failed because its ID is not unique. This may indicate that the node is trying to use a duplicate ID or it may indicate that a previous proposal that this node made was already accepted and it just hasn't heard about it yet.", node.ID()) + Log.Criticalf("Local node (id = %d) will now wait one minute to see if it is part of the cluster. If it receives no messages it will shut down", node.ID()) + + select { + case <-node.joinedCluster: + return nil + case <-node.shutdown: + return EStopped + case <-time.After(time.Minute): + return EDuplicateNodeID + } + } + } + + if err != nil { + Log.Errorf("Local node (id = %d) encountered an error while trying to join cluster: %v", node.ID(), err.Error()) + Log.Infof("Local node (id = %d) will try to join the cluster again in %d seconds", node.ID(), ClusterJoinRetryTimeout) + + select { + case <-node.joinedCluster: + // The node has been added to the cluster. The AddNode() request may + // have been submitted successfully even though the response never made + // it back to this node. No need to retry joining + return nil + case <-node.shutdown: + return EStopped + case <-time.After(time.Second * ClusterJoinRetryTimeout): + continue + } + } + + select { + case <-node.joinedCluster: + return nil + case <-node.shutdown: + return EStopped + } + } +} + +func (node *ClusterNode) LeaveCluster() (error, <-chan error) { + node.lock.Lock() + defer node.lock.Unlock() + + node.waitForEmpty() + + // allow at most one decommissioner + if node.shutdownDecommissioner != nil { + return nil, node.leftClusterResult + } + + Log.Infof("Local node (id = %d) is being put into decommissioning mode", node.ID()) + + if err := node.raftStore.SetDecommissioningFlag(); err != nil { + Log.Errorf("Local node (id = %d) was unable to be put into decommissioning mode: %v", node.ID(), err.Error()) + + return err, nil + } + + ctx, cancel := context.WithCancel(context.Background()) + node.shutdownDecommissioner = cancel + node.leftClusterResult = make(chan error, 1) + + go func() { + node.leftClusterResult <- node.decommission(ctx) + }() + + return nil, node.leftClusterResult +} + +func (node *ClusterNode) waitForEmpty() { + node.emptyMu.Lock() + defer node.emptyMu.Unlock() + + node.empty = make(chan int, 1) +} + +func (node *ClusterNode) notifyEmpty() { + node.emptyMu.Lock() + defer node.emptyMu.Unlock() + + if node.empty != nil { + node.empty <- 1 + } +} + +func (node *ClusterNode) decommission(ctx context.Context) error { + Log.Infof("Local node (id = %d) starting decommissioning process...", node.ID()) + + localNodeConfig := node.configController.ClusterController().LocalNodeConfig() + + if localNodeConfig == nil { + Log.Criticalf("Local node (id = %d) unable to continue decommissioning process since its node config is not in the cluster config", node.ID()) + + return ERemoved + } + + if localNodeConfig.Capacity != 0 { + Log.Infof("Local node (id = %d) decommissioning (1/4): Giving up tokens...", node.ID()) + + if err := node.configController.ClusterCommand(ctx, 
ClusterUpdateNodeBody{ NodeID: node.ID(), NodeConfig: NodeConfig{ Capacity: 0, Address: localNodeConfig.Address } }); err != nil { + Log.Criticalf("Local node (id = %d) was unable to give up its tokens: %v", node.ID(), err.Error()) + + return err + } + } + + // Transfers should be stopped anyway once the capacity is set to zero and this node no longer owns + // any tokens but call it here to make sure all have stopped by this point. + node.transferAgent.StopAllTransfers() + heldPartitionReplicas := node.configController.ClusterController().LocalNodeHeldPartitionReplicas() + + if len(heldPartitionReplicas) > 0 { + Log.Infof("Local node (id = %d) decommissioning (2/4): Locking partitions...", node.ID()) + + // Write lock partitions that are still held. This should occur anyway since + // The node no longer owns these partitions but calling it here ensures this + // invariant holds for the next steps of the decommissioning process + for _, partitionReplica := range heldPartitionReplicas { + partition := node.partitionPool.Get(partitionReplica.Partition) + + if partition != nil { + Log.Debugf("Local node (id = %d) decommissioning (2/4): Write locking partition %d", node.ID(), partition.Partition()) + + node.transferAgent.EnableOutgoingTransfers(partition.Partition()) + partition.LockWrites() + } + } + + Log.Infof("Local node (id = %d) decommissioning (3/4): Transferring partition data...", node.ID()) + + // Wait for all partition data to be transferred away from this node. This ensures that + // the data that this node held is replicated elsewhere before it removes itself from the + // cluster permanently. + select { + case <-node.leftCluster: + return ERemoved + case <-node.empty: + case <-ctx.Done(): + return ECancelled + } + } + + Log.Infof("Local node (id = %d) decommissioning (4/4): Leaving cluster...", node.ID()) + + if err := node.configController.RemoveNode(ctx, node.ID()); err != nil { + Log.Criticalf("Local node (id = %d) was unable to leave cluster: %v", node.ID(), err.Error()) + + return err + } + + return EDecommissioned +} + +func (node *ClusterNode) Batch(ctx context.Context, partitionNumber uint64, siteID string, bucketName string, updateBatch *UpdateBatch) (map[string]*SiblingSet, error) { + partition := node.partitionPool.Get(partitionNumber) + + if partition == nil { + return nil, ENoSuchPartition + } + + site := partition.Sites().Acquire(siteID) + + if site == nil { + return nil, ENoSuchSite + } + + bucket := site.Buckets().Get(bucketName) + + if bucket == nil { + return nil, ENoSuchBucket + } + + if !node.configController.ClusterController().LocalNodeHoldsPartition(partitionNumber) { + return nil, ENoQuorum + } + + patch, err := bucket.Batch(updateBatch) + + if err != nil { + return nil, err + } + + node.hub.BroadcastUpdate(siteID, bucketName, patch, 10) + + return patch, nil +} + +func (node *ClusterNode) Merge(ctx context.Context, partitionNumber uint64, siteID string, bucketName string, patch map[string]*SiblingSet, broadcastToRelays bool) error { + partition := node.partitionPool.Get(partitionNumber) + + if partition == nil { + return ENoSuchPartition + } + + site := partition.Sites().Acquire(siteID) + + if site == nil { + return ENoSuchSite + } + + bucket := site.Buckets().Get(bucketName) + + if bucket == nil { + return ENoSuchBucket + } + + err := bucket.Merge(patch) + + if err != nil { + return err + } + + if !node.configController.ClusterController().LocalNodeHoldsPartition(partitionNumber) { + return ENoQuorum + } + + if broadcastToRelays { + 
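// Fan the merged patch out to any relays currently connected to this site. +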
node.hub.BroadcastUpdate(siteID, bucketName, patch, 10) + } + + return nil +} + +func (node *ClusterNode) Get(ctx context.Context, partitionNumber uint64, siteID string, bucketName string, keys [][]byte) ([]*SiblingSet, error) { + partition := node.partitionPool.Get(partitionNumber) + + if partition == nil { + return nil, ENoSuchPartition + } + + site := partition.Sites().Acquire(siteID) + + if site == nil { + return nil, ENoSuchSite + } + + bucket := site.Buckets().Get(bucketName) + + if bucket == nil { + return nil, ENoSuchBucket + } + + return bucket.Get(keys) +} + +func (node *ClusterNode) GetMatches(ctx context.Context, partitionNumber uint64, siteID string, bucketName string, keys [][]byte) (SiblingSetIterator, error) { + partition := node.partitionPool.Get(partitionNumber) + + if partition == nil { + return nil, ENoSuchPartition + } + + site := partition.Sites().Acquire(siteID) + + if site == nil { + return nil, ENoSuchSite + } + + bucket := site.Buckets().Get(bucketName) + + if bucket == nil { + return nil, ENoSuchBucket + } + + return bucket.GetMatches(keys) +} + +func (node *ClusterNode) AcceptRelayConnection(conn *websocket.Conn, header http.Header) { + node.relayConnectionsMu.Lock() + defer node.relayConnectionsMu.Unlock() + + var relayID string + + if _, ok := conn.UnderlyingConn().(*tls.Conn); !ok { + if header.Get("X-WigWag-RelayID") == "" { + Log.Warningf("Cannot accept non-secure relay connections. Must use TLS") + + conn.Close() + + return + } + + relayID = header.Get("X-WigWag-RelayID") + } else { + var err error + relayID, err = node.hub.ExtractPeerID(conn.UnderlyingConn().(*tls.Conn)) + + if err != nil && !node.noValidate { + Log.Warningf("Cannot accept connection from relay because it provided an invalid client cert.") + + conn.Close() + + return + } + + if node.noValidate && header.Get("X-WigWag-RelayID") != "" { + relayID = header.Get("X-WigWag-RelayID") + } + } + + siteID := node.configController.ClusterController().RelaySite(relayID) + + if siteID == "" { + Log.Warningf("Unable to accept connection from relay %s because it has either not been added to the devicedb relay database or it does not belong to a site", relayID) + + conn.Close() + + return + } + + partitionNumber := node.configController.ClusterController().Partition(siteID) + owners := node.configController.ClusterController().PartitionOwners(partitionNumber) + + if len(owners) == 0 { + Log.Warningf("Unable to accept connection from relay %s because no node owns the partition for its site, site %s", relayID, siteID) + + conn.Close() + + return + } + + for _, nodeID := range owners { + if nodeID == node.configController.ClusterController().LocalNodeID { + Log.Infof("Local node (id = %d) accepting connection from relay %s which belongs to site %s", nodeID, relayID, siteID) + + // The local node owns this site database. It can accept the connection for this relay + node.hub.Accept(conn, partitionNumber, relayID, siteID, node.noValidate) + + return + } + } + + // Can only proxy wss -> ws + //if _, ok := conn.UnderlyingConn().(*tls.Conn); !ok { + // Log.Warningf("Local node (id = %d) cannot accept proxied connection from relay %s because it does not own the partition to which its site, site %s, belongs", node.configController.ClusterController().LocalNodeID, relayID, siteID) + + // conn.Close() + + // return + //} + + // The local node does not own the site database for this site. 
It should proxy the connection to one of the owners + nodeID := owners[int(rand.Uint32() % uint32(len(owners)))] + + Log.Infof("Local node (id = %d) proxying connection from relay %s which belongs to site %s to node %d", node.configController.ClusterController().LocalNodeID, relayID, siteID, nodeID) + node.proxyRelayConnection(nodeID, relayID, conn) +} + +func (node *ClusterNode) proxyRelayConnection(nodeID uint64, relayID string, conn *websocket.Conn) { + var dialer *websocket.Dialer = websocket.DefaultDialer + + header := http.Header{} + header.Set("X-WigWag-RelayID", relayID) + nodeAddress := node.ClusterConfigController().ClusterController().ClusterMemberAddress(nodeID) + connBackend, _, err := dialer.Dial(fmt.Sprintf("ws://%s:%d/sync", nodeAddress.Host, nodeAddress.Port), header) + + if err != nil { + Log.Warningf("Unable to proxy connection to node %d: %v", nodeID, err) + + conn.Close() + + return + } + + errors := make(chan error, 2) + cp := func(dest io.Writer, src io.Reader) { + _, err := io.Copy(dest, src) + errors <- err + } + + go cp(connBackend.UnderlyingConn(), conn.UnderlyingConn()) + go cp(conn.UnderlyingConn(), connBackend.UnderlyingConn()) + + go func() { + err := <-errors + Log.Infof("Closing proxied connection: %v", err) + + conn.Close() + connBackend.Close() + }() +} + +func (node *ClusterNode) DisconnectRelay(relayID string) { + node.relayConnectionsMu.Lock() + defer node.relayConnectionsMu.Unlock() + + node.hub.ReconnectPeer(relayID) +} + +func (node *ClusterNode) DisconnectRelayBySite(siteID string) { + node.relayConnectionsMu.Lock() + defer node.relayConnectionsMu.Unlock() + + node.hub.ReconnectPeerBySite(siteID) +} + +func (node *ClusterNode) DisconnectRelayByPartition(partitionNumber uint64) { + node.relayConnectionsMu.Lock() + defer node.relayConnectionsMu.Unlock() + + node.hub.ReconnectPeerByPartition(partitionNumber) +} + +func (node *ClusterNode) ClusterIO() clusterio.ClusterIOAgent { + return node.clusterioAgent +} + +func (node *ClusterNode) RelayStatus(relayID string) (RelayStatus, error) { + var status RelayStatus + + _, relayAdded := node.configController.ClusterController().State.Relays[relayID] + + if !relayAdded { + return RelayStatus{}, ERelayDoesNotExist + } + + connected, ping := node.hub.PeerStatus(relayID) + + status.Connected = connected + status.Ping = ping + status.ConnectedTo = node.ID() + status.Site = node.configController.ClusterController().RelaySite(relayID) + + return status, nil +} + +func (node *ClusterNode) localSnapshot(snapshotIndex uint64, snapshotId string) error { + return node.snapshotter.Snapshot(snapshotIndex, snapshotId) +} + +type ClusterNodeFacade struct { + node *ClusterNode +} + +func (clusterFacade *ClusterNodeFacade) AddNode(ctx context.Context, nodeConfig NodeConfig) error { + return clusterFacade.node.configController.AddNode(ctx, nodeConfig) +} + +func (clusterFacade *ClusterNodeFacade) RemoveNode(ctx context.Context, nodeID uint64) error { + return clusterFacade.node.configController.RemoveNode(ctx, nodeID) +} + +func (clusterFacade *ClusterNodeFacade) ReplaceNode(ctx context.Context, nodeID uint64, replacementNodeID uint64) error { + return clusterFacade.node.configController.ReplaceNode(ctx, nodeID, replacementNodeID) +} + +func (clusterFacade *ClusterNodeFacade) ClusterClient() *client.Client { + return clusterFacade.node.interClusterClient +} + +func (clusterFacade *ClusterNodeFacade) Decommission() error { + err, _ := clusterFacade.node.LeaveCluster() + + return err +} + +func (clusterFacade 
*ClusterNodeFacade) DecommissionPeer(nodeID uint64) error { + peerAddress := clusterFacade.PeerAddress(nodeID) + + if peerAddress.IsEmpty() { + return errors.New("No address for peer") + } + + return clusterFacade.node.interClusterClient.RemoveNode(context.TODO(), peerAddress, nodeID, 0, true, true) +} + +func (clusterFacade *ClusterNodeFacade) LocalNodeID() uint64 { + return clusterFacade.node.ID() +} + +func (clusterFacade *ClusterNodeFacade) PeerAddress(nodeID uint64) PeerAddress { + return clusterFacade.node.configController.ClusterController().ClusterMemberAddress(nodeID) +} + +func (clusterFacade *ClusterNodeFacade) Batch(siteID string, bucket string, updateBatch *UpdateBatch) (BatchResult, error) { + replicas, nApplied, err := clusterFacade.node.clusterioAgent.Batch(context.TODO(), siteID, bucket, updateBatch) + + if err == ESiteDoesNotExist { + return BatchResult{}, ENoSuchSite + } + + if err == EBucketDoesNotExist { + return BatchResult{}, ENoSuchBucket + } + + return BatchResult{ + Replicas: uint64(replicas), + NApplied: uint64(nApplied), + }, err +} + +func (clusterFacade *ClusterNodeFacade) Get(siteID string, bucket string, keys [][]byte) ([]*SiblingSet, error) { + siblingSets, err := clusterFacade.node.clusterioAgent.Get(context.TODO(), siteID, bucket, keys) + + if err == ESiteDoesNotExist { + return nil, ENoSuchSite + } + + if err == EBucketDoesNotExist { + return nil, ENoSuchBucket + } + + if err != nil { + return nil, err + } + + return siblingSets, nil +} + +func (clusterFacade *ClusterNodeFacade) GetMatches(siteID string, bucket string, keys [][]byte) (SiblingSetIterator, error) { + iter, err := clusterFacade.node.clusterioAgent.GetMatches(context.TODO(), siteID, bucket, keys) + + if err == ESiteDoesNotExist { + return nil, ENoSuchSite + } + + if err == EBucketDoesNotExist { + return nil, ENoSuchBucket + } + + if err != nil { + return nil, err + } + + return iter, nil +} + +func (clusterFacade *ClusterNodeFacade) LocalGetMatches(partitionNumber uint64, siteID string, bucketName string, keys [][]byte) (SiblingSetIterator, error) { + return clusterFacade.node.GetMatches(context.TODO(), partitionNumber, siteID, bucketName, keys) +} + +func (clusterFacade *ClusterNodeFacade) LocalGet(partitionNumber uint64, siteID string, bucketName string, keys [][]byte) ([]*SiblingSet, error) { + return clusterFacade.node.Get(context.TODO(), partitionNumber, siteID, bucketName, keys) +} + +func (clusterFacade *ClusterNodeFacade) LocalBatch(partitionNumber uint64, siteID string, bucketName string, updateBatch *UpdateBatch) (map[string]*SiblingSet, error) { + return clusterFacade.node.Batch(context.TODO(), partitionNumber, siteID, bucketName, updateBatch) +} + +func (clusterFacade *ClusterNodeFacade) LocalMerge(partitionNumber uint64, siteID string, bucketName string, patch map[string]*SiblingSet, broadcastToRelays bool) error { + return clusterFacade.node.Merge(context.TODO(), partitionNumber, siteID, bucketName, patch, broadcastToRelays) +} + +func (clusterFacade *ClusterNodeFacade) AddRelay(ctx context.Context, relayID string) error { + return clusterFacade.node.configController.ClusterCommand(ctx, ClusterAddRelayBody{ RelayID: relayID }) +} + +func (clusterFacade *ClusterNodeFacade) RemoveRelay(ctx context.Context, relayID string) error { + return clusterFacade.node.configController.ClusterCommand(ctx, ClusterRemoveRelayBody{ RelayID: relayID }) +} + +func (clusterFacade *ClusterNodeFacade) MoveRelay(ctx context.Context, relayID string, siteID string) error { + return 
clusterFacade.node.configController.ClusterCommand(ctx, ClusterMoveRelayBody{ RelayID: relayID, SiteID: siteID }) +} + +func (clusterFacade *ClusterNodeFacade) AddSite(ctx context.Context, siteID string) error { + return clusterFacade.node.configController.ClusterCommand(ctx, ClusterAddSiteBody{ SiteID: siteID }) +} + +func (clusterFacade *ClusterNodeFacade) RemoveSite(ctx context.Context, siteID string) error { + return clusterFacade.node.configController.ClusterCommand(ctx, ClusterRemoveSiteBody{ SiteID: siteID }) +} + +func (clusterFacade *ClusterNodeFacade) AcceptRelayConnection(conn *websocket.Conn, header http.Header) { + clusterFacade.node.AcceptRelayConnection(conn, header) +} + +func (clusterFacade *ClusterNodeFacade) ClusterNodes() []NodeConfig { + var nodeConfigs []NodeConfig = clusterFacade.node.configController.ClusterController().ClusterNodeConfigs() + + for i, nodeConfig := range nodeConfigs { + nodeConfigs[i] = NodeConfig{ + Address: nodeConfig.Address, + Capacity: nodeConfig.Capacity, + } + } + + return nodeConfigs +} + +func (clusterFacade *ClusterNodeFacade) ClusterSettings() ClusterSettings { + return clusterFacade.node.configController.ClusterController().State.ClusterSettings +} + +func (clusterFacade *ClusterNodeFacade) PartitionDistribution() [][]uint64 { + var partitionDistribution [][]uint64 = make([][]uint64, clusterFacade.node.configController.ClusterController().State.ClusterSettings.Partitions) + + for partition, _ := range partitionDistribution { + partitionDistribution[partition] = clusterFacade.node.configController.ClusterController().PartitionOwners(uint64(partition)) + } + + return partitionDistribution +} + +func (clusterFacade *ClusterNodeFacade) TokenAssignments() []uint64 { + return clusterFacade.node.configController.ClusterController().State.Tokens +} + +func (clusterFacade *ClusterNodeFacade) GetRelayStatus(ctx context.Context, relayID string) (RelayStatus, error) { + var siteID string = clusterFacade.node.configController.ClusterController().RelaySite(relayID) + + return clusterFacade.node.clusterioAgent.RelayStatus(ctx, siteID, relayID) +} + +func (clusterFacade *ClusterNodeFacade) LocalGetRelayStatus(relayID string) (RelayStatus, error) { + return clusterFacade.node.RelayStatus(relayID) +} + +func (clusterFacade *ClusterNodeFacade) LocalLogDump() (LogDump, error) { + var logDump LogDump + + baseSnapshot, entries, err := clusterFacade.node.configController.LogDump() + + if err != nil { + Log.Errorf("Error while retrieving log dump: %v", err.Error()) + + return LogDump{}, err + } + + if !raft.IsEmptySnap(baseSnapshot) { + var clusterState ClusterState + + err = clusterState.Recover(baseSnapshot.Data) + + if err != nil { + Log.Errorf("Error while parsing base cluster snapshot: %v", err.Error()) + + return LogDump{}, err + } + + logDump.BaseSnapshot.Index = baseSnapshot.Metadata.Index + logDump.BaseSnapshot.State = clusterState + } + + logDump.Entries = make([]LogEntry, 0, len(entries)) + + for _, entry := range entries { + var logEntry LogEntry + + logEntry.Index = entry.Index + + switch entry.Type { + case raftpb.EntryConfChange: + var confChange raftpb.ConfChange + var err error + + if err := confChange.Unmarshal(entry.Data); err != nil { + Log.Errorf("Error while parsing committed entry: %v", err.Error()) + + return LogDump{}, err + } + + clusterCommand, err := DecodeClusterCommand(confChange.Context) + + if err != nil { + Log.Errorf("Error while parsing committed entry: %v", err.Error()) + + return LogDump{}, err + } + + logEntry.Command = 
clusterCommand + case raftpb.EntryNormal: + if len(entry.Data) == 0 { + continue + } + + clusterCommand, err := DecodeClusterCommand(entry.Data) + + if err != nil { + Log.Errorf("Error while parsing committed entry: %v", err.Error()) + + return LogDump{}, err + } + + logEntry.Command = clusterCommand + } + + logDump.Entries = append(logDump.Entries, logEntry) + } + + return logDump, nil +} + +func (clusterFacade *ClusterNodeFacade) ClusterSnapshot(ctx context.Context) (Snapshot, error) { + snapshotId, err := UUID() + + if err != nil { + return Snapshot{}, err + } + + if err := clusterFacade.node.configController.ClusterCommand(ctx, ClusterSnapshotBody{ UUID: snapshotId }); err != nil { + return Snapshot{}, err + } + + return Snapshot{UUID: snapshotId}, nil +} + +func (clusterFacade *ClusterNodeFacade) CheckLocalSnapshotStatus(snapshotId string) error { + return clusterFacade.node.snapshotter.CheckSnapshotStatus(snapshotId) +} + +func (clusterFacade *ClusterNodeFacade) WriteLocalSnapshot(snapshotId string, w io.Writer) error { + return clusterFacade.node.snapshotter.WriteSnapshot(snapshotId, w) +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/node/clusterio_node_client.go b/vendor/github.com/armPelionEdge/devicedb/node/clusterio_node_client.go new file mode 100644 index 0000000..26f8c6f --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/node/clusterio_node_client.go @@ -0,0 +1,457 @@ +package node +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + + . "github.com/armPelionEdge/devicedb/bucket" + . "github.com/armPelionEdge/devicedb/cluster" + . "github.com/armPelionEdge/devicedb/data" + . "github.com/armPelionEdge/devicedb/error" + . "github.com/armPelionEdge/devicedb/logging" + . "github.com/armPelionEdge/devicedb/raft" + . 
"github.com/armPelionEdge/devicedb/routes" +) + +type NodeClient struct { + configController ClusterConfigController + localNode Node + httpClient *http.Client +} + +func NewNodeClient(localNode Node, configController ClusterConfigController) *NodeClient { + defaultTransport := http.DefaultTransport.(*http.Transport) + transport := &http.Transport{} + transport.MaxIdleConns = 0 + transport.MaxIdleConnsPerHost = 1000 + transport.IdleConnTimeout = defaultTransport.IdleConnTimeout + + return &NodeClient{ + configController: configController, + localNode: localNode, + httpClient: &http.Client{ Transport: transport }, + } +} + +func (nodeClient *NodeClient) Merge(ctx context.Context, nodeID uint64, partition uint64, siteID string, bucket string, patch map[string]*SiblingSet, broadcastToRelays bool) error { + var nodeAddress PeerAddress = nodeClient.configController.ClusterController().ClusterMemberAddress(nodeID) + + if nodeAddress.IsEmpty() { + return ENoSuchNode + } + + if nodeID == nodeClient.localNode.ID() { + err := nodeClient.localNode.Merge(ctx, partition, siteID, bucket, patch, broadcastToRelays) + + switch err { + case ENoSuchBucket: + return EBucketDoesNotExist + case ENoSuchSite: + return ESiteDoesNotExist + case nil: + return nil + default: + return err + } + } + + encodedPatch, err := json.Marshal(patch) + + if err != nil { + return err + } + + broadcastQuery := "" + + if broadcastToRelays { + broadcastQuery = "?broadcast=true" + } + + status, body, err := nodeClient.sendRequest(ctx, "POST", fmt.Sprintf("http://%s:%d/partitions/%d/sites/%s/buckets/%s/merges%s", nodeAddress.Host, nodeAddress.Port, partition, siteID, bucket, broadcastQuery), encodedPatch) + + if err != nil { + return err + } + + switch status { + case 404: + dbErr, err := DBErrorFromJSON(body) + + if err != nil { + return err + } + + return dbErr + case 200: + var batchResult BatchResult + + if err := json.Unmarshal(body, &batchResult); err != nil { + return err + } + + if batchResult.NApplied == 0 { + return ENoQuorum + } + + return nil + default: + Log.Warningf("Merge request to node %d for partition %d at site %s and bucket %s received a %d status code", nodeID, partition, siteID, bucket, status) + + return EStorage + } +} + +func (nodeClient *NodeClient) Batch(ctx context.Context, nodeID uint64, partition uint64, siteID string, bucket string, updateBatch *UpdateBatch) (map[string]*SiblingSet, error) { + var nodeAddress PeerAddress = nodeClient.configController.ClusterController().ClusterMemberAddress(nodeID) + + if nodeAddress.IsEmpty() { + return nil, ENoSuchNode + } + + if nodeID == nodeClient.localNode.ID() { + patch, err := nodeClient.localNode.Batch(ctx, partition, siteID, bucket, updateBatch) + + switch err { + case ENoSuchBucket: + return nil, EBucketDoesNotExist + case ENoSuchSite: + return nil, ESiteDoesNotExist + case nil: + return patch, nil + default: + return nil, err + } + } + + encodedUpdateBatch, err := updateBatch.ToJSON() + + if err != nil { + return nil, err + } + + status, body, err := nodeClient.sendRequest(ctx, "POST", fmt.Sprintf("http://%s:%d/partitions/%d/sites/%s/buckets/%s/batches", nodeAddress.Host, nodeAddress.Port, partition, siteID, bucket), encodedUpdateBatch) + + if err != nil { + return nil, err + } + + switch status { + case 404: + dbErr, err := DBErrorFromJSON(body) + + if err != nil { + return nil, err + } + + return nil, dbErr + case 200: + var batchResult BatchResult + + if err := json.Unmarshal(body, &batchResult); err != nil { + return nil, err + } + + if 
batchResult.NApplied == 0 { + return nil, ENoQuorum + } + + return batchResult.Patch, nil + default: + Log.Warningf("Batch request to node %d for partition %d at site %s and bucket %s received a %d status code", nodeID, partition, siteID, bucket, status) + + return nil, EStorage + } +} + +func (nodeClient *NodeClient) Get(ctx context.Context, nodeID uint64, partition uint64, siteID string, bucket string, keys [][]byte) ([]*SiblingSet, error) { + var nodeAddress PeerAddress = nodeClient.configController.ClusterController().ClusterMemberAddress(nodeID) + + if nodeAddress.IsEmpty() { + return nil, ENoSuchNode + } + + if nodeID == nodeClient.localNode.ID() { + siblingSets, err := nodeClient.localNode.Get(ctx, partition, siteID, bucket, keys) + + switch err { + case ENoSuchBucket: + return nil, EBucketDoesNotExist + case ENoSuchSite: + return nil, ESiteDoesNotExist + case nil: + return siblingSets, nil + default: + return nil, err + } + } + + var queryString string + + for i, key := range keys { + queryString += "key=" + url.QueryEscape(string(key)) + + if i != len(keys) - 1 { + queryString += "&" + } + } + + status, body, err := nodeClient.sendRequest(ctx, "GET", fmt.Sprintf("http://%s:%d/partitions/%d/sites/%s/buckets/%s/keys?%s", nodeAddress.Host, nodeAddress.Port, partition, siteID, bucket, queryString), nil) + + if err != nil { + return nil, err + } + + switch status { + case 404: + dbErr, err := DBErrorFromJSON(body) + + if err != nil { + return nil, err + } + + return nil, dbErr + case 200: + default: + Log.Warningf("Get request to node %d for partition %d at site %s and bucket %s received a %d status code", nodeID, partition, siteID, bucket, status) + + return nil, EStorage + } + + var entries []InternalEntry + + err = json.Unmarshal(body, &entries) + + if err != nil { + return nil, err + } + + var siblingSets []*SiblingSet = make([]*SiblingSet, len(entries)) + + for i, entry := range entries { + siblingSets[i] = entry.Siblings + } + + return siblingSets, nil +} + +func (nodeClient *NodeClient) GetMatches(ctx context.Context, nodeID uint64, partition uint64, siteID string, bucket string, keys [][]byte) (SiblingSetIterator, error) { + var nodeAddress PeerAddress = nodeClient.configController.ClusterController().ClusterMemberAddress(nodeID) + + if nodeAddress.IsEmpty() { + return nil, ENoSuchNode + } + + if nodeID == nodeClient.localNode.ID() { + iter, err := nodeClient.localNode.GetMatches(ctx, partition, siteID, bucket, keys) + + switch err { + case ENoSuchBucket: + return nil, EBucketDoesNotExist + case ENoSuchSite: + return nil, ESiteDoesNotExist + case nil: + return iter, nil + default: + return nil, err + } + } + + var queryString string + + for i, key := range keys { + queryString += "prefix=" + url.QueryEscape(string(key)) + + if i != len(keys) - 1 { + queryString += "&" + } + } + + status, body, err := nodeClient.sendRequest(ctx, "GET", fmt.Sprintf("http://%s:%d/partitions/%d/sites/%s/buckets/%s/keys?%s", nodeAddress.Host, nodeAddress.Port, partition, siteID, bucket, queryString), nil) + + if err != nil { + return nil, err + } + + switch status { + case 404: + dbErr, err := DBErrorFromJSON(body) + + if err != nil { + return nil, err + } + + return nil, dbErr + case 200: + default: + Log.Warningf("Get matches request to node %d for partition %d at site %s and bucket %s received a %d status code", nodeID, partition, siteID, bucket, status) + + return nil, EStorage + } + + var entries []InternalEntry + + err = json.Unmarshal(body, &entries) + + if err != nil { + return nil, err + } 
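+ // Wrap the decoded entries in an in-memory iterator so remote results can be consumed through the same SiblingSetIterator interface as local ones. 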
+ + return newInternalEntrySiblingSetIterator(entries), nil +} + +func (nodeClient *NodeClient) RelayStatus(ctx context.Context, nodeID uint64, siteID string, relayID string) (RelayStatus, error) { + var nodeAddress PeerAddress = nodeClient.configController.ClusterController().ClusterMemberAddress(nodeID) + + if nodeAddress.IsEmpty() { + return RelayStatus{}, ENoSuchNode + } + + if nodeID == nodeClient.localNode.ID() { + relayStatus, err := nodeClient.localNode.RelayStatus(relayID) + + switch err { + case nil: + return relayStatus, nil + default: + return RelayStatus{}, err + } + } + + status, body, err := nodeClient.sendRequest(ctx, "GET", fmt.Sprintf("http://%s:%d/relays/%s?local=true", nodeAddress.Host, nodeAddress.Port, relayID), nil) + + if err != nil { + return RelayStatus{}, err + } + + switch status { + case 404: + return RelayStatus{}, ERelayDoesNotExist + case 200: + default: + return RelayStatus{}, EStorage + } + + var relayStatus RelayStatus + + err = json.Unmarshal(body, &relayStatus) + + if err != nil { + return RelayStatus{}, err + } + + return relayStatus, nil +} + +func (nodeClient *NodeClient) LocalNodeID() uint64 { + return nodeClient.configController.ClusterController().LocalNodeID +} + +func (nodeClient *NodeClient) sendRequest(ctx context.Context, httpVerb string, endpointURL string, body []byte) (int, []byte, error) { + request, err := http.NewRequest(httpVerb, endpointURL, bytes.NewReader(body)) + + if err != nil { + return 0, nil, err + } + + request = request.WithContext(ctx) + + resp, err := nodeClient.httpClient.Do(request) + + if err != nil { + return 0, nil, err + } + + defer resp.Body.Close() + + responseBody, err := ioutil.ReadAll(resp.Body) + + if err != nil { + return 0, nil, err + } + + return resp.StatusCode, responseBody, nil +} + +type internalEntrySiblingSetIterator struct { + entries []InternalEntry + currentEntry int +} + +func newInternalEntrySiblingSetIterator(entries []InternalEntry) *internalEntrySiblingSetIterator { + return &internalEntrySiblingSetIterator{ + entries: entries, + currentEntry: -1, + } +} + +func (iter *internalEntrySiblingSetIterator) Next() bool { + if iter.currentEntry < len(iter.entries) { + iter.currentEntry++ + } + + return iter.currentEntry < len(iter.entries) +} + +func (iter *internalEntrySiblingSetIterator) Prefix() []byte { + if iter.currentEntry < 0 || iter.currentEntry >= len(iter.entries) { + return nil + } + + return []byte(iter.entries[iter.currentEntry].Prefix) +} + +func (iter *internalEntrySiblingSetIterator) Key() []byte { + if iter.currentEntry < 0 || iter.currentEntry >= len(iter.entries) { + return nil + } + + return []byte(iter.entries[iter.currentEntry].Key) +} + +func (iter *internalEntrySiblingSetIterator) Value() *SiblingSet { + if iter.currentEntry < 0 || iter.currentEntry >= len(iter.entries) { + return nil + } + + return iter.entries[iter.currentEntry].Siblings +} + +func (iter *internalEntrySiblingSetIterator) LocalVersion() uint64 { + return 0 +} + +func (iter *internalEntrySiblingSetIterator) Release() { +} + +func (iter *internalEntrySiblingSetIterator) Error() error { + return nil +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/node/clusterio_partition_resolver.go b/vendor/github.com/armPelionEdge/devicedb/node/clusterio_partition_resolver.go new file mode 100644 index 0000000..1e956f2 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/node/clusterio_partition_resolver.go @@ -0,0 +1,47 @@ +package node +// + // Copyright (c) 2019 ARM Limited. 
+ // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + . "github.com/armPelionEdge/devicedb/cluster" +) + +type PartitionResolver struct { + configController ClusterConfigController +} + +func NewPartitionResolver(configController ClusterConfigController) *PartitionResolver { + return &PartitionResolver{ + configController: configController, + } +} + +func (partitionResolver *PartitionResolver) Partition(partitioningKey string) uint64 { + return partitionResolver.configController.ClusterController().Partition(partitioningKey) +} + +func (partitionResolver *PartitionResolver) ReplicaNodes(partition uint64) []uint64 { + return partitionResolver.configController.ClusterController().PartitionOwners(partition) +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/node/errors.go b/vendor/github.com/armPelionEdge/devicedb/node/errors.go new file mode 100644 index 0000000..bc3d8ce --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/node/errors.go @@ -0,0 +1,33 @@ +package node +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. 
+ // + + +import ( + "errors" +) + +var EDecommissioned = errors.New("") +var ERemoved = errors.New("") +var ESnapshotsNotEnabled = errors.New("No snapshot directory configured") \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/node/node.go b/vendor/github.com/armPelionEdge/devicedb/node/node.go new file mode 100644 index 0000000..4502172 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/node/node.go @@ -0,0 +1,62 @@ +package node +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "context" + + . "github.com/armPelionEdge/devicedb/bucket" + . "github.com/armPelionEdge/devicedb/data" + . "github.com/armPelionEdge/devicedb/routes" +) + +// A Node coordinates interactions between +// internal node components +type Node interface { + // Start up the node. + // Case 1) This node is not yet part of a cluster + // It will use the initialization options to figure out whether it should start a new cluster or join an existing one. + // Case 2) This node is part of a cluster and the decommissioning flag is not set + // It should start up and resume its operations as a member of its cluster. Start will run until Stop is called, + // in which case it will return nil, or until the node is removed from the cluster in which case it returns ERemoved + // or EDecommissioned + // Case 3) This node is part of a cluster and the decommissioning flag is set + // It should start up in decommissioning mode, allowing only operations + // which transfer its partitions to new owners. 
After it has been removed from the cluster + // Start returns EDecommissioned or ERemoved + // EDecommissioned is returned when the node was removed from the cluster after successfully transferring away all its + // data to other nodes in the cluster + // ERemoved is returned when the node was removed from the cluster before successfully transferring away all its data + // to other nodes in the cluster + ID() uint64 + Start(options NodeInitializationOptions) error + // Shut down the node + Stop() + Batch(ctx context.Context, partition uint64, siteID string, bucket string, updateBatch *UpdateBatch) (map[string]*SiblingSet, error) + Merge(ctx context.Context, partition uint64, siteID string, bucket string, patch map[string]*SiblingSet, broadcastToRelays bool) error + Get(ctx context.Context, partition uint64, siteID string, bucket string, keys [][]byte) ([]*SiblingSet, error) + GetMatches(ctx context.Context, partition uint64, siteID string, bucket string, keys [][]byte) (SiblingSetIterator, error) + RelayStatus(relayID string) (RelayStatus, error) +} diff --git a/vendor/github.com/armPelionEdge/devicedb/node/node_facade.go b/vendor/github.com/armPelionEdge/devicedb/node/node_facade.go new file mode 100644 index 0000000..6c64a04 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/node/node_facade.go @@ -0,0 +1,191 @@ +package node +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. 
+ // + + +type NodeCoordinatorFacade struct { + node *ClusterNode +} + +func (nodeFacade *NodeCoordinatorFacade) ID() uint64 { + return nodeFacade.node.ID() +} + +func (nodeFacade *NodeCoordinatorFacade) AddPartition(partitionNumber uint64) { + if nodeFacade.node.partitionPool.Get(partitionNumber) == nil { + partition := nodeFacade.node.partitionFactory.CreatePartition(partitionNumber, nodeFacade.node.sitePool(partitionNumber)) + + for siteID, _ := range nodeFacade.node.ClusterConfigController().ClusterController().State.Sites { + if nodeFacade.node.configController.ClusterController().Partition(siteID) == partitionNumber { + partition.Sites().Add(siteID) + } + } + + partition.LockReads() + partition.LockWrites() + nodeFacade.node.partitionPool.Add(partition) + } +} + +func (nodeFacade *NodeCoordinatorFacade) RemovePartition(partitionNumber uint64) { + nodeFacade.node.partitionPool.Remove(partitionNumber) +} + +func (nodeFacade *NodeCoordinatorFacade) EnableOutgoingTransfers(partitionNumber uint64) { + nodeFacade.node.transferAgent.EnableOutgoingTransfers(partitionNumber) +} + +func (nodeFacade *NodeCoordinatorFacade) DisableOutgoingTransfers(partitionNumber uint64) { + nodeFacade.node.transferAgent.DisableOutgoingTransfers(partitionNumber) +} + +func (nodeFacade *NodeCoordinatorFacade) StartIncomingTransfer(partitionNumber uint64, replicaNumber uint64) { + nodeFacade.node.transferAgent.StartTransfer(partitionNumber, replicaNumber) +} + +func (nodeFacade *NodeCoordinatorFacade) StopIncomingTransfer(partitionNumber uint64, replicaNumber uint64) { + nodeFacade.node.transferAgent.StopTransfer(partitionNumber, replicaNumber) +} + +func (nodeFacade *NodeCoordinatorFacade) LockPartitionWrites(partitionNumber uint64) { + partition := nodeFacade.node.partitionPool.Get(partitionNumber) + + if partition != nil { + partition.LockWrites() + } +} + +func (nodeFacade *NodeCoordinatorFacade) UnlockPartitionWrites(partitionNumber uint64) { + partition := nodeFacade.node.partitionPool.Get(partitionNumber) + + if partition != nil { + partition.UnlockWrites() + } +} + +func (nodeFacade *NodeCoordinatorFacade) LockPartitionReads(partitionNumber uint64) { + partition := nodeFacade.node.partitionPool.Get(partitionNumber) + + if partition != nil { + partition.LockReads() + } +} + +func (nodeFacade *NodeCoordinatorFacade) UnlockPartitionReads(partitionNumber uint64) { + partition := nodeFacade.node.partitionPool.Get(partitionNumber) + + if partition != nil { + partition.UnlockReads() + } +} + +func (nodeFacade *NodeCoordinatorFacade) AddSite(siteID string) { + partitionNumber := nodeFacade.node.configController.ClusterController().Partition(siteID) + partition := nodeFacade.node.partitionPool.Get(partitionNumber) + + if partition == nil { + return + } + + partition.Sites().Add(siteID) +} + +func (nodeFacade *NodeCoordinatorFacade) RemoveSite(siteID string) { + partitionNumber := nodeFacade.node.configController.ClusterController().Partition(siteID) + partition := nodeFacade.node.partitionPool.Get(partitionNumber) + + if partition == nil { + return + } + + partition.Sites().Remove(siteID) + nodeFacade.node.DisconnectRelayBySite(siteID) +} + +func (nodeFacade *NodeCoordinatorFacade) AddRelay(relayID string) { +} + +func (nodeFacade *NodeCoordinatorFacade) RemoveRelay(relayID string) { + nodeFacade.node.DisconnectRelay(relayID) +} + +func (nodeFacade *NodeCoordinatorFacade) MoveRelay(relayID string, siteID string) { + nodeFacade.node.DisconnectRelay(relayID) +} + +func (nodeFacade *NodeCoordinatorFacade) 
DisconnectRelays(partitionNumber uint64) { + nodeFacade.node.DisconnectRelayByPartition(partitionNumber) +} + +func (nodeFacade *NodeCoordinatorFacade) OwnedPartitionReplicas() map[uint64]map[uint64]bool { + var ownedPartitionReplicas map[uint64]map[uint64]bool = make(map[uint64]map[uint64]bool, 0) + + for _, partitionReplica := range nodeFacade.node.configController.ClusterController().LocalNodeOwnedPartitionReplicas() { + if _, ok := ownedPartitionReplicas[partitionReplica.Partition]; !ok { + ownedPartitionReplicas[partitionReplica.Partition] = make(map[uint64]bool) + } + + ownedPartitionReplicas[partitionReplica.Partition][partitionReplica.Replica] = true + } + + return ownedPartitionReplicas +} + +func (nodeFacade *NodeCoordinatorFacade) HeldPartitionReplicas() map[uint64]map[uint64]bool { + var heldPartitionReplicas map[uint64]map[uint64]bool = make(map[uint64]map[uint64]bool, 0) + + for _, partitionReplica := range nodeFacade.node.configController.ClusterController().LocalNodeHeldPartitionReplicas() { + if _, ok := heldPartitionReplicas[partitionReplica.Partition]; !ok { + heldPartitionReplicas[partitionReplica.Partition] = make(map[uint64]bool) + } + + heldPartitionReplicas[partitionReplica.Partition][partitionReplica.Replica] = true + } + + return heldPartitionReplicas +} + +func (nodeFacade *NodeCoordinatorFacade) NeighborsWithCapacity() int { + var n int + + for _, node := range nodeFacade.node.configController.ClusterController().State.Nodes { + if node.Capacity > 0 { + n++ + } + } + + return n +} + +func (nodeFacade *NodeCoordinatorFacade) NotifyJoinedCluster() { + nodeFacade.node.joinedCluster <- 1 +} + +func (nodeFacade *NodeCoordinatorFacade) NotifyLeftCluster() { + nodeFacade.node.leftCluster <- 1 +} + +func (nodeFacade *NodeCoordinatorFacade) NotifyEmpty() { + nodeFacade.node.notifyEmpty() +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/node/node_state_coordinator.go b/vendor/github.com/armPelionEdge/devicedb/node/node_state_coordinator.go new file mode 100644 index 0000000..c5cc0e4 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/node/node_state_coordinator.go @@ -0,0 +1,200 @@ +package node +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + . "github.com/armPelionEdge/devicedb/cluster" + . "github.com/armPelionEdge/devicedb/node_facade" + . 
"github.com/armPelionEdge/devicedb/logging" +) + +type ClusterNodeStateCoordinator struct { + nodeFacade ClusterNodeCoordinatorFacade + partitionUpdater ClusterNodePartitionUpdater +} + +func NewClusterNodeStateCoordinator(nodeFacade ClusterNodeCoordinatorFacade, partitionUpdater ClusterNodePartitionUpdater) *ClusterNodeStateCoordinator { + if partitionUpdater == nil { + // allow dependency injection for unit testing but by default use default implementation + partitionUpdater = NewNodePartitionUpdater(nodeFacade) + } + + return &ClusterNodeStateCoordinator{ + nodeFacade: nodeFacade, + partitionUpdater: partitionUpdater, + } +} + +func (coordinator *ClusterNodeStateCoordinator) InitializeNodeState() { + ownedPartitionReplicas := coordinator.nodeFacade.OwnedPartitionReplicas() + heldPartitionReplicas := coordinator.nodeFacade.HeldPartitionReplicas() + + for partitionNumber, _ := range heldPartitionReplicas { + coordinator.partitionUpdater.UpdatePartition(partitionNumber) + } + + for partitionNumber, _ := range ownedPartitionReplicas { + coordinator.partitionUpdater.UpdatePartition(partitionNumber) + } + + coordinator.startTransfers() +} + +func (coordinator *ClusterNodeStateCoordinator) startTransfers() { + ownedPartitionReplicas := coordinator.nodeFacade.OwnedPartitionReplicas() + heldPartitionReplicas := coordinator.nodeFacade.HeldPartitionReplicas() + + for partitionNumber, replicas := range ownedPartitionReplicas { + for replicaNumber, _ := range replicas { + if heldPartitionReplicas[partitionNumber] != nil && heldPartitionReplicas[partitionNumber][replicaNumber] { + // Since the node already holds and owns this partition replica there is nothing to do + continue + } + + if heldPartitionReplicas[partitionNumber] == nil || !heldPartitionReplicas[partitionNumber][replicaNumber] { + // This indicates that the node has not yet fully transferred the partition form its old holder + // start transfer initiates the transfer from the old owner and starts downloading data + // if another download is not already in progress for that partition + coordinator.nodeFacade.StartIncomingTransfer(partitionNumber, replicaNumber) + } + } + } +} + +func (coordinator *ClusterNodeStateCoordinator) ProcessClusterUpdates(deltas []ClusterStateDelta) { + for _, delta := range deltas { + switch delta.Type { + case DeltaNodeAdd: + Log.Infof("Local node (id = %d) was added to a cluster.", coordinator.nodeFacade.ID()) + coordinator.nodeFacade.NotifyJoinedCluster() + case DeltaNodeRemove: + Log.Infof("Local node (id = %d) was removed from its cluster. 
+
+func (coordinator *ClusterNodeStateCoordinator) ProcessClusterUpdates(deltas []ClusterStateDelta) {
+    for _, delta := range deltas {
+        switch delta.Type {
+        case DeltaNodeAdd:
+            Log.Infof("Local node (id = %d) was added to a cluster.", coordinator.nodeFacade.ID())
+            coordinator.nodeFacade.NotifyJoinedCluster()
+        case DeltaNodeRemove:
+            Log.Infof("Local node (id = %d) was removed from its cluster. It will now shut down...", coordinator.nodeFacade.ID())
+            coordinator.nodeFacade.NotifyLeftCluster()
+        case DeltaNodeGainPartitionReplica:
+            partition := delta.Delta.(NodeGainPartitionReplica).Partition
+            replica := delta.Delta.(NodeGainPartitionReplica).Replica
+
+            Log.Infof("Local node (id = %d) gained a partition replica (%d, %d)", coordinator.nodeFacade.ID(), partition, replica)
+
+            coordinator.partitionUpdater.UpdatePartition(partition)
+        case DeltaNodeLosePartitionReplica:
+            partition := delta.Delta.(NodeLosePartitionReplica).Partition
+            replica := delta.Delta.(NodeLosePartitionReplica).Replica
+
+            Log.Infof("Local node (id = %d) lost a partition replica (%d, %d)", coordinator.nodeFacade.ID(), partition, replica)
+
+            coordinator.partitionUpdater.UpdatePartition(partition)
+        case DeltaNodeGainPartitionReplicaOwnership:
+            partition := delta.Delta.(NodeGainPartitionReplicaOwnership).Partition
+            replica := delta.Delta.(NodeGainPartitionReplicaOwnership).Replica
+
+            Log.Infof("Local node (id = %d) gained ownership over a partition replica (%d, %d)", coordinator.nodeFacade.ID(), partition, replica)
+
+            coordinator.partitionUpdater.UpdatePartition(partition)
+            coordinator.nodeFacade.StartIncomingTransfer(partition, replica)
+        case DeltaNodeLosePartitionReplicaOwnership:
+            partition := delta.Delta.(NodeLosePartitionReplicaOwnership).Partition
+            replica := delta.Delta.(NodeLosePartitionReplicaOwnership).Replica
+
+            Log.Infof("Local node (id = %d) lost ownership over a partition replica (%d, %d)", coordinator.nodeFacade.ID(), partition, replica)
+
+            coordinator.nodeFacade.StopIncomingTransfer(partition, replica)
+            coordinator.partitionUpdater.UpdatePartition(partition)
+        case DeltaSiteAdded:
+            site := delta.Delta.(SiteAdded).SiteID
+            coordinator.nodeFacade.AddSite(site)
+        case DeltaSiteRemoved:
+            site := delta.Delta.(SiteRemoved).SiteID
+            coordinator.nodeFacade.RemoveSite(site)
+        case DeltaRelayAdded:
+            relay := delta.Delta.(RelayAdded).RelayID
+            coordinator.nodeFacade.AddRelay(relay)
+        case DeltaRelayRemoved:
+            relay := delta.Delta.(RelayRemoved).RelayID
+            coordinator.nodeFacade.RemoveRelay(relay)
+        case DeltaRelayMoved:
+            relay := delta.Delta.(RelayMoved).RelayID
+            site := delta.Delta.(RelayMoved).SiteID
+            coordinator.nodeFacade.MoveRelay(relay, site)
+        }
+    }
+
+    if len(coordinator.nodeFacade.OwnedPartitionReplicas()) == 0 && (len(coordinator.nodeFacade.HeldPartitionReplicas()) == 0 || coordinator.nodeFacade.NeighborsWithCapacity() == 0) {
+        coordinator.nodeFacade.NotifyEmpty()
+    }
+}
+
+type ClusterNodePartitionUpdater interface {
+    UpdatePartition(partitionNumber uint64)
+}
+
+type NodePartitionUpdater struct {
+    nodeFacade ClusterNodeCoordinatorFacade
+}
+
+func NewNodePartitionUpdater(nodeFacade ClusterNodeCoordinatorFacade) *NodePartitionUpdater {
+    return &NodePartitionUpdater{
+        nodeFacade: nodeFacade,
+    }
+}
+
+func (partitionUpdater *NodePartitionUpdater) UpdatePartition(partitionNumber uint64) {
+    // does this node hold some replica of this partition?
+    nodeHoldsPartition := len(partitionUpdater.nodeFacade.HeldPartitionReplicas()[partitionNumber]) > 0
+    // does this node own some replica of this partition?
+    nodeOwnsPartition := len(partitionUpdater.nodeFacade.OwnedPartitionReplicas()[partitionNumber]) > 0
+
+    if !nodeOwnsPartition && !nodeHoldsPartition {
+        Log.Infof("Local node (id = %d) no longer owns or holds any replica of partition %d.
It will remove this partition from its local store", partitionUpdater.nodeFacade.ID(), partitionNumber) + + partitionUpdater.nodeFacade.DisconnectRelays(partitionNumber) + partitionUpdater.nodeFacade.DisableOutgoingTransfers(partitionNumber) + partitionUpdater.nodeFacade.LockPartitionReads(partitionNumber) + partitionUpdater.nodeFacade.LockPartitionWrites(partitionNumber) + partitionUpdater.nodeFacade.RemovePartition(partitionNumber) + + return + } + + partitionUpdater.nodeFacade.AddPartition(partitionNumber) + + if nodeOwnsPartition { + partitionUpdater.nodeFacade.UnlockPartitionWrites(partitionNumber) + } else { + partitionUpdater.nodeFacade.DisconnectRelays(partitionNumber) + partitionUpdater.nodeFacade.LockPartitionWrites(partitionNumber) + } + + if nodeHoldsPartition { + // allow reads so this partition data can be transferred to another node + // or this node can serve reads for this partition + partitionUpdater.nodeFacade.UnlockPartitionReads(partitionNumber) + partitionUpdater.nodeFacade.EnableOutgoingTransfers(partitionNumber) + } else { + // lock reads until this node has finalized a partition transfer + partitionUpdater.nodeFacade.DisableOutgoingTransfers(partitionNumber) + partitionUpdater.nodeFacade.LockPartitionReads(partitionNumber) + } +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/node/options.go b/vendor/github.com/armPelionEdge/devicedb/node/options.go new file mode 100644 index 0000000..c9fd2ff --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/node/options.go @@ -0,0 +1,69 @@ +package node +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + . 
"github.com/armPelionEdge/devicedb/cluster" +) + +type NodeInitializationOptions struct { + StartCluster bool + JoinCluster bool + ClusterSettings ClusterSettings + SeedNodeHost string + SeedNodePort int + ClusterHost string + ClusterPort int + ExternalHost string + ExternalPort int + SyncMaxSessions uint + SyncPathLimit uint32 + SyncPeriod uint + SnapshotDirectory string +} + +func (options NodeInitializationOptions) SnapshotsEnabled() bool { + return options.SnapshotDirectory != "" +} + +func (options NodeInitializationOptions) ShouldStartCluster() bool { + return options.StartCluster +} + +func (options NodeInitializationOptions) ShouldJoinCluster() bool { + return options.JoinCluster +} + +func (options NodeInitializationOptions) ClusterAddress() (host string, port int) { + return options.ClusterHost, options.ClusterPort +} + +func (options NodeInitializationOptions) ExternalAddress() (host string, port int) { + return options.ExternalHost, options.ExternalPort +} + +func (options NodeInitializationOptions) SeedNode() (host string, port int) { + return options.SeedNodeHost, options.SeedNodePort +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/node/snapshotter.go b/vendor/github.com/armPelionEdge/devicedb/node/snapshotter.go new file mode 100644 index 0000000..e6d06f9 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/node/snapshotter.go @@ -0,0 +1,208 @@ +package node +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "os" + "archive/tar" + "fmt" + "io" + "io/ioutil" + "path" + "sync" + + . "github.com/armPelionEdge/devicedb/error" + . "github.com/armPelionEdge/devicedb/logging" + . 
"github.com/armPelionEdge/devicedb/storage" +) + + +type Snapshotter struct { + nodeID uint64 + snapshotsDirectory string + storageDriver StorageDriver + ongoingSnapshots map[string]bool + mu sync.Mutex +} + +func (snapshotter *Snapshotter) lazyInit() { + snapshotter.mu.Lock() + defer snapshotter.mu.Unlock() + + if snapshotter.ongoingSnapshots != nil { + return + } + + snapshotter.ongoingSnapshots = make(map[string]bool) +} + +func (snapshotter *Snapshotter) Snapshot(snapshotIndex uint64, snapshotId string) error { + snapshotter.lazyInit() + + Log.Infof("Local node (id = %d) taking a snapshot of its storage state for a consistent cluster snapshot (id = %s)", snapshotter.nodeID, snapshotId) + + snapshotDir := snapshotter.snapshotsDirectory + + if snapshotDir == "" { + Log.Warningf("Cannot take snapshot because no snapshot directory is configured") + + return ESnapshotsNotEnabled + } + + snapshotDir = path.Join(snapshotDir, fmt.Sprintf("snapshot-%s-%d", snapshotId, snapshotter.nodeID)) + + snapshotter.startSnapshot(snapshotId) + defer snapshotter.stopSnapshot(snapshotId) + + if err := snapshotter.storageDriver.Snapshot(snapshotDir, []byte{ SnapshotMetadataPrefix }, map[string]string{ SnapshotUUIDKey: snapshotId }); err != nil { + Log.Errorf("Unable to create a snapshot of node storage at %s: %v", snapshotDir, err) + + return err + } + + Log.Infof("Local node (id = %d) created a snapshot of its local state (id = %s) at %s", snapshotter.nodeID, snapshotId, snapshotDir) + + return nil +} + +func (snapshotter *Snapshotter) startSnapshot(snapshotId string) { + snapshotter.mu.Lock() + defer snapshotter.mu.Unlock() + + snapshotter.ongoingSnapshots[snapshotId] = true +} + +func (snapshotter *Snapshotter) stopSnapshot(snapshotId string) { + snapshotter.mu.Lock() + defer snapshotter.mu.Unlock() + + delete(snapshotter.ongoingSnapshots, snapshotId) +} + +func (snapshotter *Snapshotter) isSnapshotInProgress(snapshotId string) bool { + snapshotter.mu.Lock() + defer snapshotter.mu.Unlock() + + if snapshotter.ongoingSnapshots == nil { + return false + } + + return snapshotter.ongoingSnapshots[snapshotId] +} + +func (snapshotter *Snapshotter) CheckSnapshotStatus(snapshotId string) error { + snapshotDir := snapshotter.snapshotsDirectory + + if snapshotDir == "" { + Log.Warningf("Cannot take snapshot because no snapshot directory is configured") + + return ESnapshotsNotEnabled + } + + snapshotDir = path.Join(snapshotDir, fmt.Sprintf("snapshot-%s-%d", snapshotId, snapshotter.nodeID)) + + if snapshotter.isSnapshotInProgress(snapshotId) { + return ESnapshotInProgress + } + + snapshotStorage, err := snapshotter.storageDriver.OpenSnapshot(snapshotDir) + + if err != nil { + Log.Warningf("Unable to open snapshot at %s: %v", snapshotDir, err) + + return ESnapshotOpenFailed + } + + defer snapshotStorage.Close() + + snapshotMetadata := NewPrefixedStorageDriver([]byte{ SnapshotMetadataPrefix }, snapshotStorage) + values, err := snapshotMetadata.Get([][]byte{ []byte(SnapshotUUIDKey) }) + + if err != nil { + Log.Warningf("Unable to read snapshot metadata at %s: %v", snapshotDir, err) + + return ESnapshotReadFailed + } + + if len(values[0]) == 0 { + Log.Warningf("Snapshot metadata incomplete at %s", snapshotDir) + + return ESnapshotReadFailed + } + + if string(values[0]) != snapshotId { + Log.Warningf("Snapshot metadata UUID mismatch at %s: %s != %s", snapshotDir, snapshotId, string(values[0])) + + return ESnapshotReadFailed + } + + return nil +} + +func (snapshotter *Snapshotter) WriteSnapshot(snapshotId string, w 
io.Writer) error { + snapshotDir := path.Join(snapshotter.snapshotsDirectory, fmt.Sprintf("snapshot-%s-%d", snapshotId, snapshotter.nodeID)) + + return writeSnapshot(snapshotDir, w) +} + +func writeSnapshot(snapshotDirectory string, w io.Writer) error { + files, err := ioutil.ReadDir(snapshotDirectory) + + if err != nil { + return err + } + + tw := tar.NewWriter(w) + defer tw.Close() + + for _, file := range files { + err := tw.WriteHeader(&tar.Header{ + Name: file.Name(), + Mode: int64(file.Mode()), + Size: file.Size(), + }) + + if err != nil { + return err + } + + filePath := path.Join(snapshotDirectory, file.Name()) + fileReader, err := os.Open(filePath) + + if err != nil { + return err + } + + defer fileReader.Close() + + _, err = io.Copy(tw, fileReader) + + if err != nil { + return err + } + } + + return nil +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/node_facade/node_coordinator_facade.go b/vendor/github.com/armPelionEdge/devicedb/node_facade/node_coordinator_facade.go new file mode 100644 index 0000000..1f1c81d --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/node_facade/node_coordinator_facade.go @@ -0,0 +1,83 @@ +package node_facade +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +type ClusterNodeCoordinatorFacade interface { + ID() uint64 + // Create, initialize, and add a partition to the node's + // partition pool if it isn't already there. Initializes the partition as both + // read and write locked + AddPartition(partitionNumber uint64) + // Remove a partition from the node's partition pool. + RemovePartition(partitionNumber uint64) + // Allow other nodes to request a copy of this partition's + // data + EnableOutgoingTransfers(partitionNumber uint64) + // Disallow other nodes from requesting a copy of this partition's + // data. Cancel any current outgoing transfers for this partition + DisableOutgoingTransfers(partitionNumber uint64) + // Obtain a copy of this partition's data if necessary from another + // node and then transfer holdership of this partition replica + // to this node + StartIncomingTransfer(partitionNumber uint64, replicaNumber uint64) + // Stop any pending or ongoing transfers of this partition replica to + // this node. 
Also cancel any downloads for this partition from + // any other replicas + StopIncomingTransfer(partitionNumber uint64, replicaNumber uint64) + // Ensure that no updates can occur to the local copy of this partition + LockPartitionWrites(partitionNumber uint64) + // Allow updates to the local copy of this partition + UnlockPartitionWrites(partitionNumber uint64) + // Ensure that the local copy of this partition cannot be read for + // transfers or any other purpose + LockPartitionReads(partitionNumber uint64) + // Allow the local copy of this partition to serve reads + UnlockPartitionReads(partitionNumber uint64) + // Add site to the partition that it belongs to + // if this node owns that partition + AddSite(siteID string) + // Remove site from the partition that it belongs to + // if this node owns that partition. Disconnect any + // relays that are in that site + RemoveSite(siteID string) + AddRelay(relayID string) + RemoveRelay(relayID string) + MoveRelay(relayID string, siteID string) + DisconnectRelays(partitionNumber uint64) + // Return a count of cluster members that have non-zero capacity + NeighborsWithCapacity() int + // Obtain a two dimensional map indicating which partition replicas are + // currently owned by this node. map[partitionNumber][replicaNumber] + OwnedPartitionReplicas() map[uint64]map[uint64]bool + // Obtain a two dimensional map indicating which partition replicas are + // currently held by this node. map[partitionNumber][replicaNumber] + HeldPartitionReplicas() map[uint64]map[uint64]bool + // Notify a node that it has been added to a cluster + NotifyJoinedCluster() + // Notify a node that it has been removed from a cluster + NotifyLeftCluster() + // Notify a node that it no longer owns or holds any partition replicas + NotifyEmpty() +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/partition/partition.go b/vendor/github.com/armPelionEdge/devicedb/partition/partition.go new file mode 100644 index 0000000..b7595c9 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/partition/partition.go @@ -0,0 +1,75 @@ +package partition +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + . 
"github.com/armPelionEdge/devicedb/site" +) + +type Partition interface { + Partition() uint64 + Sites() SitePool + Iterator() PartitionIterator + LockWrites() + UnlockWrites() + LockReads() + UnlockReads() +} + +type DefaultPartition struct { + partition uint64 + sitePool SitePool +} + +func NewDefaultPartition(partition uint64, sitePool SitePool) *DefaultPartition { + return &DefaultPartition{ + partition: partition, + sitePool: sitePool, + } +} + +func (partition *DefaultPartition) Partition() uint64 { + return partition.partition +} + +func (partition *DefaultPartition) Sites() SitePool { + return partition.sitePool +} + +func (partition *DefaultPartition) Iterator() PartitionIterator { + return partition.sitePool.Iterator() +} + +func (partition *DefaultPartition) LockWrites() { +} + +func (partition *DefaultPartition) UnlockWrites() { +} + +func (partition *DefaultPartition) LockReads() { +} + +func (partition *DefaultPartition) UnlockReads() { +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/partition/partition_factory.go b/vendor/github.com/armPelionEdge/devicedb/partition/partition_factory.go new file mode 100644 index 0000000..6f58951 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/partition/partition_factory.go @@ -0,0 +1,44 @@ +package partition +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + . "github.com/armPelionEdge/devicedb/site" +) + +type PartitionFactory interface { + CreatePartition(partitionNumber uint64, sitePool SitePool) Partition +} + +type DefaultPartitionFactory struct { +} + +func NewDefaultPartitionFactory() *DefaultPartitionFactory { + return &DefaultPartitionFactory{ } +} + +func (partitionFactory *DefaultPartitionFactory) CreatePartition(partitionNumber uint64, sitePool SitePool) Partition { + return NewDefaultPartition(partitionNumber, sitePool) +} diff --git a/vendor/github.com/armPelionEdge/devicedb/partition/partition_iterator.go b/vendor/github.com/armPelionEdge/devicedb/partition/partition_iterator.go new file mode 100644 index 0000000..725677d --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/partition/partition_iterator.go @@ -0,0 +1,44 @@ +package partition +// + // Copyright (c) 2019 ARM Limited. 
+ //
+ // SPDX-License-Identifier: MIT
+ //
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
+ // of this software and associated documentation files (the "Software"), to
+ // deal in the Software without restriction, including without limitation the
+ // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ // sell copies of the Software, and to permit persons to whom the Software is
+ // furnished to do so, subject to the following conditions:
+ //
+ // The above copyright notice and this permission notice shall be included in all
+ // copies or substantial portions of the Software.
+ //
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ // SOFTWARE.
+ //
+
+
+import (
+    . "github.com/armPelionEdge/devicedb/site"
+)
+
+type PartitionFactory interface {
+    CreatePartition(partitionNumber uint64, sitePool SitePool) Partition
+}
+
+type DefaultPartitionFactory struct {
+}
+
+func NewDefaultPartitionFactory() *DefaultPartitionFactory {
+    return &DefaultPartitionFactory{ }
+}
+
+func (partitionFactory *DefaultPartitionFactory) CreatePartition(partitionNumber uint64, sitePool SitePool) Partition {
+    return NewDefaultPartition(partitionNumber, sitePool)
+}
diff --git a/vendor/github.com/armPelionEdge/devicedb/partition/partition_iterator.go b/vendor/github.com/armPelionEdge/devicedb/partition/partition_iterator.go
new file mode 100644
index 0000000..725677d
--- /dev/null
+++ b/vendor/github.com/armPelionEdge/devicedb/partition/partition_iterator.go
@@ -0,0 +1,44 @@
+package partition
+//
+ // Copyright (c) 2019 ARM Limited.
+ //
+ // SPDX-License-Identifier: MIT
+ //
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
+ // of this software and associated documentation files (the "Software"), to
+ // deal in the Software without restriction, including without limitation the
+ // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ // sell copies of the Software, and to permit persons to whom the Software is
+ // furnished to do so, subject to the following conditions:
+ //
+ // The above copyright notice and this permission notice shall be included in all
+ // copies or substantial portions of the Software.
+ //
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ // SOFTWARE.
+ //
+
+
+import (
+    . "github.com/armPelionEdge/devicedb/data"
+)
+
+type PartitionIterator interface {
+    Next() bool
+    // The site that the current entry belongs to
+    Site() string
+    // The bucket that the current entry belongs to within its site
+    Bucket() string
+    // The key of the current entry
+    Key() string
+    // The value of the current entry
+    Value() *SiblingSet
+    // Release the iterator and any resources that it holds
+    Release()
+    Error() error
+}
\ No newline at end of file
diff --git a/vendor/github.com/armPelionEdge/devicedb/partition/partition_pool.go b/vendor/github.com/armPelionEdge/devicedb/partition/partition_pool.go
new file mode 100644
index 0000000..545ec4e
--- /dev/null
+++ b/vendor/github.com/armPelionEdge/devicedb/partition/partition_pool.go
@@ -0,0 +1,67 @@
+package partition
+//
+ // Copyright (c) 2019 ARM Limited.
+ //
+ // SPDX-License-Identifier: MIT
+ //
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
+ // of this software and associated documentation files (the "Software"), to
+ // deal in the Software without restriction, including without limitation the
+ // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ // sell copies of the Software, and to permit persons to whom the Software is
+ // furnished to do so, subject to the following conditions:
+ //
+ // The above copyright notice and this permission notice shall be included in all
+ // copies or substantial portions of the Software.
+ //
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ // SOFTWARE.
+ // + + +import ( + "sync" +) + +type PartitionPool interface { + Add(partition Partition) + Remove(partitionNumber uint64) + Get(partitionNumber uint64) Partition +} + +type DefaultPartitionPool struct { + lock sync.Mutex + partitions map[uint64]Partition +} + +func NewDefaultPartitionPool() *DefaultPartitionPool { + return &DefaultPartitionPool{ + partitions: make(map[uint64]Partition, 0), + } +} + +func (partitionPool *DefaultPartitionPool) Add(partition Partition) { + partitionPool.lock.Lock() + defer partitionPool.lock.Unlock() + + partitionPool.partitions[partition.Partition()] = partition +} + +func (partitionPool *DefaultPartitionPool) Remove(partitionNumber uint64) { + partitionPool.lock.Lock() + defer partitionPool.lock.Unlock() + + delete(partitionPool.partitions, partitionNumber) +} + +func (partitionPool *DefaultPartitionPool) Get(partitionNumber uint64) Partition { + partitionPool.lock.Lock() + defer partitionPool.lock.Unlock() + + return partitionPool.partitions[partitionNumber] +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/raft/memory_storage.go b/vendor/github.com/armPelionEdge/devicedb/raft/memory_storage.go new file mode 100644 index 0000000..de34ce2 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/raft/memory_storage.go @@ -0,0 +1,136 @@ +package raft +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. 
+ // + + +import ( + "github.com/coreos/etcd/raft" + "github.com/coreos/etcd/raft/raftpb" +) + +type RaftMemoryStorage struct { + memoryStorage *raft.MemoryStorage + isEmpty bool + isDecomissioning bool + nodeID uint64 +} + +func NewRaftMemoryStorage() *RaftMemoryStorage { + return &RaftMemoryStorage{ + isEmpty: true, + memoryStorage: raft.NewMemoryStorage(), + } +} + +func (raftStorage *RaftMemoryStorage) InitialState() (raftpb.HardState, raftpb.ConfState, error) { + return raftStorage.memoryStorage.InitialState() +} + +func (raftStorage *RaftMemoryStorage) Entries(lo, hi, maxSize uint64) ([]raftpb.Entry, error) { + return raftStorage.memoryStorage.Entries(lo, hi, maxSize) +} + +func (raftStorage *RaftMemoryStorage) Term(i uint64) (uint64, error) { + return raftStorage.memoryStorage.Term(i) +} + +func (raftStorage *RaftMemoryStorage) LastIndex() (uint64, error) { + return raftStorage.memoryStorage.LastIndex() +} + +func (raftStorage *RaftMemoryStorage) FirstIndex() (uint64, error) { + return raftStorage.memoryStorage.FirstIndex() +} + +func (raftStorage *RaftMemoryStorage) Snapshot() (raftpb.Snapshot, error) { + return raftStorage.memoryStorage.Snapshot() +} + +func (raftStorage *RaftMemoryStorage) Open() error { + return nil +} + +func (raftStorage *RaftMemoryStorage) Close() error { + return nil +} + +func (raftStorage *RaftMemoryStorage) IsEmpty() bool { + return raftStorage.isEmpty +} + +func (raftStorage *RaftMemoryStorage) SetIsEmpty(b bool) { + raftStorage.isEmpty = b +} + +func (raftStorage *RaftMemoryStorage) SetDecommissioningFlag() error { + raftStorage.isDecomissioning = true + + return nil +} + +func (raftStorage *RaftMemoryStorage) IsDecommissioning() (bool, error) { + return raftStorage.isDecomissioning, nil +} + +func (raftStorage *RaftMemoryStorage) SetNodeID(id uint64) error { + raftStorage.nodeID = id + + return nil +} + +func (raftStorage *RaftMemoryStorage) NodeID() (uint64, error) { + return raftStorage.nodeID, nil +} + +func (raftStorage *RaftMemoryStorage) Append(entries []raftpb.Entry) error { + return raftStorage.memoryStorage.Append(entries) +} + +func (raftStorage *RaftMemoryStorage) SetHardState(st raftpb.HardState) error { + return raftStorage.memoryStorage.SetHardState(st) +} + +func (raftStorage *RaftMemoryStorage) ApplySnapshot(snap raftpb.Snapshot) error { + return raftStorage.memoryStorage.ApplySnapshot(snap) +} + +func (raftStorage *RaftMemoryStorage) CreateSnapshot(i uint64, cs *raftpb.ConfState, data []byte) (raftpb.Snapshot, error) { + return raftStorage.memoryStorage.CreateSnapshot(i, cs, data) +} + +func (raftStorage *RaftMemoryStorage) ApplyAll(hs raftpb.HardState, ents []raftpb.Entry, snap raftpb.Snapshot) error { + raftStorage.Append(ents) + + // update hard state if set + if !raft.IsEmptyHardState(hs) { + raftStorage.SetHardState(hs) + } + + // apply snapshot + if !raft.IsEmptySnap(snap) { + raftStorage.ApplySnapshot(snap) + } + + return nil +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/raft/node.go b/vendor/github.com/armPelionEdge/devicedb/raft/node.go new file mode 100644 index 0000000..4e06b22 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/raft/node.go @@ -0,0 +1,393 @@ +package raft +// + // Copyright (c) 2019 ARM Limited. 
+ // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "math" + "time" + "errors" + "context" + + . "github.com/armPelionEdge/devicedb/logging" + + "github.com/coreos/etcd/raft" + "github.com/coreos/etcd/raft/raftpb" + //"golang.org/x/net/context" +) + +var ECancelConfChange = errors.New("Conf change cancelled") + +// Limit on the number of entries that can accumulate before a snapshot and compaction occurs +var LogCompactionSize int = 1000 + +type RaftNodeConfig struct { + ID uint64 + CreateClusterIfNotExist bool + Storage RaftNodeStorage + GetSnapshot func() ([]byte, error) + Context []byte +} + +type RaftNode struct { + config *RaftNodeConfig + node raft.Node + isRunning bool + stop chan int + lastReplayIndex uint64 + lastCommittedIndex uint64 + onMessagesCB func([]raftpb.Message) error + onSnapshotCB func(raftpb.Snapshot) error + onEntryCB func(raftpb.Entry) error + onErrorCB func(error) error + onReplayDoneCB func() error + currentRaftConfState raftpb.ConfState +} + +func NewRaftNode(config *RaftNodeConfig) *RaftNode { + raftNode := &RaftNode{ + config: config, + node: nil, + } + + return raftNode +} + +func (raftNode *RaftNode) ID() uint64 { + return raftNode.config.ID +} + +func (raftNode *RaftNode) AddNode(ctx context.Context, nodeID uint64, context []byte) error { + Log.Infof("Node %d proposing addition of node %d to its cluster", raftNode.config.ID, nodeID) + + err := raftNode.node.ProposeConfChange(ctx, raftpb.ConfChange{ + ID: nodeID, + Type: raftpb.ConfChangeAddNode, + NodeID: nodeID, + Context: context, + }) + + if err != nil { + Log.Errorf("Node %d was unable to propose addition of node %d to its cluster: %s", raftNode.config.ID, nodeID, err.Error()) + } + + return err +} + +func (raftNode *RaftNode) RemoveNode(ctx context.Context, nodeID uint64, context []byte) error { + Log.Infof("Node %d proposing removal of node %d from its cluster", raftNode.config.ID, nodeID) + + err := raftNode.node.ProposeConfChange(ctx, raftpb.ConfChange{ + ID: nodeID, + Type: raftpb.ConfChangeRemoveNode, + NodeID: nodeID, + Context: context, + }) + + if err != nil { + Log.Errorf("Node %d was unable to propose removal of node %d from its cluster: %s", raftNode.config.ID, nodeID, err.Error()) + } + + return err +} + +func (raftNode *RaftNode) Propose(ctx context.Context, proposition []byte) error { + return raftNode.node.Propose(ctx, proposition) +} + +func (raftNode *RaftNode) LastSnapshot() 
(raftpb.Snapshot, error) {
+    return raftNode.config.Storage.Snapshot()
+}
+
+func (raftNode *RaftNode) CommittedEntries() ([]raftpb.Entry, error) {
+    firstIndex, err := raftNode.config.Storage.FirstIndex()
+
+    if err != nil {
+        return nil, err
+    }
+
+    if firstIndex == raftNode.lastCommittedIndex + 1 {
+        return []raftpb.Entry{}, nil
+    }
+
+    return raftNode.config.Storage.Entries(firstIndex, raftNode.lastCommittedIndex + 1, math.MaxUint64)
+}
+
+func (raftNode *RaftNode) Start() error {
+    raftNode.stop = make(chan int)
+    raftNode.isRunning = true
+
+    if err := raftNode.config.Storage.Open(); err != nil {
+        return err
+    }
+
+    nodeContext := raftNode.config.Context
+
+    if nodeContext == nil {
+        nodeContext = []byte{ }
+    }
+
+    config := &raft.Config{
+        ID: raftNode.config.ID,
+        ElectionTick: 10,
+        HeartbeatTick: 1,
+        Storage: raftNode.config.Storage,
+        MaxSizePerMsg: math.MaxUint16,
+        MaxInflightMsgs: 256,
+    }
+
+    if !raftNode.config.Storage.IsEmpty() {
+        // indicates that this node has already been run before
+        raftNode.node = raft.RestartNode(config)
+    } else {
+        // by default create a new cluster with one member (this node)
+        peers := []raft.Peer{ raft.Peer{ ID: raftNode.config.ID, Context: raftNode.config.Context } }
+
+        if !raftNode.config.CreateClusterIfNotExist {
+            // indicates that this node should join an existing cluster
+            peers = nil
+        } else {
+            Log.Infof("Creating a new single node cluster")
+        }
+
+        raftNode.node = raft.StartNode(config, peers)
+    }
+
+    hardState, _, _ := raftNode.config.Storage.InitialState()
+
+    // this keeps track of the highest index that is known
+    // to have been committed to the cluster
+    if !raft.IsEmptyHardState(hardState) {
+        raftNode.lastReplayIndex = hardState.Commit
+    }
+
+    Log.Debugf("Starting up raft node. Last known commit index was %v", raftNode.lastReplayIndex)
+
+    go raftNode.run()
+
+    return nil
+}
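+
+// Usage sketch (illustrative only; "storage" is assumed to be some
+// RaftNodeStorage implementation and "getSnapshot" a function returning the
+// application's config state). All callbacks should be registered before
+// Start is called, since the run loop invokes them as soon as the node starts:
+//
+//     raftNode := NewRaftNode(&RaftNodeConfig{
+//         ID: 1,
+//         CreateClusterIfNotExist: true,
+//         Storage: storage,
+//         GetSnapshot: getSnapshot,
+//     })
+//     raftNode.OnMessages(func(msgs []raftpb.Message) error { return nil })    // send to peers
+//     raftNode.OnSnapshot(func(snap raftpb.Snapshot) error { return nil })     // restore config state
+//     raftNode.OnCommittedEntry(func(entry raftpb.Entry) error { return nil }) // apply to state machine
+//     raftNode.OnError(func(err error) error { return nil })
+//     raftNode.OnReplayDone(func() error { return nil })
+//     raftNode.Start()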
+
+func (raftNode *RaftNode) Receive(ctx context.Context, msg raftpb.Message) error {
+    return raftNode.node.Step(ctx, msg)
+}
+
+func (raftNode *RaftNode) notifyIfReplayDone() {
+    if raftNode.lastCommittedIndex == raftNode.lastReplayIndex {
+        raftNode.onReplayDoneCB()
+    }
+}
+
+func (raftNode *RaftNode) run() {
+    ticker := time.Tick(time.Second)
+
+    defer func() {
+        // makes sure cleanup happens when the loop exits
+        raftNode.lastCommittedIndex = 0
+        raftNode.lastReplayIndex = 0
+        raftNode.node.Stop()
+        raftNode.config.Storage.Close()
+        raftNode.currentRaftConfState = raftpb.ConfState{ }
+    }()
+
+    lastSnapshot, _ := raftNode.LastSnapshot()
+
+    if !raft.IsEmptySnap(lastSnapshot) {
+        // It is essential that this happen so that when a new snapshot occurs
+        // it does not write an empty conf with no nodes in it. This will corrupt
+        // the cluster state. This caused an error where a node forgot that it belonged
+        // to its own cluster and couldn't become a leader or start a campaign.
+        raftNode.currentRaftConfState = lastSnapshot.Metadata.ConfState
+        // call onSnapshot callback to give initial state to system config
+        raftNode.onSnapshotCB(lastSnapshot)
+
+        if lastSnapshot.Metadata.Index == raftNode.lastReplayIndex {
+            raftNode.lastCommittedIndex = raftNode.lastReplayIndex
+        }
+    }
+
+    raftNode.notifyIfReplayDone()
+
+    for {
+        select {
+        case <-ticker:
+            raftNode.node.Tick()
+        case rd := <-raftNode.node.Ready():
+            // Saves raft state to persistent storage first. If the process dies or fails after this point,
+            // this ensures that there is a checkpoint to resume from on restart
+            if err := raftNode.saveToStorage(rd.HardState, rd.Entries, rd.Snapshot); err != nil {
+                raftNode.onErrorCB(err)
+
+                return
+            }
+
+            // Messages must be sent after entries and hard state are saved to stable storage
+            if len(rd.Messages) != 0 {
+                raftNode.onMessagesCB(rd.Messages)
+            }
+
+            if !raft.IsEmptySnap(rd.Snapshot) {
+                // snapshots received from other nodes.
+                // Used to allow this node to catch up
+                raftNode.onSnapshotCB(rd.Snapshot)
+                raftNode.lastCommittedIndex = rd.Snapshot.Metadata.Index
+                raftNode.notifyIfReplayDone()
+            }
+
+            for _, entry := range rd.CommittedEntries {
+                // Skip an entry if it has already been applied.
+                // I don't know why I would receive a committed entry here
+                // twice but the raft example does this so I'm doing it.
+                if entry.Index < raftNode.lastCommittedIndex {
+                    continue
+                }
+
+                cancelConfChange := raftNode.onEntryCB(entry) == ECancelConfChange
+
+                raftNode.lastCommittedIndex = entry.Index
+
+                if entry.Type == raftpb.EntryConfChange {
+                    if err := raftNode.applyConfigurationChange(entry, cancelConfChange); err != nil {
+                        raftNode.onErrorCB(err)
+
+                        return
+                    }
+                }
+
+                raftNode.notifyIfReplayDone()
+            }
+
+            // Snapshot current state and perform a compaction of entries
+            // if the number of entries exceeds a certain threshold
+            if err := raftNode.takeSnapshotIfEnoughEntries(); err != nil {
+                raftNode.onErrorCB(err)
+
+                return
+            }
+
+            raftNode.node.Advance()
+        case <-raftNode.stop:
+            return
+        }
+    }
+}
+
+func (raftNode *RaftNode) saveToStorage(hs raftpb.HardState, ents []raftpb.Entry, snap raftpb.Snapshot) error {
+    // Ensures that all updates get applied atomically to persistent storage: HardState, Entries, Snapshot.
+    // If any part of the update fails then no change is applied. This is important so that the persistent state
+    // remains consistent.
+ if err := raftNode.config.Storage.ApplyAll(hs, ents, snap); err != nil { + return err + } + + return nil +} + +func (raftNode *RaftNode) applyConfigurationChange(entry raftpb.Entry, cancelConfChange bool) error { + var confChange raftpb.ConfChange + + if err := confChange.Unmarshal(entry.Data); err != nil { + return err + } + + if cancelConfChange { + // From the etcd/raft docs: "The configuration change may be cancelled at this point by setting the NodeID field to zero before calling ApplyConfChange" + switch confChange.Type { + case raftpb.ConfChangeAddNode: + Log.Debugf("Ignoring proposed addition of node %d", confChange.NodeID) + case raftpb.ConfChangeRemoveNode: + Log.Debugf("Ignoring proposed removal of node %d", confChange.NodeID) + } + + confChange.NodeID = 0 + } + + raftNode.currentRaftConfState = *raftNode.node.ApplyConfChange(confChange) + + return nil +} + +func (raftNode *RaftNode) takeSnapshotIfEnoughEntries() error { + lastSnapshot, err := raftNode.config.Storage.Snapshot() + + if err != nil { + return err + } + + if raftNode.lastCommittedIndex < lastSnapshot.Metadata.Index { + return nil + } + + if raftNode.lastCommittedIndex - lastSnapshot.Metadata.Index >= uint64(LogCompactionSize) { + // data is my config state snapshot + data, err := raftNode.config.GetSnapshot() + + if err != nil { + return err + } + + _, err = raftNode.config.Storage.CreateSnapshot(raftNode.lastCommittedIndex, &raftNode.currentRaftConfState, data) + + if err != nil { + return err + } + } + + return nil +} + +func (raftNode *RaftNode) Stop() { + if raftNode.isRunning { + raftNode.isRunning = false + close(raftNode.stop) + } +} + +func (raftNode *RaftNode) OnReplayDone(cb func() error) { + raftNode.onReplayDoneCB = cb +} + +func (raftNode *RaftNode) OnMessages(cb func([]raftpb.Message) error) { + raftNode.onMessagesCB = cb +} + +func (raftNode *RaftNode) OnSnapshot(cb func(raftpb.Snapshot) error) { + raftNode.onSnapshotCB = cb +} + +func (raftNode *RaftNode) OnCommittedEntry(cb func(raftpb.Entry) error) { + raftNode.onEntryCB = cb +} + +func (raftNode *RaftNode) OnError(cb func(error) error) { + raftNode.onErrorCB = cb +} + +func (raftNode *RaftNode) ReportUnreachable(id uint64) { + raftNode.node.ReportUnreachable(id) +} + +func (raftNode *RaftNode) ReportSnapshot(id uint64, status raft.SnapshotStatus) { + raftNode.node.ReportSnapshot(id, status) +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/raft/storage.go b/vendor/github.com/armPelionEdge/devicedb/raft/storage.go new file mode 100644 index 0000000..53406cb --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/raft/storage.go @@ -0,0 +1,696 @@ +package raft +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. 
+ // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "sync" + "errors" + + . "github.com/armPelionEdge/devicedb/storage" + "github.com/coreos/etcd/raft" + "github.com/coreos/etcd/raft/raftpb" + "encoding/binary" +) + +var KeySnapshot = []byte{ 0 } +var KeyHardState = []byte{ 1 } +var KeyPrefixEntry = []byte{ 2 } +var KeyNodeID = []byte{ 3 } +var KeyIsDecommissioning = []byte{ 4 } + +func entryKey(index uint64) []byte { + key := make([]byte, 0, len(KeyPrefixEntry) + 8) + indexBytes := make([]byte, 8) + + binary.BigEndian.PutUint64(indexBytes, index) + + key = append(key, KeyPrefixEntry...) + key = append(key, indexBytes...) + + return key +} + +func entryIndex(e []byte) (uint64, error) { + if len(e) != len(KeyPrefixEntry) + 8 { + return 0, errors.New("Unable to decode entry key") + } + + return binary.BigEndian.Uint64(e[len(KeyPrefixEntry):]), nil +} + +// it is up to the caller to ensure lastIndex >= firstIndex +func entryKeys(firstIndex, lastIndex uint64) [][]byte { + if firstIndex > lastIndex { + return [][]byte{ } + } + + keys := make([][]byte, lastIndex - firstIndex + 1) + + for i := firstIndex; i <= lastIndex; i++ { + keys[i - firstIndex] = entryKey(i) + } + + return keys +} + +type RaftNodeStorage interface { + raft.Storage + IsEmpty() bool + Open() error + Close() error + SetDecommissioningFlag() error + IsDecommissioning() (bool, error) + SetNodeID(id uint64) error + NodeID() (uint64, error) + SetHardState(st raftpb.HardState) error + ApplySnapshot(snap raftpb.Snapshot) error + CreateSnapshot(i uint64, cs *raftpb.ConfState, data []byte) (raftpb.Snapshot, error) + Append(entries []raftpb.Entry) error + ApplyAll(hs raftpb.HardState, ents []raftpb.Entry, snap raftpb.Snapshot) error +} + +type RaftStorage struct { + isOpen bool + isEmpty bool + storageDriver StorageDriver + lock sync.Mutex + memoryStorage *raft.MemoryStorage + decommissioning *bool +} + +func NewRaftStorage(storageDriver StorageDriver) *RaftStorage { + return &RaftStorage{ + isEmpty: true, + storageDriver: storageDriver, + memoryStorage: raft.NewMemoryStorage(), + } +} + +func (raftStorage *RaftStorage) cloneMemoryStorage() raft.MemoryStorage { + return *raftStorage.memoryStorage +} + +func (raftStorage *RaftStorage) restoreMemoryStorage(s raft.MemoryStorage) { + *raftStorage.memoryStorage = s +} + +func (raftStorage *RaftStorage) Open() error { + raftStorage.lock.Lock() + defer raftStorage.lock.Unlock() + + // If this storage has already been opened ignore this call to open + if raftStorage.isOpen { + return nil + } + + if err := raftStorage.storageDriver.Open(); err != nil { + return err + } + + // reset memory storage + raftStorage.memoryStorage = raft.NewMemoryStorage() + + snapshotGetResults, err := raftStorage.storageDriver.Get([][]byte{ KeySnapshot }) + + if err != nil { + return err + } + + if snapshotGetResults[0] != nil { + var snapshot raftpb.Snapshot + + err := snapshot.Unmarshal(snapshotGetResults[0]) + + if err != nil { + return err + } + + err = raftStorage.memoryStorage.ApplySnapshot(snapshot) + + if err != nil { + return err + } + + 
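+        // a persisted snapshot was found and restored into memory storage,
+        // so this node has prior raft state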
raftStorage.isEmpty = false + } + + hardStateGetResults, err := raftStorage.storageDriver.Get([][]byte{ KeyHardState }) + + if err != nil { + return err + } + + if hardStateGetResults[0] != nil { + var hardState raftpb.HardState + + err := hardState.Unmarshal(hardStateGetResults[0]) + + if err != nil { + return err + } + + err = raftStorage.memoryStorage.SetHardState(hardState) + + if err != nil { + return err + } + + raftStorage.isEmpty = false + } + + entriesIterator, err := raftStorage.storageDriver.GetMatches([][]byte{ KeyPrefixEntry }) + + if err != nil { + return err + } + + var entries = []raftpb.Entry{ } + var lastEntryIndex *uint64 = nil + + for entriesIterator.Next() { + var entry raftpb.Entry + + ek := entriesIterator.Key() + encodedEntry := entriesIterator.Value() + expectedEntryIndex, err := entryIndex(ek) + + if err != nil { + return err + } + + if lastEntryIndex != nil && *lastEntryIndex + 1 != expectedEntryIndex { + return errors.New("Entry indices are not monotonically increasing") + } + + lastEntryIndex = &expectedEntryIndex + + err = entry.Unmarshal(encodedEntry) + + if err != nil { + return err + } + + if entry.Index != expectedEntryIndex { + return errors.New("Encoded entry index does not match index in its key") + } + + entries = append(entries, entry) + raftStorage.isEmpty = false + } + + if entriesIterator.Error() != nil { + return entriesIterator.Error() + } + + err = raftStorage.memoryStorage.Append(entries) + + if err != nil { + return err + } + + raftStorage.isOpen = true + + return nil +} + +func (raftStorage *RaftStorage) Close() error { + raftStorage.lock.Lock() + defer raftStorage.lock.Unlock() + + raftStorage.isOpen = false + raftStorage.isEmpty = true + raftStorage.decommissioning = nil + raftStorage.memoryStorage = raft.NewMemoryStorage() + + return raftStorage.storageDriver.Close() +} + +func (raftStorage *RaftStorage) IsEmpty() bool { + return raftStorage.isEmpty +} + +// START raft.Storage interface methods +func (raftStorage *RaftStorage) InitialState() (raftpb.HardState, raftpb.ConfState, error) { + raftStorage.lock.Lock() + defer raftStorage.lock.Unlock() + + return raftStorage.memoryStorage.InitialState() +} + +func (raftStorage *RaftStorage) Entries(lo, hi, maxSize uint64) ([]raftpb.Entry, error) { + raftStorage.lock.Lock() + defer raftStorage.lock.Unlock() + + return raftStorage.memoryStorage.Entries(lo, hi, maxSize) +} + +func (raftStorage *RaftStorage) Term(i uint64) (uint64, error) { + raftStorage.lock.Lock() + defer raftStorage.lock.Unlock() + + return raftStorage.memoryStorage.Term(i) +} + +func (raftStorage *RaftStorage) LastIndex() (uint64, error) { + raftStorage.lock.Lock() + defer raftStorage.lock.Unlock() + + return raftStorage.memoryStorage.LastIndex() +} + +func (raftStorage *RaftStorage) FirstIndex() (uint64, error) { + raftStorage.lock.Lock() + defer raftStorage.lock.Unlock() + + return raftStorage.memoryStorage.FirstIndex() +} + +func (raftStorage *RaftStorage) Snapshot() (raftpb.Snapshot, error) { + raftStorage.lock.Lock() + defer raftStorage.lock.Unlock() + + return raftStorage.memoryStorage.Snapshot() +} +// END raft.Storage interface methods + +func (raftStorage *RaftStorage) SetDecommissioningFlag() error { + raftStorage.lock.Lock() + defer raftStorage.lock.Unlock() + + storageBatch := NewBatch() + storageBatch.Put(KeyIsDecommissioning, []byte{ }) + + if err := raftStorage.storageDriver.Batch(storageBatch); err != nil { + return err + } + + var t bool = true + raftStorage.decommissioning = &t + + return nil +} + +func 
(raftStorage *RaftStorage) IsDecommissioning() (bool, error) { + raftStorage.lock.Lock() + defer raftStorage.lock.Unlock() + + if raftStorage.decommissioning != nil { + return *raftStorage.decommissioning, nil + } + + result, err := raftStorage.storageDriver.Get([][]byte{ KeyIsDecommissioning }) + + if err != nil { + return false, err + } + + var b bool + + if result[0] != nil { + b = true + } + + raftStorage.decommissioning = &b + + return *raftStorage.decommissioning, nil +} + +func (raftStorage *RaftStorage) SetNodeID(id uint64) error { + idBytes := make([]byte, 8) + binary.BigEndian.PutUint64(idBytes, id) + + storageBatch := NewBatch() + storageBatch.Put(KeyNodeID, idBytes) + + if err := raftStorage.storageDriver.Batch(storageBatch); err != nil { + return err + } + + return nil +} + +func (raftStorage *RaftStorage) NodeID() (uint64, error) { + result, err := raftStorage.storageDriver.Get([][]byte{ KeyNodeID }) + + if err != nil { + return 0, err + } + + if result[0] == nil { + return 0, nil + } + + if len(result[0]) != 8 { + return 0, nil + } + + return binary.BigEndian.Uint64(result[0]), nil +} + +func (raftStorage *RaftStorage) SetHardState(st raftpb.HardState) error { + raftStorage.lock.Lock() + defer raftStorage.lock.Unlock() + + memoryStorageCopy := raftStorage.cloneMemoryStorage() + err := raftStorage.memoryStorage.SetHardState(st) + + if err != nil { + return err + } + + if !raftStorage.isOpen { + return nil + } + + encodedHardState, err := st.Marshal() + + if err != nil { + raftStorage.restoreMemoryStorage(memoryStorageCopy) + + return err + } + + storageBatch := NewBatch() + storageBatch.Put(KeyHardState, encodedHardState) + + err = raftStorage.storageDriver.Batch(storageBatch) + + if err != nil { + raftStorage.restoreMemoryStorage(memoryStorageCopy) + + return err + } + + return nil +} + +func (raftStorage *RaftStorage) ApplySnapshot(snap raftpb.Snapshot) error { + raftStorage.lock.Lock() + defer raftStorage.lock.Unlock() + + memoryStorageCopy := raftStorage.cloneMemoryStorage() + err := raftStorage.memoryStorage.ApplySnapshot(snap) + + if err != nil { + return err + } + + if !raftStorage.isOpen { + return nil + } + + firstIndex, _ := memoryStorageCopy.FirstIndex() + lastIndex, _ := memoryStorageCopy.LastIndex() + + purgedEntryKeys := entryKeys(firstIndex, lastIndex) + encodedSnap, err := snap.Marshal() + + if err != nil { + raftStorage.restoreMemoryStorage(memoryStorageCopy) + + return err + } + + storageBatch := NewBatch() + + for _, purgedEntryKey := range purgedEntryKeys { + storageBatch.Delete(purgedEntryKey) + } + + storageBatch.Put(KeySnapshot, encodedSnap) + + err = raftStorage.storageDriver.Batch(storageBatch) + + if err != nil { + raftStorage.restoreMemoryStorage(memoryStorageCopy) + + return err + } + + return nil +} + +// Atomically take a snapshot of the current state and compact the entries up to the point that the snapshot was taken +func (raftStorage *RaftStorage) CreateSnapshot(i uint64, cs *raftpb.ConfState, data []byte) (raftpb.Snapshot, error) { + raftStorage.lock.Lock() + defer raftStorage.lock.Unlock() + + // clone current state of memoryStorage so if persisting the snapshot to disk doesn't work we can + // revert memoryStorage to its original state + memoryStorageCopy := raftStorage.cloneMemoryStorage() + originalFirstIndex, _ := raftStorage.memoryStorage.FirstIndex() + snap, err := raftStorage.memoryStorage.CreateSnapshot(i, cs, data) + + if err != nil { + return raftpb.Snapshot{ }, err + } + + err = raftStorage.memoryStorage.Compact(i) + + if err != 
nil {
+        raftStorage.restoreMemoryStorage(memoryStorageCopy)
+
+        return raftpb.Snapshot{ }, err
+    }
+
+    // if raftStorage isn't open just treat raftStorage like memoryStorage
+    if !raftStorage.isOpen {
+        return snap, nil
+    }
+
+    encodedSnap, err := snap.Marshal()
+
+    if err != nil {
+        raftStorage.restoreMemoryStorage(memoryStorageCopy)
+
+        return raftpb.Snapshot{ }, err
+    }
+
+    newFirstIndex, _ := raftStorage.memoryStorage.FirstIndex()
+    purgedEntryKeys := entryKeys(originalFirstIndex, newFirstIndex - 1)
+
+    storageBatch := NewBatch()
+
+    for _, purgedEntryKey := range purgedEntryKeys {
+        storageBatch.Delete(purgedEntryKey)
+    }
+
+    storageBatch.Put(KeySnapshot, encodedSnap)
+
+    err = raftStorage.storageDriver.Batch(storageBatch)
+
+    if err != nil {
+        raftStorage.restoreMemoryStorage(memoryStorageCopy)
+
+        return raftpb.Snapshot{ }, err
+    }
+
+    return snap, nil
+}
+
+func (raftStorage *RaftStorage) Append(entries []raftpb.Entry) error {
+    raftStorage.lock.Lock()
+    defer raftStorage.lock.Unlock()
+
+    if len(entries) == 0 {
+        return nil
+    }
+
+    memoryStorageCopy := raftStorage.cloneMemoryStorage()
+    originalFirstIndex, _ := raftStorage.memoryStorage.FirstIndex()
+    originalLastIndex, _ := raftStorage.memoryStorage.LastIndex()
+
+    if entries[0].Index + uint64(len(entries)) - 1 < originalFirstIndex {
+        return nil
+    }
+
+    err := raftStorage.memoryStorage.Append(entries)
+
+    if err != nil {
+        return err
+    }
+
+    if !raftStorage.isOpen {
+        return nil
+    }
+
+    // truncate compacted entries
+    // ignores entries being appended whose index was previously compacted
+    if originalFirstIndex > entries[0].Index {
+        entries = entries[originalFirstIndex - entries[0].Index:]
+    }
+
+    storageBatch := NewBatch()
+
+    // purge all old entries whose index >= entries[0].Index
+    for i := entries[0].Index; i <= originalLastIndex; i++ {
+        ek := entryKey(i)
+
+        storageBatch.Delete(ek)
+    }
+
+    // put all newly appended entries into the storage
+    for i := 0; i < len(entries); i += 1 {
+        ek := entryKey(entries[i].Index)
+        encodedEntry, err := entries[i].Marshal()
+
+        if err != nil {
+            raftStorage.restoreMemoryStorage(memoryStorageCopy)
+
+            return err
+        }
+
+        storageBatch.Put(ek, encodedEntry)
+    }
+
+    err = raftStorage.storageDriver.Batch(storageBatch)
+
+    if err != nil {
+        raftStorage.restoreMemoryStorage(memoryStorageCopy)
+
+        return err
+    }
+
+    return nil
+}
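
The persistence scheme above leans on one property of entryKey: because the index is encoded big-endian, lexicographic key order under the storage driver coincides with numeric log order, which is what lets Open replay entries with a prefix scan and lets Append and snapshot compaction delete contiguous index ranges. A minimal, self-contained sketch of that property follows; the helpers mirror the ones defined in this file, and main is illustrative only.

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
)

var keyPrefixEntry = []byte{2} // mirrors KeyPrefixEntry above

// entryKey mirrors the encoding used by RaftStorage: prefix + 8-byte big-endian index.
func entryKey(index uint64) []byte {
    key := make([]byte, len(keyPrefixEntry), len(keyPrefixEntry)+8)
    copy(key, keyPrefixEntry)

    indexBytes := make([]byte, 8)
    binary.BigEndian.PutUint64(indexBytes, index)

    return append(key, indexBytes...)
}

func main() {
    // Big-endian keys compare in the same order as their indices, so a
    // lexicographic iterator (such as LevelDB's) yields entries in log order.
    fmt.Println(bytes.Compare(entryKey(9), entryKey(10)) < 0)    // true
    fmt.Println(bytes.Compare(entryKey(255), entryKey(256)) < 0) // true; little-endian encoding would reverse these

    // Round trip: the index is recoverable from the key suffix.
    k := entryKey(42)
    fmt.Println(binary.BigEndian.Uint64(k[len(keyPrefixEntry):])) // 42
}
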
+
+// ApplyAll applies the entries, then the hard state, then the snapshot atomically to both in-memory and persistent storage.
+// This means that if any part fails no change occurs and the error is reported. If persisting the state to disk encounters
+// an error then the operation is aborted and no change occurs.
+func (raftStorage *RaftStorage) ApplyAll(hs raftpb.HardState, ents []raftpb.Entry, snap raftpb.Snapshot) error {
+    raftStorage.lock.Lock()
+    defer raftStorage.lock.Unlock()
+
+    if !raftStorage.isOpen {
+        return errors.New("ApplyAll only available when node is open")
+    }
+
+    memoryStorageCopy := raftStorage.cloneMemoryStorage()
+    storageBatch := NewBatch()
+
+    // apply entries to storage
+    if len(ents) != 0 {
+        originalFirstIndex, _ := raftStorage.memoryStorage.FirstIndex()
+        originalLastIndex, _ := raftStorage.memoryStorage.LastIndex()
+
+        if ents[0].Index + uint64(len(ents)) - 1 < originalFirstIndex {
+            raftStorage.restoreMemoryStorage(memoryStorageCopy)
+
+            return nil
+        }
+
+        err := raftStorage.memoryStorage.Append(ents)
+
+        if err != nil {
+            raftStorage.restoreMemoryStorage(memoryStorageCopy)
+
+            return err
+        }
+
+        // truncate compacted entries
+        // ignores entries being appended whose index was previously compacted
+        if originalFirstIndex > ents[0].Index {
+            ents = ents[originalFirstIndex - ents[0].Index:]
+        }
+
+        // purge all old entries whose index >= ents[0].Index
+        for i := ents[0].Index; i <= originalLastIndex; i++ {
+            ek := entryKey(i)
+
+            storageBatch.Delete(ek)
+        }
+
+        // put all newly appended entries into the storage
+        for i := 0; i < len(ents); i += 1 {
+            ek := entryKey(ents[i].Index)
+            encodedEntry, err := ents[i].Marshal()
+
+            if err != nil {
+                raftStorage.restoreMemoryStorage(memoryStorageCopy)
+
+                return err
+            }
+
+            storageBatch.Put(ek, encodedEntry)
+        }
+    }
+
+    // update hard state if set
+    if !raft.IsEmptyHardState(hs) {
+        err := raftStorage.memoryStorage.SetHardState(hs)
+
+        if err != nil {
+            raftStorage.restoreMemoryStorage(memoryStorageCopy)
+
+            return err
+        }
+
+        encodedHardState, err := hs.Marshal()
+
+        if err != nil {
+            raftStorage.restoreMemoryStorage(memoryStorageCopy)
+
+            return err
+        }
+
+        storageBatch.Put(KeyHardState, encodedHardState)
+    }
+
+    // apply snapshot
+    if !raft.IsEmptySnap(snap) {
+        firstIndex, _ := raftStorage.memoryStorage.FirstIndex()
+        lastIndex, _ := raftStorage.memoryStorage.LastIndex()
+        err := raftStorage.memoryStorage.ApplySnapshot(snap)
+
+        if err != nil {
+            raftStorage.restoreMemoryStorage(memoryStorageCopy)
+
+            return err
+        }
+
+        purgedEntryKeys := entryKeys(firstIndex, lastIndex)
+        encodedSnap, err := snap.Marshal()
+
+        if err != nil {
+            raftStorage.restoreMemoryStorage(memoryStorageCopy)
+
+            return err
+        }
+
+        for _, purgedEntryKey := range purgedEntryKeys {
+            storageBatch.Delete(purgedEntryKey)
+        }
+
+        storageBatch.Put(KeySnapshot, encodedSnap)
+    }
+
+    if err := raftStorage.storageDriver.Batch(storageBatch); err != nil {
+        raftStorage.restoreMemoryStorage(memoryStorageCopy)
+
+        return err
+    }
+
+    return nil
+}
\ No newline at end of file
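
ApplyAll exists so that one Ready batch from etcd/raft can be made durable in a single storage transaction. The following is a rough sketch, not devicedb's actual driver loop, of how a RaftNodeStorage implementation like the one above would typically be consumed; the runReadyLoop name and the send callback are placeholders.

package sketch

import (
    "github.com/coreos/etcd/raft"
    "github.com/coreos/etcd/raft/raftpb"

    ddbraft "github.com/armPelionEdge/devicedb/raft"
)

// runReadyLoop is a hypothetical driver: each Ready batch is made durable
// with a single ApplyAll call before any messages are sent or the node is
// advanced, which is the ordering etcd/raft expects of its storage layer.
func runReadyLoop(node raft.Node, storage ddbraft.RaftNodeStorage, send func([]raftpb.Message) error) error {
    for rd := range node.Ready() {
        if err := storage.ApplyAll(rd.HardState, rd.Entries, rd.Snapshot); err != nil {
            return err // ApplyAll applied nothing, so the caller may safely retry
        }

        // Send only after the state is durable.
        if err := send(rd.Messages); err != nil {
            return err
        }

        // (Application of rd.CommittedEntries to the state machine is omitted here.)
        node.Advance()
    }

    return nil
}
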
diff --git a/vendor/github.com/armPelionEdge/devicedb/raft/transport.go b/vendor/github.com/armPelionEdge/devicedb/raft/transport.go
new file mode 100644
index 0000000..7bb91a6
--- /dev/null
+++ b/vendor/github.com/armPelionEdge/devicedb/raft/transport.go
@@ -0,0 +1,285 @@
+package raft
+//
+ // Copyright (c) 2019 ARM Limited.
+ //
+ // SPDX-License-Identifier: MIT
+ //
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
+ // of this software and associated documentation files (the "Software"), to
+ // deal in the Software without restriction, including without limitation the
+ // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ // sell copies of the Software, and to permit persons to whom the Software is
+ // furnished to do so, subject to the following conditions:
+ //
+ // The above copyright notice and this permission notice shall be included in all
+ // copies or substantial portions of the Software.
+ //
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ // SOFTWARE.
+ //
+
+
+import (
+    "time"
+    "strings"
+    "github.com/gorilla/mux"
+    "github.com/coreos/etcd/raft/raftpb"
+
+    . "github.com/armPelionEdge/devicedb/logging"
+
+    "fmt"
+    "errors"
+    "net/http"
+    "bytes"
+    "io"
+    "io/ioutil"
+    "context"
+    "sync"
+)
+
+var ESenderUnknown = errors.New("The receiver does not know who we are")
+var EReceiverUnknown = errors.New("The sender does not know the receiver")
+var ETimeout = errors.New("The sender timed out while trying to send the message to the receiver")
+
+const (
+    RequestTimeoutSeconds = 10
+)
+
+type PeerAddress struct {
+    NodeID uint64
+    Host string
+    Port int
+}
+
+func (peerAddress *PeerAddress) ToHTTPURL(endpoint string) string {
+    return fmt.Sprintf("http://%s:%d%s", peerAddress.Host, peerAddress.Port, endpoint)
+}
+
+func (peerAddress *PeerAddress) IsEmpty() bool {
+    return peerAddress.NodeID == 0
+}
+
+type TransportHub struct {
+    peers map[uint64]PeerAddress
+    httpClient *http.Client
+    onReceiveCB func(context.Context, raftpb.Message) error
+    lock sync.Mutex
+    localPeerID uint64
+    defaultPeerAddress PeerAddress
+}
+
+func NewTransportHub(localPeerID uint64) *TransportHub {
+    defaultTransport := http.DefaultTransport.(*http.Transport)
+    transport := &http.Transport{}
+    transport.MaxIdleConns = 0
+    transport.MaxIdleConnsPerHost = 1000
+    transport.IdleConnTimeout = defaultTransport.IdleConnTimeout
+
+    hub := &TransportHub{
+        localPeerID: localPeerID,
+        peers: make(map[uint64]PeerAddress),
+        httpClient: &http.Client{
+            Timeout: time.Second * RequestTimeoutSeconds,
+            Transport: transport,
+        },
+    }
+
+    return hub
+}
+
+func (hub *TransportHub) SetLocalPeerID(id uint64) {
+    hub.localPeerID = id
+}
+
+func (hub *TransportHub) SetDefaultRoute(host string, port int) {
+    hub.defaultPeerAddress = PeerAddress{
+        Host: host,
+        Port: port,
+    }
+}
+
+func (hub *TransportHub) PeerAddress(nodeID uint64) *PeerAddress {
+    hub.lock.Lock()
+    defer hub.lock.Unlock()
+
+    peerAddress, ok := hub.peers[nodeID]
+
+    if !ok {
+        return nil
+    }
+
+    return &peerAddress
+}
+
+func (hub *TransportHub) AddPeer(peerAddress PeerAddress) {
+    hub.lock.Lock()
+    defer hub.lock.Unlock()
+    hub.peers[peerAddress.NodeID] = peerAddress
+}
+
+func (hub *TransportHub) RemovePeer(peerAddress PeerAddress) {
+    hub.lock.Lock()
+    defer hub.lock.Unlock()
+    delete(hub.peers, peerAddress.NodeID)
+}
+
+func (hub *TransportHub) UpdatePeer(peerAddress PeerAddress) {
+    // AddPeer acquires hub.lock itself; taking the lock here as well would
+    // deadlock since sync.Mutex is not reentrant.
+    hub.AddPeer(peerAddress)
+}
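
The Send method below resolves msg.To against this peer table and, when the destination is unknown, falls back to the default route (normally the seed node), which then proxies the message one hop. A standalone sketch of that resolution rule; the resolve helper and the host names are illustrative, not part of the package.

package main

import "fmt"

// PeerAddress mirrors the type defined above.
type PeerAddress struct {
    NodeID uint64
    Host   string
    Port   int
}

// resolve is a hypothetical helper showing the routing rule used by Send:
// a known peer wins, otherwise fall back to the default (seed) route.
func resolve(peers map[uint64]PeerAddress, defaultRoute PeerAddress, to uint64) (PeerAddress, bool) {
    if addr, ok := peers[to]; ok {
        return addr, true
    }

    if defaultRoute.Host == "" {
        return PeerAddress{}, false // the real code returns EReceiverUnknown here
    }

    return defaultRoute, true
}

func main() {
    peers := map[uint64]PeerAddress{5: {NodeID: 5, Host: "node5.cluster.local", Port: 8080}}
    seed := PeerAddress{Host: "seed.cluster.local", Port: 8080}

    // Unknown peer 7 falls back to the seed, which proxies at most one hop.
    addr, _ := resolve(peers, seed, 7)
    fmt.Printf("http://%s:%d/raftmessages?forwarded=true\n", addr.Host, addr.Port)
}
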
+
+func (hub *TransportHub) OnReceive(cb func(context.Context, raftpb.Message) error) {
+    hub.onReceiveCB = cb
+}
+
+func (hub *TransportHub) Send(ctx context.Context, msg raftpb.Message, proxy bool) error {
+    encodedMessage, err := msg.Marshal()
+
+    if err != nil {
+        return err
+    }
+
+    hub.lock.Lock()
+    peerAddress, ok := hub.peers[msg.To]
+    hub.lock.Unlock()
+
+    if !ok {
+        if hub.defaultPeerAddress.Host == "" {
+            return EReceiverUnknown
+        }
+
+        peerAddress = hub.defaultPeerAddress
+    }
+
+    endpointURL := peerAddress.ToHTTPURL("/raftmessages")
+
+    if proxy {
+        endpointURL += "?forwarded=true"
+    }
+
+    request, err := http.NewRequest("POST", endpointURL, bytes.NewReader(encodedMessage))
+
+    if err != nil {
+        return err
+    }
+
+    resp, err := hub.httpClient.Do(request)
+
+    if err != nil {
+        if strings.Contains(err.Error(), "Timeout") {
+            return ETimeout
+        }
+
+        return err
+    }
+
+    defer resp.Body.Close()
+
+    if resp.StatusCode != http.StatusOK {
+        errorMessage, err := ioutil.ReadAll(resp.Body)
+
+        if resp.StatusCode == http.StatusForbidden {
+            return ESenderUnknown
+        }
+
+        if err != nil {
+            return err
+        }
+
+        return fmt.Errorf("Received error code from server: (%d) %s", resp.StatusCode, string(errorMessage))
+    } else {
+        io.Copy(ioutil.Discard, resp.Body)
+    }
+
+    return nil
+}
+
+func (hub *TransportHub) Attach(router *mux.Router) {
+    router.HandleFunc("/raftmessages", func(w http.ResponseWriter, r *http.Request) {
+        raftMessage, err := ioutil.ReadAll(r.Body)
+
+        if err != nil {
+            Log.Warningf("POST /raftmessages: Unable to read message body")
+
+            w.Header().Set("Content-Type", "application/json; charset=utf8")
+            w.WriteHeader(http.StatusInternalServerError)
+            io.WriteString(w, "\n")
+
+            return
+        }
+
+        var msg raftpb.Message
+
+        err = msg.Unmarshal(raftMessage)
+
+        if err != nil {
+            Log.Warningf("POST /raftmessages: Unable to parse message body")
+
+            w.Header().Set("Content-Type", "application/json; charset=utf8")
+            w.WriteHeader(http.StatusBadRequest)
+            io.WriteString(w, "\n")
+
+            return
+        }
+
+        if msg.To != hub.localPeerID {
+            Log.Infof("This message isn't bound for us. Need to forward")
+            // This message is not bound for us. Forward it to its proper destination if we know it
+            // This feature allows new nodes to use their seed node to send messages throughout the cluster before they know the addresses of their neighboring nodes
+            query := r.URL.Query()
+            _, wasForwarded := query["forwarded"]
+
+            // This message was already proxied by the sender node. The message can only travel for
+            // one hop so we will return an error since we are not the node this is bound for
+            if wasForwarded {
+                w.Header().Set("Content-Type", "application/json; charset=utf8")
+                w.WriteHeader(http.StatusForbidden)
+                io.WriteString(w, "\n")
+
+                return
+            }
+
+            Log.Debugf("POST /raftmessages: Destination node (%d) for message is not this node. 
Will attempt to proxy the message to its proper recipient", msg.To) + + err := hub.Send(r.Context(), msg, true) + + if err != nil { + Log.Warningf("POST /raftmessages: Unable to proxy message to node (%d): %v", msg.To, err.Error()) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusInternalServerError) + io.WriteString(w, "\n") + + return + } + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusOK) + io.WriteString(w, "\n") + + return + } + + err = hub.onReceiveCB(r.Context(), msg) + + if err != nil { + Log.Warningf("POST /raftmessages: Unable to receive message: %v", err.Error()) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusInternalServerError) + io.WriteString(w, "\n") + + return + } + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusOK) + io.WriteString(w, "\n") + }).Methods("POST") +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/resolver/conflict_resolver.go b/vendor/github.com/armPelionEdge/devicedb/resolver/conflict_resolver.go new file mode 100644 index 0000000..d625d79 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/resolver/conflict_resolver.go @@ -0,0 +1,33 @@ +package resolver +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + . "github.com/armPelionEdge/devicedb/data" +) + +type ConflictResolver interface { + ResolveConflicts(*SiblingSet) *SiblingSet +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/resolver/strategies/last_writer_wins.go b/vendor/github.com/armPelionEdge/devicedb/resolver/strategies/last_writer_wins.go new file mode 100644 index 0000000..bb3d621 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/resolver/strategies/last_writer_wins.go @@ -0,0 +1,48 @@ +package strategies +// + // Copyright (c) 2019 ARM Limited. 
+ // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + . "github.com/armPelionEdge/devicedb/data" +) + +type LastWriterWins struct { +} + +func (lww *LastWriterWins) ResolveConflicts(siblingSet *SiblingSet) *SiblingSet { + var newestSibling *Sibling + + for sibling := range siblingSet.Iter() { + if newestSibling == nil || sibling.Timestamp() > newestSibling.Timestamp() { + newestSibling = sibling + } + } + + if newestSibling == nil { + return siblingSet + } + + return NewSiblingSet(map[*Sibling]bool{ newestSibling: true }) +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/resolver/strategies/multi_value.go b/vendor/github.com/armPelionEdge/devicedb/resolver/strategies/multi_value.go new file mode 100644 index 0000000..b82f055 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/resolver/strategies/multi_value.go @@ -0,0 +1,37 @@ +package strategies +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + . 
"github.com/armPelionEdge/devicedb/data" +) + +type MultiValue struct { +} + +func (mv *MultiValue) ResolveConflicts(siblingSet *SiblingSet) *SiblingSet { + return siblingSet +} + \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/rest/merkle.go b/vendor/github.com/armPelionEdge/devicedb/rest/merkle.go new file mode 100644 index 0000000..7993d70 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/rest/merkle.go @@ -0,0 +1,46 @@ +package rest +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + . "github.com/armPelionEdge/devicedb/data" +) + +type MerkleTree struct { + Depth uint8 +} + +type MerkleNode struct { + Hash Hash +} + +type MerkleKeys struct { + Keys []Key +} + +type Key struct { + Key string + Value *SiblingSet +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/routes/cluster.go b/vendor/github.com/armPelionEdge/devicedb/routes/cluster.go new file mode 100644 index 0000000..7d87a10 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/routes/cluster.go @@ -0,0 +1,250 @@ +package routes +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. 
+ //
+
+
+import (
+    "encoding/json"
+    "github.com/gorilla/mux"
+    "io"
+    "io/ioutil"
+    "net/http"
+    "strconv"
+
+    . "github.com/armPelionEdge/devicedb/cluster"
+    . "github.com/armPelionEdge/devicedb/error"
+    . "github.com/armPelionEdge/devicedb/logging"
+    . "github.com/armPelionEdge/devicedb/raft"
+)
+
+type ClusterEndpoint struct {
+    ClusterFacade ClusterFacade
+}
+
+func (clusterEndpoint *ClusterEndpoint) Attach(router *mux.Router) {
+    // Get an overview of the cluster
+    router.HandleFunc("/cluster", func(w http.ResponseWriter, r *http.Request) {
+        var clusterOverview ClusterOverview
+
+        clusterOverview.Nodes = clusterEndpoint.ClusterFacade.ClusterNodes()
+        clusterOverview.ClusterSettings = clusterEndpoint.ClusterFacade.ClusterSettings()
+        clusterOverview.PartitionDistribution = clusterEndpoint.ClusterFacade.PartitionDistribution()
+        clusterOverview.TokenAssignments = clusterEndpoint.ClusterFacade.TokenAssignments()
+
+        encodedOverview, err := json.Marshal(clusterOverview)
+
+        if err != nil {
+            Log.Warningf("GET /cluster: %v", err)
+
+            w.Header().Set("Content-Type", "application/json; charset=utf8")
+            w.WriteHeader(http.StatusInternalServerError)
+            io.WriteString(w, "\n")
+
+            return
+        }
+
+        w.Header().Set("Content-Type", "application/json; charset=utf8")
+        w.WriteHeader(http.StatusOK)
+        io.WriteString(w, string(encodedOverview) + "\n")
+    }).Methods("GET")
+
+    router.HandleFunc("/cluster/nodes", func(w http.ResponseWriter, r *http.Request) {
+        // Add a node to the cluster
+        body, err := ioutil.ReadAll(r.Body)
+
+        if err != nil {
+            Log.Warningf("POST /cluster/nodes: %v", err)
+
+            w.Header().Set("Content-Type", "application/json; charset=utf8")
+            w.WriteHeader(http.StatusBadRequest)
+            io.WriteString(w, string(EReadBody.JSON()) + "\n")
+
+            return
+        }
+
+        var nodeConfig NodeConfig
+
+        if err := json.Unmarshal(body, &nodeConfig); err != nil {
+            Log.Warningf("POST /cluster/nodes: Unable to parse node config body")
+
+            w.Header().Set("Content-Type", "application/json; charset=utf8")
+            w.WriteHeader(http.StatusBadRequest)
+            io.WriteString(w, string(ENodeConfigBody.JSON()) + "\n")
+
+            return
+        }
+
+        if err := clusterEndpoint.ClusterFacade.AddNode(r.Context(), nodeConfig); err != nil {
+            Log.Warningf("POST /cluster/nodes: Unable to add node to cluster: %v", err.Error())
+
+            w.Header().Set("Content-Type", "application/json; charset=utf8")
+            w.WriteHeader(http.StatusInternalServerError)
+
+            if err == ECancelConfChange {
+                io.WriteString(w, string(EDuplicateNodeID.JSON()) + "\n")
+            } else {
+                io.WriteString(w, string(EProposalError.JSON()) + "\n")
+            }
+
+            return
+        }
+
+        w.Header().Set("Content-Type", "application/json; charset=utf8")
+        w.WriteHeader(http.StatusOK)
+        io.WriteString(w, "\n")
+    }).Methods("POST")
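
For reference, adding a node through the handler above is a plain JSON POST to /cluster/nodes. A hedged client-side sketch follows; the payload shape and the seed host name are placeholders, since the real NodeConfig structure is defined in the cluster package rather than in this file.

package main

import (
    "bytes"
    "fmt"
    "net/http"
)

func main() {
    // Hypothetical payload: the exact NodeConfig JSON shape comes from the
    // cluster package and is not reproduced in this diff.
    nodeConfig := []byte(`{"address": {"nodeID": 2, "host": "node2.cluster.local", "port": 8080}}`)

    resp, err := http.Post("http://seed.cluster.local:8080/cluster/nodes", "application/json", bytes.NewReader(nodeConfig))
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    // 200 on success; the handler above maps a duplicate node ID to EDuplicateNodeID.
    fmt.Println(resp.Status)
}
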
+
+    router.HandleFunc("/cluster/nodes/{nodeID}", func(w http.ResponseWriter, r *http.Request) {
+        // Remove, replace, or decommission a node
+        query := r.URL.Query()
+        _, wasForwarded := query["forwarded"]
+        _, replace := query["replace"]
+        _, decommission := query["decommission"]
+
+        if replace && decommission {
+            Log.Warningf("DELETE /cluster/nodes/{nodeID}: Both the replace and decommission query parameters are set. This is not allowed")
+
+            w.Header().Set("Content-Type", "application/json; charset=utf8")
+            w.WriteHeader(http.StatusBadRequest)
+            io.WriteString(w, "\n")
+
+            return
+        }
+
+        nodeID, err := strconv.ParseUint(mux.Vars(r)["nodeID"], 10, 64)
+
+        if err != nil {
+            Log.Warningf("DELETE /cluster/nodes/{nodeID}: Invalid node ID")
+
+            w.Header().Set("Content-Type", "application/json; charset=utf8")
+            w.WriteHeader(http.StatusBadRequest)
+            io.WriteString(w, "\n")
+
+            return
+        }
+
+        if nodeID == 0 {
+            nodeID = clusterEndpoint.ClusterFacade.LocalNodeID()
+        }
+
+        if decommission {
+            if nodeID == clusterEndpoint.ClusterFacade.LocalNodeID() {
+                if err := clusterEndpoint.ClusterFacade.Decommission(); err != nil {
+                    Log.Warningf("DELETE /cluster/nodes/%d: Encountered an error while putting the node into decommissioning mode: %v", clusterEndpoint.ClusterFacade.LocalNodeID(), err.Error())
+
+                    w.Header().Set("Content-Type", "application/json; charset=utf8")
+                    w.WriteHeader(http.StatusInternalServerError)
+                    io.WriteString(w, "\n")
+
+                    return
+                }
+
+                Log.Infof("DELETE /cluster/nodes/%d: Local node is in decommissioning mode", clusterEndpoint.ClusterFacade.LocalNodeID())
+
+                w.Header().Set("Content-Type", "application/json; charset=utf8")
+                w.WriteHeader(http.StatusOK)
+                io.WriteString(w, "\n")
+
+                return
+            }
+
+            if wasForwarded {
+                Log.Warningf("DELETE /cluster/nodes/%d: Received a forwarded decommission request but we're not the correct node", nodeID)
+
+                w.Header().Set("Content-Type", "application/json; charset=utf8")
+                w.WriteHeader(http.StatusForbidden)
+                io.WriteString(w, "\n")
+
+                return
+            }
+
+            // forward the request to another node
+            peerAddress := clusterEndpoint.ClusterFacade.PeerAddress(nodeID)
+
+            if peerAddress.IsEmpty() {
+                Log.Warningf("DELETE /cluster/nodes/%d: Unable to forward decommission request since this node doesn't know how to contact the decommissioned node", nodeID)
+
+                w.Header().Set("Content-Type", "application/json; charset=utf8")
+                w.WriteHeader(http.StatusBadGateway)
+                io.WriteString(w, "\n")
+
+                return
+            }
+
+            err := clusterEndpoint.ClusterFacade.DecommissionPeer(nodeID)
+
+            if err != nil {
+                Log.Warningf("DELETE /cluster/nodes/%d: Error forwarding decommission request: %v", nodeID, err.Error())
+
+                w.Header().Set("Content-Type", "application/json; charset=utf8")
+                w.WriteHeader(http.StatusBadGateway)
+                io.WriteString(w, "\n")
+
+                return
+            }
+
+            w.Header().Set("Content-Type", "application/json; charset=utf8")
+            w.WriteHeader(http.StatusOK)
+            io.WriteString(w, "\n")
+
+            return
+        }
+
+        var replacementNodeID uint64
+
+        if replace {
+            replacementNodeID, err = strconv.ParseUint(query["replace"][0], 10, 64)
+
+            if err != nil {
+                Log.Warningf("DELETE /cluster/nodes/{nodeID}: Invalid replacement node ID")
+
+                w.Header().Set("Content-Type", "application/json; charset=utf8")
+                w.WriteHeader(http.StatusBadRequest)
+                io.WriteString(w, "\n")
+
+                return
+            }
+        }
+
+        if replacementNodeID != 0 {
+            err = clusterEndpoint.ClusterFacade.ReplaceNode(r.Context(), nodeID, replacementNodeID)
+        } else {
+            err = clusterEndpoint.ClusterFacade.RemoveNode(r.Context(), nodeID)
+        }
+
+        if err != nil {
+            Log.Warningf("DELETE /cluster/nodes/{nodeID}: Unable to remove node from the cluster: %v", err.Error())
+
+            w.Header().Set("Content-Type", "application/json; charset=utf8")
+            w.WriteHeader(http.StatusInternalServerError)
+            io.WriteString(w, "\n")
+
+            return
+        }
+
+        
w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusOK) + io.WriteString(w, "\n") + }).Methods("DELETE") +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/routes/cluster_facade.go b/vendor/github.com/armPelionEdge/devicedb/routes/cluster_facade.go new file mode 100644 index 0000000..a45520f --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/routes/cluster_facade.go @@ -0,0 +1,70 @@ +package routes +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "context" + "github.com/gorilla/websocket" + "io" + "net/http" + + . "github.com/armPelionEdge/devicedb/bucket" + . "github.com/armPelionEdge/devicedb/data" + . "github.com/armPelionEdge/devicedb/cluster" + . 
"github.com/armPelionEdge/devicedb/raft" +) + +type ClusterFacade interface { + AddNode(ctx context.Context, nodeConfig NodeConfig) error + RemoveNode(ctx context.Context, nodeID uint64) error + ReplaceNode(ctx context.Context, nodeID uint64, replacementNodeID uint64) error + DecommissionPeer(nodeID uint64) error + Decommission() error + LocalNodeID() uint64 + PeerAddress(nodeID uint64) PeerAddress + AddRelay(ctx context.Context, relayID string) error + RemoveRelay(ctx context.Context, relayID string) error + MoveRelay(ctx context.Context, relayID string, siteID string) error + AddSite(ctx context.Context, siteID string) error + RemoveSite(ctx context.Context, siteID string) error + Batch(siteID string, bucket string, updateBatch *UpdateBatch) (BatchResult, error) + LocalBatch(partition uint64, siteID string, bucket string, updateBatch *UpdateBatch) (map[string]*SiblingSet, error) + LocalMerge(partition uint64, siteID string, bucket string, patch map[string]*SiblingSet, broadcastToRelays bool) error + Get(siteID string, bucket string, keys [][]byte) ([]*SiblingSet, error) + LocalGet(partition uint64, siteID string, bucket string, keys [][]byte) ([]*SiblingSet, error) + GetMatches(siteID string, bucket string, keys [][]byte) (SiblingSetIterator, error) + LocalGetMatches(partition uint64, siteID string, bucket string, keys [][]byte) (SiblingSetIterator, error) + AcceptRelayConnection(conn *websocket.Conn, header http.Header) + ClusterNodes() []NodeConfig + ClusterSettings() ClusterSettings + PartitionDistribution() [][]uint64 + TokenAssignments() []uint64 + GetRelayStatus(ctx context.Context, relayID string) (RelayStatus, error) + LocalGetRelayStatus(relayID string) (RelayStatus, error) + LocalLogDump() (LogDump, error) + ClusterSnapshot(ctx context.Context) (Snapshot, error) + CheckLocalSnapshotStatus(snapshotId string) error + WriteLocalSnapshot(snapshotId string, w io.Writer) error +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/routes/kubernetes.go b/vendor/github.com/armPelionEdge/devicedb/routes/kubernetes.go new file mode 100644 index 0000000..96f2e18 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/routes/kubernetes.go @@ -0,0 +1,40 @@ +package routes +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. 
+ // + + +import ( + "github.com/gorilla/mux" + "net/http" +) + +type KubernetesEndpoint struct { +} + +func (kubernetesEndpoint *KubernetesEndpoint) Attach(router *mux.Router) { + // For liveness and readiness probes + router.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }).Methods("GET") +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/routes/log_dump.go b/vendor/github.com/armPelionEdge/devicedb/routes/log_dump.go new file mode 100644 index 0000000..e91db7a --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/routes/log_dump.go @@ -0,0 +1,70 @@ +package routes +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "encoding/json" + "github.com/gorilla/mux" + "io" + "net/http" + + . "github.com/armPelionEdge/devicedb/logging" +) + +type LogDumpEndpoint struct { + ClusterFacade ClusterFacade +} + +func (logDumpEndpoint *LogDumpEndpoint) Attach(router *mux.Router) { + router.HandleFunc("/log_dump", func(w http.ResponseWriter, r *http.Request) { + logDump, err := logDumpEndpoint.ClusterFacade.LocalLogDump() + + if err != nil { + Log.Warningf("GET /log_dump: %v", err) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusInternalServerError) + io.WriteString(w, "\n") + + return + } + + encodedLogDump, err := json.Marshal(logDump) + + if err != nil { + Log.Warningf("GET /log_dump: %v", err) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusInternalServerError) + io.WriteString(w, "\n") + + return + } + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusOK) + io.WriteString(w, string(encodedLogDump) + "\n") + }).Methods("GET") +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/routes/models.go b/vendor/github.com/armPelionEdge/devicedb/routes/models.go new file mode 100644 index 0000000..8b83071 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/routes/models.go @@ -0,0 +1,115 @@ +package routes +// + // Copyright (c) 2019 ARM Limited. 
+ // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "time" + + . "github.com/armPelionEdge/devicedb/cluster" + . "github.com/armPelionEdge/devicedb/data" + . "github.com/armPelionEdge/devicedb/transport" +) + +const ( + SnapshotProcessing string = "processing" + SnapshotFailed string = "failed" + SnapshotComplete string = "completed" + SnapshotMissing string = "missing" +) + +type RelayStatus struct { + Connected bool + ConnectedTo uint64 + Ping time.Duration + Site string +} + +type ClusterOverview struct { + Nodes []NodeConfig + ClusterSettings ClusterSettings + PartitionDistribution [][]uint64 + TokenAssignments []uint64 +} + +type InternalEntry struct { + Prefix string + Key string + Siblings *SiblingSet +} + +func (entry *InternalEntry) ToAPIEntry() *APIEntry { + var transportSiblingSet TransportSiblingSet + + transportSiblingSet.FromSiblingSet(entry.Siblings) + + return &APIEntry{ + Prefix: entry.Prefix, + Key: entry.Key, + Context: transportSiblingSet.Context, + Siblings: transportSiblingSet.Siblings, + } +} + +type APIEntry struct { + Prefix string `json:"prefix"` + Key string `json:"key"` + Context string `json:"context"` + Siblings []string `json:"siblings"` +} + +type BatchResult struct { + // Number of replicas that the batch was successfully applied to + NApplied uint64 `json:"nApplied"` + // Number of replicas in the replica set for this site + Replicas uint64 `json:"replicas"` + // Was write quorum achieved + Quorum bool + Patch map[string]*SiblingSet `json:"patch"` +} + +type RelaySettingsPatch struct { + Site string `json:"site"` +} + +type LogSnapshot struct { + Index uint64 + State ClusterState +} + +type LogEntry struct { + Index uint64 + Command ClusterCommand +} + +type LogDump struct { + BaseSnapshot LogSnapshot + Entries []LogEntry + CurrentSnapshot LogSnapshot +} + +type Snapshot struct { + UUID string `json:"uuid"` + Status string `json:"status"` +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/routes/partitions.go b/vendor/github.com/armPelionEdge/devicedb/routes/partitions.go new file mode 100644 index 0000000..e33a827 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/routes/partitions.go @@ -0,0 +1,371 @@ +package routes +// + // Copyright (c) 2019 ARM Limited. 
+ // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "encoding/json" + "github.com/gorilla/mux" + "io" + "net/http" + "strconv" + + . "github.com/armPelionEdge/devicedb/bucket" + . "github.com/armPelionEdge/devicedb/cluster" + . "github.com/armPelionEdge/devicedb/data" + . "github.com/armPelionEdge/devicedb/error" + . "github.com/armPelionEdge/devicedb/logging" +) + +type PartitionsEndpoint struct { + ClusterFacade ClusterFacade +} + +func (partitionsEndpoint *PartitionsEndpoint) Attach(router *mux.Router) { + // Merge values into a bucket + router.HandleFunc("/partitions/{partitionID}/sites/{siteID}/buckets/{bucketID}/merges", func(w http.ResponseWriter, r *http.Request) { + var patch map[string]*SiblingSet + var err error + var decoder *json.Decoder = json.NewDecoder(r.Body) + var broadcast bool = r.URL.Query().Get("broadcast") != "" + + err = decoder.Decode(&patch) + + if err != nil { + Log.Warningf("POST /partitions/{partitionID}/sites/{siteID}/buckets/{bucketID}/merges: Unable to parse request body: %v", err) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusBadRequest) + io.WriteString(w, "\n") + + return + } + + partitionID, err := strconv.ParseUint(mux.Vars(r)["partitionID"], 10, 64) + + if err != nil { + Log.Warningf("POST /partitions/{partitionID}/sites/{siteID}/buckets/{bucketID}/merges: Unable to parse partition ID as uint64: %v", err) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusBadRequest) + io.WriteString(w, "\n") + + return + } + + var siteID string = mux.Vars(r)["siteID"] + var bucket string = mux.Vars(r)["bucketID"] + + err = partitionsEndpoint.ClusterFacade.LocalMerge(partitionID, siteID, bucket, patch, broadcast) + + if err == ENoSuchPartition || err == ENoSuchSite || err == ENoSuchBucket { + var responseBody string + + switch err { + case ENoSuchSite: + responseBody = string(ESiteDoesNotExist.JSON()) + case ENoSuchBucket: + responseBody = string(EBucketDoesNotExist.JSON()) + } + + Log.Warningf("POST /partitions/{partitionID}/sites/{siteID}/buckets/{bucketID}/merges: %v", err) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusNotFound) + io.WriteString(w, responseBody + "\n") + + return + } + + if err != nil && err != ENoQuorum { + Log.Warningf("POST /partitions/{partitionID}/sites/{siteID}/buckets/{bucketID}/merges: %v", err) + + 
w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusInternalServerError) + io.WriteString(w, "\n") + + return + } + + var batchResult BatchResult + batchResult.NApplied = 1 + + if err == ENoQuorum { + batchResult.NApplied = 0 + } + + encodedBatchResult, _ := json.Marshal(batchResult) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusOK) + io.WriteString(w, string(encodedBatchResult) + "\n") + }).Methods("POST") + + // Submit an update to a bucket + router.HandleFunc("/partitions/{partitionID}/sites/{siteID}/buckets/{bucketID}/batches", func(w http.ResponseWriter, r *http.Request) { + var updateBatch UpdateBatch + var err error + + err = updateBatch.FromJSON(r.Body) + + if err != nil { + Log.Warningf("POST /partitions/{partitionID}/sites/{siteID}/buckets/{bucketID}/batches: Unable to parse request body: %v", err) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusBadRequest) + io.WriteString(w, "\n") + + return + } + + partitionID, err := strconv.ParseUint(mux.Vars(r)["partitionID"], 10, 64) + + if err != nil { + Log.Warningf("POST /partitions/{partitionID}/sites/{siteID}/buckets/{bucketID}/batches: Unable to parse partition ID as uint64: %v", err) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusBadRequest) + io.WriteString(w, "\n") + + return + } + + var siteID string = mux.Vars(r)["siteID"] + var bucket string = mux.Vars(r)["bucketID"] + + patch, err := partitionsEndpoint.ClusterFacade.LocalBatch(partitionID, siteID, bucket, &updateBatch) + + if err == ENoSuchPartition || err == ENoSuchSite || err == ENoSuchBucket { + var responseBody string + + switch err { + case ENoSuchSite: + responseBody = string(ESiteDoesNotExist.JSON()) + case ENoSuchBucket: + responseBody = string(EBucketDoesNotExist.JSON()) + } + + Log.Warningf("POST /partitions/{partitionID}/sites/{siteID}/buckets/{bucketID}/batches: %v", err) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusNotFound) + io.WriteString(w, responseBody + "\n") + + return + } + + if err != nil && err != ENoQuorum { + Log.Warningf("POST /partitions/{partitionID}/sites/{siteID}/buckets/{bucketID}/batches: %v", err) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusInternalServerError) + io.WriteString(w, "\n") + + return + } + + var batchResult BatchResult + batchResult.NApplied = 1 + + if err == ENoQuorum { + batchResult.NApplied = 0 + } else { + batchResult.Patch = patch + } + + encodedBatchResult, _ := json.Marshal(batchResult) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusOK) + io.WriteString(w, string(encodedBatchResult) + "\n") + }).Methods("POST") + + // Query keys in bucket + router.HandleFunc("/partitions/{partitionID}/sites/{siteID}/buckets/{bucketID}/keys", func(w http.ResponseWriter, r *http.Request) { + query := r.URL.Query() + keys := query["key"] + prefixes := query["prefix"] + partitionID, err := strconv.ParseUint(mux.Vars(r)["partitionID"], 10, 64) + + if err != nil { + Log.Warningf("GET /partitions/{partitionID}/sites/{siteID}/buckets/{bucketID}/keys: Unable to parse partition ID as uint64: %v", err) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusBadRequest) + io.WriteString(w, "\n") + + return + } + + if len(keys) != 0 && len(prefixes) != 0 { + Log.Warningf("GET 
/partitions/{partitionID}/sites/{siteID}/buckets/{bucketID}/keys: Client specified both prefixes and keys in the same request")
+
+            w.Header().Set("Content-Type", "application/json; charset=utf8")
+            w.WriteHeader(http.StatusBadRequest)
+            io.WriteString(w, "\n")
+
+            return
+        }
+
+        if len(keys) == 0 && len(prefixes) == 0 {
+            var entries []InternalEntry = []InternalEntry{ }
+            encodedEntries, _ := json.Marshal(entries)
+
+            w.Header().Set("Content-Type", "application/json; charset=utf8")
+            w.WriteHeader(http.StatusOK)
+            io.WriteString(w, string(encodedEntries) + "\n")
+
+            return
+        }
+
+        var siteID string = mux.Vars(r)["siteID"]
+        var bucket string = mux.Vars(r)["bucketID"]
+
+        if len(keys) > 0 {
+            var byteKeys [][]byte = make([][]byte, len(keys))
+
+            for i, key := range keys {
+                byteKeys[i] = []byte(key)
+            }
+
+            siblingSets, err := partitionsEndpoint.ClusterFacade.LocalGet(partitionID, siteID, bucket, byteKeys)
+
+            if err == ENoSuchPartition || err == ENoSuchBucket || err == ENoSuchSite {
+                var responseBody string
+
+                switch err {
+                case ENoSuchBucket:
+                    responseBody = string(EBucketDoesNotExist.JSON())
+                case ENoSuchSite:
+                    responseBody = string(ESiteDoesNotExist.JSON())
+                }
+
+                Log.Warningf("GET /partitions/{partitionID}/sites/{siteID}/buckets/{bucketID}/keys: %v", err)
+
+                w.Header().Set("Content-Type", "application/json; charset=utf8")
+                w.WriteHeader(http.StatusNotFound)
+                io.WriteString(w, responseBody + "\n")
+
+                return
+            }
+
+            if err != nil {
+                Log.Warningf("GET /partitions/{partitionID}/sites/{siteID}/buckets/{bucketID}/keys: %v", err.Error())
+
+                w.Header().Set("Content-Type", "application/json; charset=utf8")
+                w.WriteHeader(http.StatusInternalServerError)
+                io.WriteString(w, "\n")
+
+                return
+            }
+
+            var entries []InternalEntry = make([]InternalEntry, len(siblingSets))
+
+            for i, key := range keys {
+                entries[i] = InternalEntry{
+                    Prefix: "",
+                    Key: key,
+                    Siblings: siblingSets[i],
+                }
+            }
+
+            encodedEntries, _ := json.Marshal(entries)
+
+            w.Header().Set("Content-Type", "application/json; charset=utf8")
+            w.WriteHeader(http.StatusOK)
+            io.WriteString(w, string(encodedEntries) + "\n")
+
+            return
+        }
+
+        if len(prefixes) > 0 {
+            var byteKeys [][]byte = make([][]byte, len(prefixes))
+
+            for i, key := range prefixes {
+                byteKeys[i] = []byte(key)
+            }
+
+            ssIterator, err := partitionsEndpoint.ClusterFacade.LocalGetMatches(partitionID, siteID, bucket, byteKeys)
+
+            if err == ENoSuchPartition || err == ENoSuchBucket || err == ENoSuchSite {
+                var responseBody string
+
+                switch err {
+                case ENoSuchBucket:
+                    responseBody = string(EBucketDoesNotExist.JSON())
+                case ENoSuchSite:
+                    responseBody = string(ESiteDoesNotExist.JSON())
+                }
+
+                Log.Warningf("GET /partitions/{partitionID}/sites/{siteID}/buckets/{bucketID}/keys: %v", err)
+
+                w.Header().Set("Content-Type", "application/json; charset=utf8")
+                w.WriteHeader(http.StatusNotFound)
+                io.WriteString(w, responseBody + "\n")
+
+                return
+            }
+
+            if err != nil {
+                Log.Warningf("GET /partitions/{partitionID}/sites/{siteID}/buckets/{bucketID}/keys: %v", err.Error())
+
+                w.Header().Set("Content-Type", "application/json; charset=utf8")
+                w.WriteHeader(http.StatusInternalServerError)
+                io.WriteString(w, "\n")
+
+                return
+            }
+
+            var entries []InternalEntry = make([]InternalEntry, 0)
+
+            for ssIterator.Next() {
+                var nextEntry InternalEntry = InternalEntry{
+                    Prefix: string(ssIterator.Prefix()),
+                    Key: string(ssIterator.Key()),
+                    Siblings: ssIterator.Value(),
+                }
+
+                entries = append(entries, nextEntry)
+            }
+
+            if ssIterator.Error() != nil {
+                
Log.Warningf("GET /partitions/{partitionID}/sites/{siteID}/buckets/{bucketID}/keys: %v", ssIterator.Error().Error()) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusInternalServerError) + io.WriteString(w, "\n") + + return + } + + encodedEntries, _ := json.Marshal(entries) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusOK) + io.WriteString(w, string(encodedEntries) + "\n") + + return + } + }).Methods("GET") +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/routes/profile.go b/vendor/github.com/armPelionEdge/devicedb/routes/profile.go new file mode 100644 index 0000000..eeb10f5 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/routes/profile.go @@ -0,0 +1,40 @@ +package routes +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "github.com/gorilla/mux" + "net/http/pprof" +) + +type ProfilerEndpoint struct { +} + +func (profiler *ProfilerEndpoint) Attach(router *mux.Router) { + router.HandleFunc("/debug/pprof/", pprof.Index) + router.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) + router.HandleFunc("/debug/pprof/profile", pprof.Profile) + router.HandleFunc("/debug/pprof/symbol", pprof.Symbol) +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/routes/prometheus.go b/vendor/github.com/armPelionEdge/devicedb/routes/prometheus.go new file mode 100644 index 0000000..ae135d4 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/routes/prometheus.go @@ -0,0 +1,37 @@ +package routes +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. 
+ // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "github.com/gorilla/mux" + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +type PrometheusEndpoint struct { +} + +func (prometheusEndpoint *PrometheusEndpoint) Attach(router *mux.Router) { + router.Handle("/metrics", promhttp.Handler()) +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/routes/relays.go b/vendor/github.com/armPelionEdge/devicedb/routes/relays.go new file mode 100644 index 0000000..77143f3 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/routes/relays.go @@ -0,0 +1,184 @@ +package routes +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "github.com/gorilla/mux" + "encoding/json" + "io" + "io/ioutil" + "net/http" + + . "github.com/armPelionEdge/devicedb/cluster" + . "github.com/armPelionEdge/devicedb/error" + . 
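PrometheusEndpoint is a thin adapter: promhttp.Handler() serves every collector registered with the default registry, which will include the request histograms and counters registered further down in sites.go. Equivalent wiring in isolation (the address is illustrative):

package main

import (
	"log"
	"net/http"

	"github.com/gorilla/mux"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	router := mux.NewRouter()

	// Same effect as (&PrometheusEndpoint{}).Attach(router).
	router.Handle("/metrics", promhttp.Handler())

	log.Fatal(http.ListenAndServe("localhost:9090", router))
}

Scraping http://localhost:9090/metrics then returns the standard Go runtime collectors plus anything registered through prometheus.MustRegister.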
"github.com/armPelionEdge/devicedb/logging" +) + +type RelaysEndpoint struct { + ClusterFacade ClusterFacade +} + +func (relaysEndpoint *RelaysEndpoint) Attach(router *mux.Router) { + router.HandleFunc("/relays/{relayID}", func(w http.ResponseWriter, r *http.Request) { + body, err := ioutil.ReadAll(r.Body) + + if err != nil { + Log.Warningf("PATCH /relays/{relayID}: %v", err) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusBadRequest) + io.WriteString(w, string(EReadBody.JSON()) + "\n") + + return + } + + var relayPatch RelaySettingsPatch + + if err := json.Unmarshal(body, &relayPatch); err != nil { + Log.Warningf("PATCH /relays/{relayID}: Unable to parse relay settings body") + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusBadRequest) + io.WriteString(w, string(EReadBody.JSON()) + "\n") + + return + } + + err = relaysEndpoint.ClusterFacade.MoveRelay(r.Context(), mux.Vars(r)["relayID"], relayPatch.Site) + + if err == ENoSuchRelay { + Log.Warningf("PATCH /relays/{relayID}: Relay does not exist") + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusNotFound) + io.WriteString(w, string(ERelayDoesNotExist.JSON()) + "\n") + + return + } + + if err == ENoSuchSite { + Log.Warningf("PATCH /relays/{relayID}: Site does not exist") + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusNotFound) + io.WriteString(w, string(ESiteDoesNotExist.JSON()) + "\n") + + return + } + + if err != nil { + Log.Warningf("PATCH /relays/{relayID}: %v", err.Error()) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusInternalServerError) + io.WriteString(w, "\n") + + return + } + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusOK) + io.WriteString(w, "\n") + }).Methods("PATCH") + + // Add relay or move it to a site + router.HandleFunc("/relays/{relayID}", func(w http.ResponseWriter, r *http.Request) { + err := relaysEndpoint.ClusterFacade.AddRelay(r.Context(), mux.Vars(r)["relayID"]) + + if err != nil { + Log.Warningf("PUT /relays/{relayID}: %v", err.Error()) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusInternalServerError) + io.WriteString(w, "\n") + + return + } + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusOK) + io.WriteString(w, "\n") + }).Methods("PUT") + + // Remove a relay and disassociate it from a site + router.HandleFunc("/relays/{relayID}", func(w http.ResponseWriter, r *http.Request) { + err := relaysEndpoint.ClusterFacade.RemoveRelay(r.Context(), mux.Vars(r)["relayID"]) + + if err != nil { + Log.Warningf("DELETE /relays/{relayID}: %v", err.Error()) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusInternalServerError) + io.WriteString(w, "\n") + + return + } + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusOK) + io.WriteString(w, "\n") + }).Methods("DELETE") + + // Get the status of a relay + router.HandleFunc("/relays/{relayID}", func(w http.ResponseWriter, r *http.Request) { + query := r.URL.Query() + _, local := query["local"] + + var relayStatus RelayStatus + var err error + + if local { + relayStatus, err = relaysEndpoint.ClusterFacade.LocalGetRelayStatus(mux.Vars(r)["relayID"]) + } else { + relayStatus, err = 
relaysEndpoint.ClusterFacade.GetRelayStatus(r.Context(), mux.Vars(r)["relayID"]) + } + + if err == ERelayDoesNotExist { + Log.Warningf("GET /relays/{relayID}: %v", err.Error()) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusNotFound) + io.WriteString(w, string(ERelayDoesNotExist.JSON()) + "\n") + + return + } + + if err != nil { + Log.Warningf("GET /relays/{relayID}: %v", err.Error()) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusInternalServerError) + io.WriteString(w, "\n") + + return + } + + encodedStatus, err := json.Marshal(relayStatus) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusOK) + io.WriteString(w, string(encodedStatus) + "\n") + }).Methods("GET") +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/routes/sites.go b/vendor/github.com/armPelionEdge/devicedb/routes/sites.go new file mode 100644 index 0000000..ada51e8 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/routes/sites.go @@ -0,0 +1,406 @@ +package routes +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "encoding/json" + "io" + "io/ioutil" + "github.com/gorilla/mux" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + "net/http" + + . "github.com/armPelionEdge/devicedb/bucket" + . "github.com/armPelionEdge/devicedb/cluster" + . "github.com/armPelionEdge/devicedb/error" + . "github.com/armPelionEdge/devicedb/logging" + . 
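The PATCH handler above moves a relay by reading a RelaySettingsPatch naming the destination site. A hedged client sketch; the site JSON tag mirrors how the handler consumes the patch but is an assumption, as is the base URL:

package client

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
)

type relaySettingsPatch struct {
	Site string `json:"site"` // tag assumed
}

func moveRelay(baseURL, relayID, siteID string) error {
	body, err := json.Marshal(relaySettingsPatch{Site: siteID})

	if err != nil {
		return err
	}

	request, err := http.NewRequest(http.MethodPatch, baseURL+"/relays/"+relayID, bytes.NewReader(body))

	if err != nil {
		return err
	}

	request.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(request)

	if err != nil {
		return err
	}

	defer resp.Body.Close()

	if resp.StatusCode == http.StatusNotFound {
		// The body distinguishes a missing relay from a missing site.
		message, _ := ioutil.ReadAll(resp.Body)

		return fmt.Errorf("not found: %s", message)
	}

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("move failed: %s", resp.Status)
	}

	return nil
}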
"github.com/armPelionEdge/devicedb/transport" +) + +var ( + prometheusRequestDurations = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: "sites", + Subsystem: "devicedb", + Name: "request_durations_seconds", + Help: "The duration of each request", + Buckets: []float64{ 0.05, 0.25, 0.45, 0.65, 0.85, 1.05, 1.25, 1.45, 1.65, 1.85, 5, 10 }, + }, []string{ + "handler", + "code", + }) + + prometheusRequestCounts = prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: "sites", + Subsystem: "devicedb", + Name: "request_counts", + Help: "The number of requests", + }, []string{ + "handler", + "code", + }) +) + +func init() { + prometheus.MustRegister(prometheusRequestDurations, prometheusRequestCounts) +} + +type SitesEndpoint struct { + ClusterFacade ClusterFacade +} + +func (sitesEndpoint *SitesEndpoint) Attach(outerRouter *mux.Router) { + var router *mux.Router = mux.NewRouter() + + outerRouter.PathPrefix("/sites/").HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var routeMatch mux.RouteMatch + + if router.Match(r, &routeMatch) { + var labels prometheus.Labels = prometheus.Labels{ + "handler": routeMatch.Route.GetName(), + } + + promhttp.InstrumentHandlerDuration(prometheusRequestDurations.MustCurryWith(labels), + promhttp.InstrumentHandlerCounter(prometheusRequestCounts.MustCurryWith(labels), http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + router.ServeHTTP(w, r) + })), + )(w, r) + + return + } + + router.ServeHTTP(w, r) + }) + + // Add a site + router.HandleFunc("/sites/{siteID}", func(w http.ResponseWriter, r *http.Request) { + err := sitesEndpoint.ClusterFacade.AddSite(r.Context(), mux.Vars(r)["siteID"]) + + if err != nil { + Log.Warningf("PUT /relays/{relayID}: %v", err.Error()) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusInternalServerError) + io.WriteString(w, "\n") + + return + } + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusOK) + io.WriteString(w, "\n") + }).Methods("PUT").Name("add_site") + + // Remove a site + router.HandleFunc("/sites/{siteID}", func(w http.ResponseWriter, r *http.Request) { + err := sitesEndpoint.ClusterFacade.RemoveSite(r.Context(), mux.Vars(r)["siteID"]) + + if err != nil { + Log.Warningf("DELETE /relays/{relayID}: %v", err.Error()) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusInternalServerError) + io.WriteString(w, "\n") + + return + } + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusOK) + io.WriteString(w, "\n") + }).Methods("DELETE").Name("remove_site") + + // Submit an update to a bucket + router.HandleFunc("/sites/{siteID}/buckets/{bucket}/batches", func(w http.ResponseWriter, r *http.Request) { + body, err := ioutil.ReadAll(r.Body) + + if err != nil { + Log.Warningf("POST /sites/{siteID}/buckets/{bucket}/batches: %v", err) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusBadRequest) + io.WriteString(w, string(EReadBody.JSON()) + "\n") + + return + } + + var transportBatch TransportUpdateBatch + + if err := json.Unmarshal(body, &transportBatch); err != nil { + Log.Warningf("POST /sites/{siteID}/buckets/{bucket}/batches: Unable to parse update batch") + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusBadRequest) + io.WriteString(w, string(EReadBody.JSON()) + "\n") + + return + } + + var updateBatch UpdateBatch + + err 
= transportBatch.ToUpdateBatch(&updateBatch) + + if err != nil { + Log.Warningf("POST /sites/{siteID}/buckets/{bucket}/batches: Invalid update batch") + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusBadRequest) + io.WriteString(w, string(EReadBody.JSON()) + "\n") + + return + } + + batchResult, err := sitesEndpoint.ClusterFacade.Batch(mux.Vars(r)["siteID"], mux.Vars(r)["bucket"], &updateBatch) + + if err == ENoSuchSite { + Log.Warningf("POST /sites/{siteID}/buckets/{bucket}/batches: Site does not exist") + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusNotFound) + io.WriteString(w, string(ESiteDoesNotExist.JSON()) + "\n") + + return + } + + if err == ENoSuchBucket { + Log.Warningf("POST /sites/{siteID}/buckets/{bucket}/batches: Site does not exist") + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusNotFound) + io.WriteString(w, string(EBucketDoesNotExist.JSON()) + "\n") + + return + } + + batchResult.Quorum = true + + if err == ENoQuorum { + Log.Warningf("POST /sites/{siteID}/buckets/{bucket}/batches: Write failed at some replicas") + batchResult.Quorum = false + } else if err != nil { + Log.Warningf("POST /sites/{siteID}/buckets/{bucket}/batches: Site does not exist") + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusInternalServerError) + io.WriteString(w, string(EStorage.JSON()) + "\n") + + return + } + + encodedBatchResult, _ := json.Marshal(batchResult) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusOK) + io.WriteString(w, string(encodedBatchResult) + "\n") + }).Methods("POST").Name("update_bucket") + + // Query keys in bucket + router.HandleFunc("/sites/{siteID}/buckets/{bucket}/keys", func(w http.ResponseWriter, r *http.Request) { + //sitesEndpoint.ClusterFacade.Get(siteID, bucket, keys) + //sitesEndpoint.ClusterFacade.GetMatches(siteID, bucket, keys) + //Returns APIEntry + query := r.URL.Query() + keys := query["key"] + prefixes := query["prefix"] + + if len(keys) != 0 && len(prefixes) != 0 { + Log.Warningf("GET /sites/{siteID}/buckets/{bucketID}/keys: Client specified both prefixes and keys in the same request") + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusBadRequest) + io.WriteString(w, "\n") + + return + } + + if len(keys) == 0 && len(prefixes) == 0 { + var entries []APIEntry = []APIEntry{ } + encodedEntries, _ := json.Marshal(entries) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusOK) + io.WriteString(w, string(encodedEntries) + "\n") + + return + } + + var siteID string = mux.Vars(r)["siteID"] + var bucket string = mux.Vars(r)["bucket"] + + if len(keys) > 0 { + var byteKeys [][]byte = make([][]byte, len(keys)) + + for i, key := range keys { + byteKeys[i] = []byte(key) + } + + siblingSets, err := sitesEndpoint.ClusterFacade.Get(siteID, bucket, byteKeys) + + if err == ENoSuchSite { + Log.Warningf("GET /sites/{siteID}/buckets/{bucket}/keys: Site does not exist") + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusNotFound) + io.WriteString(w, string(ESiteDoesNotExist.JSON()) + "\n") + + return + } + + if err == ENoSuchBucket { + Log.Warningf("GET /sites/{siteID}/buckets/{bucket}/keys: Bucket does not exist") + + w.Header().Set("Content-Type", "application/json; charset=utf8") + 
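Unlike the internal partitions endpoint, the public batch route above reports partial replication through a Quorum flag on an otherwise successful (200) result. A sketch of the caller's side; the payload is treated as opaque since its schema belongs to TransportUpdateBatch, and the quorum tag is an assumption:

package client

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func submitSiteBatch(baseURL, site, bucket string, batchJSON []byte) (quorum bool, err error) {
	url := fmt.Sprintf("%s/sites/%s/buckets/%s/batches", baseURL, site, bucket)
	resp, err := http.Post(url, "application/json", bytes.NewReader(batchJSON))

	if err != nil {
		return false, err
	}

	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return false, fmt.Errorf("batch rejected: %s", resp.Status)
	}

	var result struct {
		Quorum bool `json:"quorum"` // tag assumed
	}

	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		return false, err
	}

	// Quorum == false means the write landed on some but not all of the
	// replicas it needed; callers may retry or surface a soft failure.
	return result.Quorum, nil
}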
w.WriteHeader(http.StatusNotFound) + io.WriteString(w, string(EBucketDoesNotExist.JSON()) + "\n") + + return + } + + if err == ENoQuorum { + Log.Warningf("GET /sites/{siteID}/buckets/{bucket}/keys: Read quorum could not be established") + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusInternalServerError) + io.WriteString(w, string(ENoQuorum.JSON()) + "\n") + + return + } + + if err != nil { + Log.Warningf("GET /sites/{siteID}/buckets/{bucket}/keys: %v", err.Error()) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusInternalServerError) + io.WriteString(w, string(EStorage.JSON()) + "\n") + + return + } + + var entries []APIEntry = make([]APIEntry, len(siblingSets)) + + for i, key := range keys { + internalEntry := InternalEntry{ + Prefix: "", + Key: key, + Siblings: siblingSets[i], + } + + entries[i] = *internalEntry.ToAPIEntry() + } + + encodedEntries, _ := json.Marshal(entries) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusOK) + io.WriteString(w, string(encodedEntries) + "\n") + + return + } + + if len(prefixes) > 0 { + var byteKeys [][]byte = make([][]byte, len(prefixes)) + + for i, key := range prefixes { + byteKeys[i] = []byte(key) + } + + ssIterator, err := sitesEndpoint.ClusterFacade.GetMatches(siteID, bucket, byteKeys) + + if err == ENoSuchSite { + Log.Warningf("GET /sites/{siteID}/buckets/{bucket}/keys: Site does not exist") + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusNotFound) + io.WriteString(w, string(ESiteDoesNotExist.JSON()) + "\n") + + return + } + + if err == ENoSuchBucket { + Log.Warningf("GET /sites/{siteID}/buckets/{bucket}/keys: Bucket does not exist") + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusNotFound) + io.WriteString(w, string(EBucketDoesNotExist.JSON()) + "\n") + + return + } + + if err == ENoQuorum { + Log.Warningf("GET /sites/{siteID}/buckets/{bucket}/keys: Read quorum could not be established") + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusInternalServerError) + io.WriteString(w, string(ENoQuorum.JSON()) + "\n") + + return + } + + if err != nil { + Log.Warningf("GET /sites/{siteID}/buckets/{bucket}/keys: %v", err.Error()) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusInternalServerError) + io.WriteString(w, string(EStorage.JSON()) + "\n") + + return + } + + var entries []APIEntry = make([]APIEntry, 0) + + for ssIterator.Next() { + if ssIterator.Value().IsTombstoneSet() { + continue + } + + internalEntry := InternalEntry{ + Prefix: string(ssIterator.Prefix()), + Key: string(ssIterator.Key()), + Siblings: ssIterator.Value(), + } + + entries = append(entries, *internalEntry.ToAPIEntry()) + } + + if ssIterator.Error() != nil { + Log.Warningf("GET /sites/{siteID}/buckets/{bucketID}/keys: %v", ssIterator.Error().Error()) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusInternalServerError) + io.WriteString(w, string(EStorage.JSON()) + "\n") + + return + } + + encodedEntries, _ := json.Marshal(entries) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusOK) + io.WriteString(w, string(encodedEntries) + "\n") + + return + } + }).Methods("GET").Name("read_bucket") +} \ No newline at end of file diff --git 
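A request to the keys route above must choose between exact keys and prefixes; mixing the two parameters is a 400, and prefix results filter out tombstoned sets. A small query-builder sketch (base URL illustrative):

package client

import (
	"errors"
	"fmt"
	"net/http"
	"net/url"
)

func queryBucket(baseURL, site, bucket string, keys, prefixes []string) (*http.Response, error) {
	if len(keys) > 0 && len(prefixes) > 0 {
		return nil, errors.New("key and prefix cannot be combined in one request")
	}

	query := url.Values{}

	for _, key := range keys {
		query.Add("key", key)
	}

	for _, prefix := range prefixes {
		query.Add("prefix", prefix)
	}

	return http.Get(fmt.Sprintf("%s/sites/%s/buckets/%s/keys?%s", baseURL, site, bucket, query.Encode()))
}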
a/vendor/github.com/armPelionEdge/devicedb/routes/snapshot.go b/vendor/github.com/armPelionEdge/devicedb/routes/snapshot.go new file mode 100644 index 0000000..f8dfb3a --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/routes/snapshot.go @@ -0,0 +1,163 @@ +package routes +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "encoding/json" + "github.com/gorilla/mux" + "io" + "net/http" + + . "github.com/armPelionEdge/devicedb/error" + . "github.com/armPelionEdge/devicedb/logging" +) + +type SnapshotEndpoint struct { + ClusterFacade ClusterFacade +} + +func (snapshotEndpoint *SnapshotEndpoint) Attach(router *mux.Router) { + router.HandleFunc("/snapshot", func(w http.ResponseWriter, r *http.Request) { + snapshot, err := snapshotEndpoint.ClusterFacade.ClusterSnapshot(r.Context()) + + if err != nil { + Log.Warningf("POST /snapshot: %v", err) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusInternalServerError) + io.WriteString(w, err.Error()) + + return + } + + snapshot.Status = SnapshotProcessing + + encodedSnapshot, err := json.Marshal(snapshot) + + if err != nil { + Log.Warningf("POST /snapshot: %v", err) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusInternalServerError) + io.WriteString(w, "\n") + + return + } + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusOK) + io.WriteString(w, string(encodedSnapshot)) + }).Methods("POST") + + // This endpoint definition must come before the one for /snapshot/{snapshotId}.tar or it will be overridden + router.HandleFunc("/snapshot/{snapshotId}.tar", func(w http.ResponseWriter, r *http.Request) { + var snapshotId string = mux.Vars(r)["snapshotId"] + + err := snapshotEndpoint.ClusterFacade.CheckLocalSnapshotStatus(snapshotId) + + if err == ESnapshotInProgress { + Log.Warningf("GET /snapshot/{snapshotId}: %v", err) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusNotFound) + io.WriteString(w, string(ESnapshotInProgress.JSON())) + + return + } else if err == ESnapshotOpenFailed { + Log.Warningf("GET /snapshot/{snapshotId}: %v", err) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusNotFound) + io.WriteString(w, string(ESnapshotOpenFailed.JSON())) + + return + } else if err == 
ESnapshotReadFailed { + Log.Warningf("GET /snapshot/{snapshotId}: %v", err) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusNotFound) + io.WriteString(w, string(ESnapshotReadFailed.JSON())) + + return + } else if err != nil { + Log.Warningf("GET /snapshot/{snapshotId}: %v", err) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusInternalServerError) + io.WriteString(w, string(EStorage.JSON())) + + return + } + + w.Header().Set("Content-Type", "application/octet-stream") + w.WriteHeader(http.StatusOK) + err = snapshotEndpoint.ClusterFacade.WriteLocalSnapshot(snapshotId, w) + + if err != nil { + Log.Errorf("GET /snapshot/{snapshotId}: %v", err) + } + }).Methods("GET") + + router.HandleFunc("/snapshot/{snapshotId}", func(w http.ResponseWriter, r *http.Request) { + var snapshotId string = mux.Vars(r)["snapshotId"] + var snapshot Snapshot = Snapshot{ UUID: snapshotId } + + err := snapshotEndpoint.ClusterFacade.CheckLocalSnapshotStatus(snapshotId) + + if err == ESnapshotInProgress { + snapshot.Status = SnapshotProcessing + } else if err == ESnapshotOpenFailed { + snapshot.Status = SnapshotMissing + } else if err == ESnapshotReadFailed { + snapshot.Status = SnapshotFailed + } else if err != nil { + Log.Warningf("GET /snapshot/{snapshotId}: %v", err) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusInternalServerError) + io.WriteString(w, err.Error()) + + return + } else { + snapshot.Status = SnapshotComplete + } + + encodedSnapshot, err := json.Marshal(snapshot) + + if err != nil { + Log.Warningf("GET /snapshot/{snapshotId}: %v", err) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusInternalServerError) + io.WriteString(w, "\n") + + return + } + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusOK) + io.WriteString(w, string(encodedSnapshot)) + }).Methods("GET") +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/routes/sync.go b/vendor/github.com/armPelionEdge/devicedb/routes/sync.go new file mode 100644 index 0000000..eb64bd5 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/routes/sync.go @@ -0,0 +1,52 @@ +package routes +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. 
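Taken together, the three handlers above define the snapshot lifecycle: POST /snapshot starts one, GET /snapshot/{snapshotId} reports its status, and GET /snapshot/{snapshotId}.tar streams the archive once complete. A polling sketch under stated assumptions: the JSON tags and the literal status string are guesses, since the Snapshot type and its status constants are defined elsewhere:

package client

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"os"
	"time"
)

type snapshotStatus struct {
	UUID   string `json:"uuid"`   // tag assumed
	Status string `json:"status"` // tag assumed
}

func takeSnapshot(baseURL string) error {
	resp, err := http.Post(baseURL+"/snapshot", "application/json", nil)
	if err != nil {
		return err
	}

	var snap snapshotStatus
	err = json.NewDecoder(resp.Body).Decode(&snap)
	resp.Body.Close()

	if err != nil {
		return err
	}

	// Poll until the snapshot leaves the processing state. The literal
	// value compared against here is an assumption.
	for i := 0; i < 60 && snap.Status != "completed"; i++ {
		time.Sleep(5 * time.Second)

		statusResp, err := http.Get(fmt.Sprintf("%s/snapshot/%s", baseURL, snap.UUID))
		if err != nil {
			return err
		}

		err = json.NewDecoder(statusResp.Body).Decode(&snap)
		statusResp.Body.Close()

		if err != nil {
			return err
		}
	}

	if snap.Status != "completed" {
		return errors.New("snapshot did not complete in time")
	}

	tarResp, err := http.Get(fmt.Sprintf("%s/snapshot/%s.tar", baseURL, snap.UUID))
	if err != nil {
		return err
	}
	defer tarResp.Body.Close()

	out, err := os.Create(snap.UUID + ".tar")
	if err != nil {
		return err
	}
	defer out.Close()

	_, err = io.Copy(out, tarResp.Body)

	return err
}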
+ // + + +import ( + "github.com/gorilla/mux" + "github.com/gorilla/websocket" + "net/http" + + . "github.com/armPelionEdge/devicedb/logging" +) + +type SyncEndpoint struct { + ClusterFacade ClusterFacade + Upgrader websocket.Upgrader +} + +func (syncEndpoint *SyncEndpoint) Attach(router *mux.Router) { + router.HandleFunc("/sync", func(w http.ResponseWriter, r *http.Request) { + conn, err := syncEndpoint.Upgrader.Upgrade(w, r, nil) + + if err != nil { + Log.Errorf("Unable to upgrade connection: %v", err.Error()) + + return + } + + syncEndpoint.ClusterFacade.AcceptRelayConnection(conn, r.Header) + }).Methods("GET") +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/server/cloud_server.go b/vendor/github.com/armPelionEdge/devicedb/server/cloud_server.go new file mode 100644 index 0000000..72ddc66 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/server/cloud_server.go @@ -0,0 +1,184 @@ +package server +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "crypto/tls" + "net" + "net/http" + "time" + "strconv" + "github.com/gorilla/mux" + "sync" + + . 
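On the relay side, the matching client dials /sync and performs the websocket upgrade this handler accepts. A minimal dial sketch; the identity header name appears later in peer.go, while the URL and relay ID here are illustrative:

package main

import (
	"log"
	"net/http"

	"github.com/gorilla/websocket"
)

func main() {
	header := make(http.Header)
	header.Set("X-WigWag-RelayID", "relay-0") // header used by peer.connect; value illustrative

	conn, _, err := websocket.DefaultDialer.Dial("wss://devicedb.example.com/sync", header)

	if err != nil {
		log.Fatalf("Unable to dial sync endpoint: %v", err)
	}

	defer conn.Close()

	// From here the relay and the cloud exchange SyncMessageWrapper frames.
}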
"github.com/armPelionEdge/devicedb/logging" +) + +type CloudServerConfig struct { + NodeID uint64 + ExternalPort int + ExternalHost string + InternalPort int + InternalHost string + RelayTLSConfig *tls.Config +} + +type CloudServer struct { + httpServer *http.Server + relayHTTPServer *http.Server + listener net.Listener + relayListener net.Listener + seedPort int + seedHost string + externalPort int + externalHost string + internalPort int + internalHost string + relayTLSConfig *tls.Config + router *mux.Router + stop chan int + nodeID uint64 +} + +func NewCloudServer(serverConfig CloudServerConfig) *CloudServer { + server := &CloudServer{ + externalHost: serverConfig.ExternalHost, + externalPort: serverConfig.ExternalPort, + internalHost: serverConfig.InternalHost, + internalPort: serverConfig.InternalPort, + relayTLSConfig: serverConfig.RelayTLSConfig, + nodeID: serverConfig.NodeID, + router: mux.NewRouter(), + } + + return server +} + +func (server *CloudServer) ExternalPort() int { + return server.externalPort +} + +func (server *CloudServer) ExternalHost() string { + return server.externalHost +} + +func (server *CloudServer) InternalPort() int { + return server.internalPort +} + +func (server *CloudServer) InternalHost() string { + return server.internalHost +} + +func (server *CloudServer) Router() *mux.Router { + return server.router +} + +func (server *CloudServer) IsHTTPOnly() bool { + return server.externalHost == "" +} + +func (server *CloudServer) Start() error { + server.stop = make(chan int) + + server.httpServer = &http.Server{ + Handler: server.router, + WriteTimeout: 45 * time.Second, + ReadTimeout: 45 * time.Second, + } + + server.relayHTTPServer = &http.Server{ + Handler: server.router, + WriteTimeout: 15 * time.Second, + ReadTimeout: 15 * time.Second, + } + + var listener net.Listener + var relayListener net.Listener + var err error + + listener, err = net.Listen("tcp", server.InternalHost() + ":" + strconv.Itoa(server.InternalPort())) + + if err != nil { + Log.Errorf("Error listening on port %d: %v", server.InternalPort(), err.Error()) + + server.Stop() + + return err + } + + server.listener = listener + + if !server.IsHTTPOnly() { + relayListener, err = tls.Listen("tcp", server.ExternalHost() + ":" + strconv.Itoa(server.ExternalPort()), server.relayTLSConfig) + + if err != nil { + Log.Errorf("Error setting up relay listener on port %d: %v", server.ExternalPort(), err.Error()) + + server.Stop() + + return err + } + + server.relayListener = relayListener + + Log.Infof("Listening external (%s:%d), internal (%s:%d)", server.ExternalHost(), server.ExternalPort(), server.InternalHost(), server.InternalPort()) + } else { + Log.Infof("Listening (HTTP Only) (%s:%d)", server.InternalHost(), server.InternalPort()) + } + + var wg sync.WaitGroup + wg.Add(2) + + go func() { + err = server.httpServer.Serve(server.listener) + server.Stop() // to ensure all other listeners shutdown + wg.Done() + }() + + if !server.IsHTTPOnly() { + go func() { + err = server.relayHTTPServer.Serve(server.relayListener) + server.Stop() // to ensure all other listeners shutdown + wg.Done() + }() + } + + wg.Wait() + + Log.Errorf("Server shutting down. 
Reason: %v", err) + + return err +} + +func (server *CloudServer) Stop() { + if server.listener != nil { + server.listener.Close() + } + + if server.relayListener != nil { + server.relayListener.Close() + } +} diff --git a/vendor/github.com/armPelionEdge/devicedb/server/event.go b/vendor/github.com/armPelionEdge/devicedb/server/event.go new file mode 100644 index 0000000..1260d0d --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/server/event.go @@ -0,0 +1,58 @@ +package server +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "github.com/armPelionEdge/devicedb/historian" + "encoding/json" +) + +type event struct { + Device string `json:"device"` + Event string `json:"event"` + Metadata interface{} `json:"metadata"` + Timestamp uint64 `json:"timestamp"` +} + +func MakeeventsFromEvents(es []*historian.Event) []*event { + var events []*event = make([]*event, len(es)) + + for i, e := range es { + var metadata interface{} + + if err := json.Unmarshal([]byte(e.Data), &metadata); err != nil { + metadata = e.Data + } + + events[i] = &event{ + Device: e.SourceID, + Event: e.Type, + Metadata: metadata, + Timestamp: e.Timestamp, + } + } + + return events +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/server/peer.go b/vendor/github.com/armPelionEdge/devicedb/server/peer.go new file mode 100644 index 0000000..14693e5 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/server/peer.go @@ -0,0 +1,1568 @@ +package server +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. 
+ // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "github.com/gorilla/websocket" + "github.com/prometheus/client_golang/prometheus" + "sync" + "errors" + "time" + "crypto/tls" + crand "crypto/rand" + "fmt" + "encoding/binary" + "encoding/json" + "strconv" + "net/http" + "io/ioutil" + "bytes" + + . "github.com/armPelionEdge/devicedb/data" + . "github.com/armPelionEdge/devicedb/historian" + . "github.com/armPelionEdge/devicedb/alerts" + . "github.com/armPelionEdge/devicedb/logging" + ddbSync "github.com/armPelionEdge/devicedb/sync" +) + +const ( + INCOMING = iota + OUTGOING = iota +) + +const SYNC_SESSION_WAIT_TIMEOUT_SECONDS = 5 +const RECONNECT_WAIT_MAX_SECONDS = 32 +const WRITE_WAIT_SECONDS = 10 +const PONG_WAIT_SECONDS = 60 +const PING_PERIOD_SECONDS = 40 +const CLOUD_PEER_ID = "cloud" + +var ( + prometheusRelayConnectionsGauge = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "relays", + Subsystem: "devicedb_internal", + Name: "connections", + Help: "The number of current relay connections", + }) +) + +func init() { + prometheus.MustRegister(prometheusRelayConnectionsGauge) +} + +func randomID() string { + randomBytes := make([]byte, 16) + crand.Read(randomBytes) + + high := binary.BigEndian.Uint64(randomBytes[:8]) + low := binary.BigEndian.Uint64(randomBytes[8:]) + + return fmt.Sprintf("%016x%016x", high, low) +} + +type PeerJSON struct { + Direction string `json:"direction"` + ID string `json:"id"` + Status string `json:"status"` +} + +type Peer struct { + id string + connection *websocket.Conn + direction int + closed bool + closeChan chan bool + doneChan chan bool + csLock sync.Mutex + rttLock sync.Mutex + roundTripTime time.Duration + result error + uri string + historyURI string + alertsURI string + partitionNumber uint64 + siteID string + httpClient *http.Client + httpHistoryClient *http.Client + httpAlertsClient *http.Client + identityHeader string +} + +func NewPeer(id string, direction int) *Peer { + return &Peer{ + id: id, + direction: direction, + closeChan: make(chan bool, 1), + } +} + +func (peer *Peer) errors() error { + return peer.result +} + +func (peer *Peer) accept(connection *websocket.Conn) (chan *SyncMessageWrapper, chan *SyncMessageWrapper, error) { + peer.csLock.Lock() + defer peer.csLock.Unlock() + + if peer.closed { + return nil, nil, errors.New("Peer closed") + } + + peer.connection = connection + + incoming, outgoing := peer.establishChannels() + + return incoming, outgoing, nil +} + +func (peer *Peer) connect(dialer *websocket.Dialer, uri string) (chan *SyncMessageWrapper, chan *SyncMessageWrapper, error) { + reconnectWaitSeconds := 1 + + peer.uri = uri + peer.httpClient = &http.Client{ Transport: &http.Transport{ TLSClientConfig: dialer.TLSClientConfig } } + + var header http.Header = make(http.Header) + + if peer.identityHeader != "" { + header.Set("X-WigWag-RelayID", peer.identityHeader) + } + + for { + peer.connection = nil + + conn, _, err := dialer.Dial(uri, header) + + if err != nil { + Log.Warningf("Unable to connect to peer %s at %s: %v. 
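The connect loop above retries with a doubling delay capped at RECONNECT_WAIT_MAX_SECONDS and gives up cleanly once the peer is closed. The same backoff skeleton in isolation, as a sketch rather than the actual implementation:

package sketch

import (
	"errors"
	"time"
)

// dialWithBackoff retries dial with a doubling wait capped at max and
// aborts as soon as stop is closed, mirroring peer.connect's loop.
func dialWithBackoff(dial func() error, stop <-chan struct{}) error {
	wait := time.Second
	const max = 32 * time.Second

	for {
		if err := dial(); err == nil {
			return nil
		}

		select {
		case <-time.After(wait):
		case <-stop:
			return errors.New("peer closed")
		}

		if wait < max {
			wait *= 2
		}
	}
}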
Reconnecting in %ds...", peer.id, uri, err, reconnectWaitSeconds) + + select { + case <-time.After(time.Second * time.Duration(reconnectWaitSeconds)): + case <-peer.closeChan: + Log.Debugf("Cancelled connection retry sequence for %s", peer.id) + + return nil, nil, errors.New("Peer closed") + } + + if reconnectWaitSeconds != RECONNECT_WAIT_MAX_SECONDS { + reconnectWaitSeconds *= 2 + } + } else { + peer.csLock.Lock() + defer peer.csLock.Unlock() + + if !peer.closed { + peer.connection = conn + + incoming, outgoing := peer.establishChannels() + + return incoming, outgoing, nil + } + + Log.Debugf("Cancelled connection retry sequence for %s", peer.id) + + closeWSConnection(conn, websocket.CloseNormalClosure) + + return nil, nil, errors.New("Peer closed") + } + } +} + +func (peer *Peer) establishChannels() (chan *SyncMessageWrapper, chan *SyncMessageWrapper) { + connection := peer.connection + peer.doneChan = make(chan bool, 1) + + incoming := make(chan *SyncMessageWrapper) + outgoing := make(chan *SyncMessageWrapper) + + go func() { + pingTicker := time.NewTicker(time.Second * PING_PERIOD_SECONDS) + + for { + select { + case msg, ok := <-outgoing: + // this lock ensures mutual exclusion with close message sending in peer.close() + peer.csLock.Lock() + connection.SetWriteDeadline(time.Now().Add(time.Second * WRITE_WAIT_SECONDS)) + + if !ok { + connection.Close() + peer.csLock.Unlock() + return + } + + err := connection.WriteJSON(msg) + peer.csLock.Unlock() + + if err != nil { + Log.Errorf("Error writing to websocket for peer %s: %v", peer.id, err) + //return + } + case <-pingTicker.C: + // this lock ensures mutual exclusion with close message sending in peer.close() + Log.Infof("Sending a ping to peer %s", peer.id) + peer.csLock.Lock() + connection.SetWriteDeadline(time.Now().Add(time.Second * WRITE_WAIT_SECONDS)) + + encodedPingTime, _ := time.Now().MarshalJSON() + if err := connection.WriteMessage(websocket.PingMessage, encodedPingTime); err != nil { + Log.Errorf("Unable to send ping to peer %s: %v", peer.id, err.Error()) + } + + peer.csLock.Unlock() + } + } + }() + + // incoming, outgoing, err + go func() { + defer close(peer.doneChan) + + connection.SetPongHandler(func(encodedPingTime string) error { + var pingTime time.Time + + if err := pingTime.UnmarshalJSON([]byte(encodedPingTime)); err == nil { + Log.Infof("Received pong from peer %s. Round trip time: %v", peer.id, time.Since(pingTime)) + peer.setRoundTripTime(time.Since(pingTime)) + } else { + Log.Infof("Received pong from peer %s", peer.id) + } + + connection.SetReadDeadline(time.Now().Add(time.Second * PONG_WAIT_SECONDS)) + + return nil + }) + + for { + var nextRawMessage rawSyncMessageWrapper + var nextMessage SyncMessageWrapper + + // The pong handler is invoked in the same goroutine as ReadJSON (ReadJSON calls NextReader which calls advanceFrame which will invoke the pong handler + // if it receives a pong frame). If the pong handler is invoked it will call SetReadDeadline in the same goroutine. In case a pong is never received + // to reset the read deadline it it necessary to set the read deadline before every call to ReadJSON(). There was a bug where connections that were broken + // were never receiving any data but kept attempting writes. This was because the read deadline was met by receiving a data frame right before the connection + // broke and then no pong was ever received to again set the read deadline for the next call to ReadJSON() so ReadJSON() just hung so the broken connections + // were never cleaned up. 
+ connection.SetReadDeadline(time.Now().Add(time.Second * PONG_WAIT_SECONDS)) + + err := connection.ReadJSON(&nextRawMessage) + + if err != nil { + if err.Error() == "websocket: close 1000 (normal)" { + Log.Infof("Received a normal websocket close message from peer %s", peer.id) + } else { + Log.Errorf("Peer %s sent a misformatted message. Unable to parse: %v", peer.id, err) + } + + peer.result = err + + close(incoming) + + return + } + + nextMessage.SessionID = nextRawMessage.SessionID + nextMessage.MessageType = nextRawMessage.MessageType + nextMessage.Direction = nextRawMessage.Direction + + err = peer.typeCheck(&nextRawMessage, &nextMessage) + + if err != nil { + peer.result = err + + close(incoming) + + return + } + + nextMessage.nodeID = peer.id + + incoming <- &nextMessage + } + }() + + return incoming, outgoing +} + +func (peer *Peer) setRoundTripTime(duration time.Duration) { + peer.rttLock.Lock() + defer peer.rttLock.Unlock() + + peer.roundTripTime = duration +} + +func (peer *Peer) getRoundTripTime() time.Duration { + peer.rttLock.Lock() + defer peer.rttLock.Unlock() + + return peer.roundTripTime +} + +func (peer *Peer) typeCheck(rawMsg *rawSyncMessageWrapper, msg *SyncMessageWrapper) error { + var err error = nil + + switch msg.MessageType { + case SYNC_START: + var start Start + err = json.Unmarshal(rawMsg.MessageBody, &start) + msg.MessageBody = start + case SYNC_ABORT: + var abort Abort + err = json.Unmarshal(rawMsg.MessageBody, &abort) + msg.MessageBody = abort + case SYNC_NODE_HASH: + var nodeHash MerkleNodeHash + err = json.Unmarshal(rawMsg.MessageBody, &nodeHash) + msg.MessageBody = nodeHash + case SYNC_OBJECT_NEXT: + var objectNext ObjectNext + err = json.Unmarshal(rawMsg.MessageBody, &objectNext) + msg.MessageBody = objectNext + case SYNC_PUSH_MESSAGE: + var pushMessage PushMessage + err = json.Unmarshal(rawMsg.MessageBody, &pushMessage) + msg.MessageBody = pushMessage + case SYNC_PUSH_DONE: + var pushDoneMessage PushDone + err = json.Unmarshal(rawMsg.MessageBody, &pushDoneMessage) + msg.MessageBody = pushDoneMessage + } + + return err +} + +func (peer *Peer) close(closeCode int) { + peer.csLock.Lock() + defer peer.csLock.Unlock() + + if !peer.closed { + peer.closeChan <- true + peer.closed = true + } + + if peer.connection != nil { + err := peer.connection.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(closeCode, "")) + + if err != nil { + return + } + + select { + case <-peer.doneChan: + case <-time.After(time.Second): + } + + peer.connection.Close() + } +} + +func (peer *Peer) isClosed() bool { + return peer.closed +} + +func (peer *Peer) toJSON(peerID string) *PeerJSON { + var direction string + var status string + + if peer.direction == INCOMING { + direction = "incoming" + } else { + direction = "outgoing" + } + + if peer.connection == nil { + status = "down" + } else { + status = "up" + } + + return &PeerJSON{ + Direction: direction, + Status: status, + ID: peerID, + } +} + +func (peer *Peer) useHistoryServer(tlsBaseConfig *tls.Config, historyServerName string, historyURI string, alertsServerName string, alertsURI string, noValidate bool) { + peer.alertsURI = alertsURI + peer.historyURI = historyURI + + if(tlsBaseConfig == nil) { + Log.Infof(" Starting clients with no tls\n") + tlsBaseConfig = &tls.Config{} + } + tlsConfig := *tlsBaseConfig + tlsConfig.InsecureSkipVerify = noValidate + tlsConfig.ServerName = historyServerName + tlsConfig.RootCAs = nil // ensures that this uses the default root CAs + + peer.httpHistoryClient = &http.Client{ 
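typeCheck above is the second half of a two-phase decode: the envelope is parsed first with the body left raw, then the body is unmarshalled into a concrete type selected by MessageType. The pattern in miniature; every name here is local to the sketch:

package main

import (
	"encoding/json"
	"fmt"
)

type envelope struct {
	MessageType int             `json:"type"`
	MessageBody json.RawMessage `json:"body"`
}

type startMessage struct {
	Bucket string `json:"bucket"`
}

func decode(data []byte) (interface{}, error) {
	var env envelope

	if err := json.Unmarshal(data, &env); err != nil {
		return nil, err
	}

	// Second phase: pick the concrete body type from the envelope.
	switch env.MessageType {
	case 0: // stands in for SYNC_START
		var msg startMessage

		if err := json.Unmarshal(env.MessageBody, &msg); err != nil {
			return nil, err
		}

		return msg, nil
	default:
		return nil, fmt.Errorf("unknown message type %d", env.MessageType)
	}
}

func main() {
	msg, err := decode([]byte(`{"type":0,"body":{"bucket":"default"}}`))
	fmt.Println(msg, err)
}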
Transport: &http.Transport{ TLSClientConfig: &tlsConfig } } + + tlsConfig = *tlsBaseConfig + tlsConfig.InsecureSkipVerify = noValidate + tlsConfig.ServerName = alertsServerName + tlsConfig.RootCAs = nil + + peer.httpAlertsClient = &http.Client{ Transport: &http.Transport{ TLSClientConfig: &tlsConfig } } +} + +func (peer *Peer) pushEvents(events []*Event) error { + // try to forward event to the cloud if failed or error response then return + eventsJSON, _ := json.Marshal(MakeeventsFromEvents(events)) + request, err := http.NewRequest("POST", peer.historyURI, bytes.NewReader(eventsJSON)) + + if err != nil { + return err + } + + request.Header.Add("Content-Type", "application/json") + + resp, err := peer.httpHistoryClient.Do(request) + + if err != nil { + return err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + errorMessage, err := ioutil.ReadAll(resp.Body) + + if err != nil { + return err + } + + return errors.New(fmt.Sprintf("Received error code from server: (%d) %s", resp.StatusCode, string(errorMessage))) + } + + return nil +} + +func (peer *Peer) pushAlerts(alerts map[string]Alert) error { + var alertsList []Alert = make([]Alert, 0, len(alerts)) + + for _, alert := range alerts { + alertsList = append(alertsList, alert) + } + + alertsJSON, _ := json.Marshal(alertsList) + request, err := http.NewRequest("POST", peer.alertsURI, bytes.NewReader(alertsJSON)) + + if err != nil { + return err + } + + request.Header.Add("Content-Type", "application/json") + + resp, err := peer.httpHistoryClient.Do(request) + + if err != nil { + return err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + errorMessage, err := ioutil.ReadAll(resp.Body) + + if err != nil { + return err + } + + return errors.New(fmt.Sprintf("Received error code from server: (%d) %s", resp.StatusCode, string(errorMessage))) + } + + return nil +} + +func closeWSConnection(conn *websocket.Conn, closeCode int) { + done := make(chan bool) + + go func() { + defer close(done) + + for { + _, _, err := conn.ReadMessage() + + if err != nil { + return + } + } + }() + + err := conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(closeCode, "")) + + if err != nil { + return + } + + select { + case <-done: + case <-time.After(time.Second): + } + + conn.Close() +} + +type Hub struct { + id string + tlsConfig *tls.Config + peerMapLock sync.Mutex + peerMap map[string]*Peer + peerMapByPartitionNumber map[uint64]map[string]*Peer + peerMapBySiteID map[string]map[string]*Peer + syncController *SyncController + forwardEvents chan int + forwardAlerts chan int + historian *Historian + alertsMap *AlertMap + purgeOnForward bool + forwardBatchSize uint64 + forwardThreshold uint64 + forwardInterval uint64 + alertsForwardInterval uint64 +} + +func NewHub(id string, syncController *SyncController, tlsConfig *tls.Config) *Hub { + hub := &Hub{ + syncController: syncController, + tlsConfig: tlsConfig, + peerMap: make(map[string]*Peer), + peerMapByPartitionNumber: make(map[uint64]map[string]*Peer), + peerMapBySiteID: make(map[string]map[string]*Peer), + id: id, + forwardEvents: make(chan int, 1), + forwardAlerts: make(chan int, 1), + } + + return hub +} + +func (hub *Hub) Accept(connection *websocket.Conn, partitionNumber uint64, relayID string, siteID string, noValidate bool) error { + conn := connection.UnderlyingConn() + + if _, ok := conn.(*tls.Conn); ok || relayID != "" { + var peerID string + var err error + + if _, ok := conn.(*tls.Conn); ok { + peerID, err = 
hub.ExtractPeerID(conn.(*tls.Conn)) + } else { + peerID = relayID + } + + if err != nil { + if !noValidate { + Log.Warningf("Unable to accept peer connection: %v", err) + + closeWSConnection(connection, websocket.CloseNormalClosure) + + return err + } + + peerID = relayID + } + + if noValidate && relayID != "" { + peerID = relayID + } + + if peerID == "" { + Log.Warningf("Unable to accept peer connection") + + closeWSConnection(connection, websocket.CloseNormalClosure) + + return errors.New("Relay id not known") + } + + go func() { + peer := NewPeer(peerID, INCOMING) + peer.partitionNumber = partitionNumber + peer.siteID = siteID + + if !hub.register(peer) { + Log.Warningf("Rejected peer connection from %s because that peer is already connected", peerID) + + closeWSConnection(connection, websocket.CloseTryAgainLater) + + return + } + + incoming, outgoing, err := peer.accept(connection) + + if err != nil { + Log.Errorf("Unable to accept peer connection from %s: %v. Closing connection and unregistering peer", peerID, err) + + closeWSConnection(connection, websocket.CloseNormalClosure) + + hub.unregister(peer) + + return + } + + Log.Infof("Accepted peer connection from %s", peerID) + + hub.syncController.addPeer(peer.id, outgoing) + + for msg := range incoming { + hub.syncController.incoming <- msg + } + + hub.syncController.removePeer(peer.id) + hub.unregister(peer) + + Log.Infof("Disconnected from peer %s", peerID) + }() + } else { + return errors.New("Cannot accept non-secure connections") + } + + return nil +} + +func (hub *Hub) ConnectCloud(serverName, uri, historyServerName, historyURI, alertsServerName, alertsURI string, noValidate bool) error { + if noValidate { + Log.Warningf("The cloud.noValidate option is set to true. The cloud server's certificate chain and identity will not be verified. !!! THIS OPTION SHOULD NOT BE SET TO TRUE IN PRODUCTION !!!") + } + + dialer, err := hub.dialer(serverName, noValidate, true) + + if err != nil { + return err + } + + go func() { + peer := NewPeer(CLOUD_PEER_ID, OUTGOING) + + // simply try to reserve a spot in the peer map + if !hub.register(peer) { + return + } + + for { + // connect will return an error once the peer is disconnected for good + peer.useHistoryServer(hub.tlsConfig, historyServerName, historyURI, alertsServerName, alertsURI, noValidate) + peer.identityHeader = hub.id + incoming, outgoing, err := peer.connect(dialer, uri) + + if err != nil { + break + } + + Log.Infof("Connected to devicedb cloud") + + hub.syncController.addPeer(peer.id, outgoing) + + // incoming is closed when the peer is disconnected from either end + for msg := range incoming { + hub.syncController.incoming <- msg + } + + hub.syncController.removePeer(peer.id) + + if websocket.IsCloseError(peer.errors(), websocket.CloseNormalClosure) { + Log.Infof("Disconnected from devicedb cloud") + + break + } + + Log.Infof("Disconnected from devicedb cloud. Reconnecting...") + <-time.After(time.Second) + } + + hub.unregister(peer) + }() + + return nil +} + +func (hub *Hub) Connect(peerID, host string, port int) error { + dialer, err := hub.dialer(peerID, false, false) + + if peerID == CLOUD_PEER_ID { + Log.Warningf("Peer ID is not allowed to be %s since it is reserved for the cloud connection. 
This node will not connect to this peer", CLOUD_PEER_ID) + + return errors.New("Peer ID is not allowed to be " + CLOUD_PEER_ID) + } + + if err != nil { + return err + } + + go func() { + peer := NewPeer(peerID, OUTGOING) + + // simply try to reserve a spot in the peer map + if !hub.register(peer) { + return + } + + for { + // connect will return an error once the peer is disconnected for good + incoming, outgoing, err := peer.connect(dialer, "wss://" + host + ":" + strconv.Itoa(port) + "/sync") + + if err != nil { + break + } + + Log.Infof("Connected to peer %s", peer.id) + + hub.syncController.addPeer(peer.id, outgoing) + + // incoming is closed when the peer is disconnected from either end + for msg := range incoming { + hub.syncController.incoming <- msg + } + + hub.syncController.removePeer(peer.id) + + if websocket.IsCloseError(peer.errors(), websocket.CloseNormalClosure) { + Log.Infof("Disconnected from peer %s", peer.id) + + break + } + + Log.Infof("Disconnected from peer %s. Reconnecting...", peer.id) + <-time.After(time.Second) + } + + hub.unregister(peer) + }() + + return nil +} + +func (hub *Hub) Disconnect(peerID string) { + hub.peerMapLock.Lock() + defer hub.peerMapLock.Unlock() + + peer, ok := hub.peerMap[peerID] + + if ok { + peer.close(websocket.CloseNormalClosure) + } +} + +func (hub *Hub) PeerStatus(peerID string) (connected bool, pingTime time.Duration) { + hub.peerMapLock.Lock() + defer hub.peerMapLock.Unlock() + + peer, ok := hub.peerMap[peerID] + + if ok { + return true, peer.getRoundTripTime() + } + + return false, 0 +} + +func (hub *Hub) ReconnectPeer(peerID string) { + hub.peerMapLock.Lock() + defer hub.peerMapLock.Unlock() + + peer, ok := hub.peerMap[peerID] + + if ok { + peer.close(websocket.CloseTryAgainLater) + } +} + +func (hub *Hub) ReconnectPeerByPartition(partitionNumber uint64) { + hub.peerMapLock.Lock() + defer hub.peerMapLock.Unlock() + + if peers, ok := hub.peerMapByPartitionNumber[partitionNumber]; ok { + for _, peer := range peers { + peer.close(websocket.CloseTryAgainLater) + } + } +} + +func (hub *Hub) ReconnectPeerBySite(siteID string) { + hub.peerMapLock.Lock() + defer hub.peerMapLock.Unlock() + + if peers, ok := hub.peerMapBySiteID[siteID]; ok { + for _, peer := range peers { + peer.close(websocket.CloseTryAgainLater) + } + } +} + +func (hub *Hub) dialer(peerID string, noValidate bool, useDefaultRootCAs bool) (*websocket.Dialer, error) { + if hub.tlsConfig == nil { + dialer := &websocket.Dialer{} + return dialer,nil + } + + tlsConfig := hub.tlsConfig.Clone() + + if useDefaultRootCAs { + tlsConfig.RootCAs = nil + } + + tlsConfig.InsecureSkipVerify = noValidate + tlsConfig.ServerName = peerID + + dialer := &websocket.Dialer{ + TLSClientConfig: tlsConfig, + } + + return dialer, nil +} + +func (hub *Hub) register(peer *Peer) bool { + hub.peerMapLock.Lock() + defer hub.peerMapLock.Unlock() + + if _, ok := hub.peerMap[peer.id]; ok { + return false + } + + Log.Debugf("Register peer %s", peer.id) + hub.peerMap[peer.id] = peer + + if _, ok := hub.peerMapByPartitionNumber[peer.partitionNumber]; !ok { + hub.peerMapByPartitionNumber[peer.partitionNumber] = make(map[string]*Peer) + } + + if _, ok := hub.peerMapBySiteID[peer.siteID]; !ok { + hub.peerMapBySiteID[peer.siteID] = make(map[string]*Peer) + } + + hub.peerMapByPartitionNumber[peer.partitionNumber][peer.id] = peer + hub.peerMapBySiteID[peer.siteID][peer.id] = peer + + return true +} + +func (hub *Hub) unregister(peer *Peer) { + hub.peerMapLock.Lock() + defer hub.peerMapLock.Unlock() + + if _, ok := 
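hub.dialer above pins the remote identity: ServerName is set to the peer ID, so the TLS handshake only succeeds when the peer presents a certificate issued for that ID, and cloud connections drop RootCAs back to the system defaults. The same idea isolated into a sketch:

package sketch

import (
	"crypto/tls"

	"github.com/gorilla/websocket"
)

// peerDialer builds a dialer that only completes a handshake with the
// expected peer, mirroring hub.dialer.
func peerDialer(base *tls.Config, peerID string, useSystemRoots bool) *websocket.Dialer {
	if base == nil {
		return &websocket.Dialer{}
	}

	config := base.Clone()
	config.ServerName = peerID // certificate must match the peer's ID

	if useSystemRoots {
		config.RootCAs = nil // nil selects the host's default root CA set
	}

	return &websocket.Dialer{TLSClientConfig: config}
}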
hub.peerMap[peer.id]; ok {
+ Log.Debugf("Unregister peer %s", peer.id)
+ }
+
+ delete(hub.peerMap, peer.id)
+ delete(hub.peerMapByPartitionNumber[peer.partitionNumber], peer.id)
+ delete(hub.peerMapBySiteID[peer.siteID], peer.id)
+
+ if len(hub.peerMapByPartitionNumber[peer.partitionNumber]) == 0 {
+ delete(hub.peerMapByPartitionNumber, peer.partitionNumber)
+ }
+
+ if len(hub.peerMapBySiteID[peer.siteID]) == 0 {
+ delete(hub.peerMapBySiteID, peer.siteID)
+ }
+}
+
+func (hub *Hub) Peers() []*PeerJSON {
+ hub.peerMapLock.Lock()
+ defer hub.peerMapLock.Unlock()
+ peers := make([]*PeerJSON, 0, len(hub.peerMap))
+
+ for peerID, ps := range hub.peerMap {
+ peers = append(peers, ps.toJSON(peerID))
+ }
+
+ return peers
+}
+
+func (hub *Hub) ExtractPeerID(conn *tls.Conn) (string, error) {
+ // VerifyClientCertIfGiven
+ verifiedChains := conn.ConnectionState().VerifiedChains
+
+ if len(verifiedChains) != 1 {
+ return "", errors.New("Invalid client certificate")
+ }
+
+ peerID := verifiedChains[0][0].Subject.CommonName
+
+ return peerID, nil
+}
+
+func (hub *Hub) SyncController() *SyncController {
+ return hub.syncController
+}
+
+func (hub *Hub) ForwardEvents() {
+ if hub.historian.LogSerial() - hub.historian.ForwardIndex() - 1 >= hub.forwardThreshold {
+ select {
+ case hub.forwardEvents <- 1:
+ default:
+ }
+ }
+}
+
+func (hub *Hub) StartForwardingEvents() {
+ go func() {
+ for {
+ select {
+ case <-hub.forwardEvents:
+ case <-time.After(time.Millisecond * time.Duration(hub.forwardInterval)):
+ }
+
+ Log.Info("Begin event forwarding to the cloud")
+
+ var cloudPeer *Peer
+
+ hub.peerMapLock.Lock()
+ cloudPeer, ok := hub.peerMap[CLOUD_PEER_ID]
+ hub.peerMapLock.Unlock()
+
+ if !ok {
+ Log.Info("No cloud present. Nothing to forward to")
+
+ continue
+ }
+
+ for hub.historian.ForwardIndex() < hub.historian.LogSerial() - 1 {
+ minSerial := hub.historian.ForwardIndex() + 1
+ eventIterator, err := hub.historian.Query(&HistoryQuery{ MinSerial: &minSerial, Limit: int(hub.forwardBatchSize) })
+
+ if err != nil {
+ Log.Criticalf("Unable to query event history: %v. No more events will be forwarded to the cloud", err)
+
+ return
+ }
+
+ var highestIndex uint64 = minSerial
+ var batch []*Event = make([]*Event, 0, int(hub.forwardBatchSize))
+
+ for eventIterator.Next() {
+ if eventIterator.Event().Serial > highestIndex {
+ highestIndex = eventIterator.Event().Serial
+ }
+
+ batch = append(batch, eventIterator.Event())
+ }
+
+ if eventIterator.Error() != nil {
+ Log.Criticalf("Unable to query event history. Event iterator error: %v. No more events will be forwarded to the cloud.", eventIterator.Error())
+
+ return
+ }
+
+ Log.Debugf("Forwarding events %d to %d (inclusive) to the cloud.", minSerial, highestIndex)
+
+ if err := cloudPeer.pushEvents(batch); err != nil {
+ Log.Warningf("Unable to push events to the cloud: %v. Event forwarding process will resume later.", err)
+
+ break
+ }
+
+ if err := hub.historian.SetForwardIndex(highestIndex); err != nil {
+ Log.Criticalf("Unable to update forwarding index after push: %v. No more events will be forwarded to the cloud", err)
+
+ return
+ }
+
+ if hub.purgeOnForward {
+ maxSerial := highestIndex + 1
+
+ if err := hub.historian.Purge(&HistoryQuery{ MaxSerial: &maxSerial }); err != nil {
+ Log.Warningf("Unable to purge events after push: %v", err)
+ }
+ }
+ }
+
+ Log.Info("History forwarding complete. Sleeping...")
+ }
+ }()
+}
+
+func (hub *Hub) ForwardAlerts() {
+ // select {
+ // case hub.forwardAlerts <- 1:
+ // default:
+ // }
+}
+
+func (hub *Hub) StartForwardingAlerts() {
+ go func() {
+ for {
+ select {
+ case <-hub.forwardAlerts:
+ case <-time.After(time.Millisecond * time.Duration(hub.alertsForwardInterval)):
+ }
+
+ Log.Info("Begin alert forwarding to the cloud")
+
+ var cloudPeer *Peer
+
+ hub.peerMapLock.Lock()
+ cloudPeer, ok := hub.peerMap[CLOUD_PEER_ID]
+ hub.peerMapLock.Unlock()
+
+ if !ok {
+ Log.Info("No cloud present. Nothing to forward to")
+
+ continue
+ }
+
+ alerts, err := hub.alertsMap.GetAlerts()
+
+ if err != nil {
+ Log.Criticalf("Unable to query alerts map: %v. No more alerts will be forwarded to the cloud", err)
+
+ return
+ }
+
+ if len(alerts) == 0 {
+ Log.Infof("No new alerts. Nothing to forward")
+
+ continue
+ }
+
+ if err := cloudPeer.pushAlerts(alerts); err != nil {
+ Log.Warningf("Unable to push alerts to the cloud: %v. Alert forwarding process will resume later.", err)
+
+ continue
+ }
+
+ if err := hub.alertsMap.ClearAlerts(alerts); err != nil {
+ Log.Criticalf("Unable to clear alerts from the alert store after forwarding: %v. No more alerts will be forwarded to the cloud", err)
+
+ return
+ }
+
+ Log.Info("Alert forwarding complete. Sleeping...")
+ }
+ }()
+}
+
+func (hub *Hub) BroadcastUpdate(siteID string, bucket string, update map[string]*SiblingSet, n uint64) {
+ // broadcast the specified update to at most n peers, or to all peers if n is zero
+ var count uint64 = 0
+
+ hub.peerMapLock.Lock()
+ defer hub.peerMapLock.Unlock()
+
+ peers := hub.peerMapBySiteID[siteID]
+
+ for peerID, _ := range peers {
+ if !hub.syncController.bucketProxyFactory.OutgoingBuckets(peerID)[bucket] {
+ continue
+ }
+
+ if n != 0 && count == n {
+ break
+ }
+
+ hub.syncController.BroadcastUpdate(peerID, bucket, update, n)
+ count += 1
+ }
+}
+
+type SyncSession struct {
+ receiver chan *SyncMessageWrapper
+ sender chan *SyncMessageWrapper
+ sessionState interface{ }
+ waitGroup *sync.WaitGroup
+ peerID string
+ sessionID uint
+}
+
+type SyncController struct {
+ bucketProxyFactory ddbSync.BucketProxyFactory
+ incoming chan *SyncMessageWrapper
+ peers map[string]chan *SyncMessageWrapper
+ waitGroups map[string]*sync.WaitGroup
+ initiatorSessionsMap map[string]map[uint]*SyncSession
+ responderSessionsMap map[string]map[uint]*SyncSession
+ initiatorSessions chan *SyncSession
+ responderSessions chan *SyncSession
+ maxSyncSessions uint
+ nextSessionID uint
+ mapMutex sync.RWMutex
+ syncScheduler ddbSync.SyncScheduler
+ explorationPathLimit uint32
+}
+
+func NewSyncController(maxSyncSessions uint, bucketProxyFactory ddbSync.BucketProxyFactory, syncScheduler ddbSync.SyncScheduler, explorationPathLimit uint32) *SyncController {
+ syncController := &SyncController{
+ bucketProxyFactory: bucketProxyFactory,
+ incoming: make(chan *SyncMessageWrapper),
+ peers: make(map[string]chan *SyncMessageWrapper),
+ waitGroups: make(map[string]*sync.WaitGroup),
+ initiatorSessionsMap: make(map[string]map[uint]*SyncSession),
+ responderSessionsMap: make(map[string]map[uint]*SyncSession),
+ initiatorSessions: make(chan *SyncSession),
+ responderSessions: make(chan *SyncSession),
+ maxSyncSessions: maxSyncSessions,
+ nextSessionID: 1,
+ syncScheduler: syncScheduler,
+ explorationPathLimit: explorationPathLimit,
+ }
+
+ go func() {
+ // multiplex incoming messages across sync sessions
+ for msg := range syncController.incoming {
+ syncController.receiveMessage(msg)
+ }
+ }()
+
+ return 
syncController +} + +func (s *SyncController) addPeer(peerID string, w chan *SyncMessageWrapper) error { + prometheusRelayConnectionsGauge.Inc() + s.mapMutex.Lock() + defer s.mapMutex.Unlock() + + if _, ok := s.peers[peerID]; ok { + return errors.New("Peer already registered") + } + + s.peers[peerID] = w + s.waitGroups[peerID] = &sync.WaitGroup{ } + s.initiatorSessionsMap[peerID] = make(map[uint]*SyncSession) + s.responderSessionsMap[peerID] = make(map[uint]*SyncSession) + + var buckets []string = make([]string, 0, len(s.bucketProxyFactory.IncomingBuckets(peerID))) + + for bucket, _ := range s.bucketProxyFactory.IncomingBuckets(peerID) { + buckets = append(buckets, bucket) + } + + s.syncScheduler.AddPeer(peerID, buckets) + s.syncScheduler.Schedule(peerID) + + return nil +} + +func (s *SyncController) removePeer(peerID string) { + prometheusRelayConnectionsGauge.Dec() + s.mapMutex.Lock() + + if _, ok := s.peers[peerID]; !ok { + s.mapMutex.Unlock() + return + } + + for _, syncSession := range s.initiatorSessionsMap[peerID] { + close(syncSession.receiver) + } + + for _, syncSession := range s.responderSessionsMap[peerID] { + close(syncSession.receiver) + } + + delete(s.initiatorSessionsMap, peerID) + delete(s.responderSessionsMap, peerID) + + s.syncScheduler.RemovePeer(peerID) + + wg := s.waitGroups[peerID] + s.mapMutex.Unlock() + wg.Wait() + + s.mapMutex.Lock() + close(s.peers[peerID]) + delete(s.peers, peerID) + delete(s.waitGroups, peerID) + s.mapMutex.Unlock() +} + +func (s *SyncController) addResponderSession(peerID string, sessionID uint, bucketName string) bool { + if !s.bucketProxyFactory.OutgoingBuckets(peerID)[bucketName] { + Log.Errorf("Unable to add responder session %d for peer %s because bucket %s does not allow outgoing messages to this peer", sessionID, peerID, bucketName) + + return false + } + + bucketProxy, err := s.bucketProxyFactory.CreateBucketProxy(peerID, bucketName) + + if err != nil { + Log.Errorf("Unable to add responder session %d for peer %s because a bucket proxy could not be created for bucket %s: %v", sessionID, peerID, bucketName, err) + + return false + } + + s.mapMutex.Lock() + defer s.mapMutex.Unlock() + + if _, ok := s.responderSessionsMap[peerID]; !ok { + Log.Errorf("Unable to add responder session %d for peer %s because this peer is not registered with the sync controller", sessionID, peerID) + + return false + } + + if _, ok := s.responderSessionsMap[peerID][sessionID]; ok { + Log.Errorf("Unable to add responder session %d for peer %s because a responder session with this id for this peer already exists", sessionID, peerID) + + return false + } + + newResponderSession := &SyncSession{ + receiver: make(chan *SyncMessageWrapper, 1), + sender: s.peers[peerID], + sessionState: NewResponderSyncSession(bucketProxy), + waitGroup: s.waitGroups[peerID], + peerID: peerID, + sessionID: sessionID, + } + + s.responderSessionsMap[peerID][sessionID] = newResponderSession + newResponderSession.waitGroup.Add(1) + + select { + case s.responderSessions <- newResponderSession: + Log.Debugf("Added responder session %d for peer %s and bucket %s", sessionID, peerID, bucketName) + + return true + default: + Log.Warningf("Unable to add responder session %d for peer %s and bucket %s because there are already %d responder sync sessions", sessionID, peerID, bucketName, s.maxSyncSessions) + + delete(s.responderSessionsMap[peerID], sessionID) + newResponderSession.waitGroup.Done() + return false + } +} + +func (s *SyncController) addInitiatorSession(peerID string, sessionID uint, 
bucketName string) bool { + if !s.bucketProxyFactory.IncomingBuckets(peerID)[bucketName] { + Log.Errorf("Unable to add initiator session %d for peer %s because bucket %s does not allow incoming messages from this peer", sessionID, peerID, bucketName) + + return false + } + + bucketProxy, err := s.bucketProxyFactory.CreateBucketProxy(peerID, bucketName) + + if err != nil { + Log.Errorf("Unable to add initiator session %d for peer %s because a bucket proxy could not be created for bucket %s: %v", sessionID, peerID, bucketName, err) + + return false + } + + s.mapMutex.Lock() + defer s.mapMutex.Unlock() + + if _, ok := s.initiatorSessionsMap[peerID]; !ok { + Log.Errorf("Unable to add initiator session %d for peer %s because this peer is not registered with the sync controller", sessionID, peerID) + + return false + } + + newInitiatorSession := &SyncSession{ + receiver: make(chan *SyncMessageWrapper, 1), + sender: s.peers[peerID], + sessionState: NewInitiatorSyncSession(sessionID, bucketProxy, s.explorationPathLimit, s.bucketProxyFactory.OutgoingBuckets(peerID)[bucketName]), + waitGroup: s.waitGroups[peerID], + peerID: peerID, + sessionID: sessionID, + } + + // check map to see if it has this one + if _, ok := s.initiatorSessionsMap[peerID][sessionID]; ok { + Log.Errorf("Unable to add initiator session %d for peer %s because an initiator session with this id for this peer already exists", sessionID, peerID) + + return false + } + + s.initiatorSessionsMap[peerID][sessionID] = newInitiatorSession + newInitiatorSession.waitGroup.Add(1) + + select { + case s.initiatorSessions <- newInitiatorSession: + Log.Debugf("Added initiator session %d for peer %s and bucket %s", sessionID, peerID, bucketName) + + s.syncScheduler.Advance() + s.initiatorSessionsMap[peerID][sessionID].receiver <- nil + + return true + default: + Log.Warningf("Unable to add initiator session %d for peer %s and bucket %s because there are already %d initiator sync sessions", sessionID, peerID, bucketName, s.maxSyncSessions) + + delete(s.initiatorSessionsMap[peerID], sessionID) + newInitiatorSession.waitGroup.Done() + return false + } +} + +func (s *SyncController) removeResponderSession(responderSession *SyncSession) { + s.mapMutex.Lock() + + if _, ok := s.responderSessionsMap[responderSession.peerID]; ok { + delete(s.responderSessionsMap[responderSession.peerID], responderSession.sessionID) + } + + s.mapMutex.Unlock() + responderSession.waitGroup.Done() + + Log.Debugf("Removed responder session %d for peer %s", responderSession.sessionID, responderSession.peerID) +} + +func (s *SyncController) removeInitiatorSession(initiatorSession *SyncSession) { + s.mapMutex.Lock() + + if _, ok := s.initiatorSessionsMap[initiatorSession.peerID]; ok { + s.syncScheduler.Schedule(initiatorSession.peerID) + delete(s.initiatorSessionsMap[initiatorSession.peerID], initiatorSession.sessionID) + } + + s.mapMutex.Unlock() + initiatorSession.waitGroup.Done() + + Log.Debugf("Removed initiator session %d for peer %s", initiatorSession.sessionID, initiatorSession.peerID) +} + +func (s *SyncController) sendAbort(peerID string, sessionID uint, direction uint) { + s.mapMutex.RLock() + defer s.mapMutex.RUnlock() + + w := s.peers[peerID] + + if w != nil { + w <- &SyncMessageWrapper{ + SessionID: sessionID, + MessageType: SYNC_ABORT, + MessageBody: &Abort{ }, + Direction: direction, + } + } +} + +func (s *SyncController) receiveMessage(msg *SyncMessageWrapper) { + nodeID := msg.nodeID + sessionID := msg.SessionID + + if msg.Direction == REQUEST { + if 
msg.MessageType == SYNC_START {
+ if !s.addResponderSession(nodeID, sessionID, msg.MessageBody.(Start).Bucket) {
+ s.sendAbort(nodeID, sessionID, RESPONSE)
+ }
+ }
+
+ s.mapMutex.RLock()
+ defer s.mapMutex.RUnlock()
+
+ if _, ok := s.responderSessionsMap[nodeID]; !ok {
+ // s.sendAbort(nodeID, sessionID, RESPONSE)
+
+ return
+ }
+
+ if _, ok := s.responderSessionsMap[nodeID][sessionID]; !ok {
+ // s.sendAbort(nodeID, sessionID, RESPONSE)
+
+ return
+ }
+
+ // This select statement ensures that this function does not block if
+ // the session's read loop is no longer receiving because the session
+ // is currently being removed. This works due to the synchronous nature
+ // of the protocol.
+ select {
+ case s.responderSessionsMap[nodeID][sessionID].receiver <- msg:
+ default:
+ }
+ } else if msg.Direction == RESPONSE {
+ s.mapMutex.RLock()
+ defer s.mapMutex.RUnlock()
+
+ if _, ok := s.initiatorSessionsMap[nodeID]; !ok {
+ // s.sendAbort(nodeID, sessionID, REQUEST)
+
+ return
+ }
+
+ if _, ok := s.initiatorSessionsMap[nodeID][sessionID]; !ok {
+ // s.sendAbort(nodeID, sessionID, REQUEST)
+
+ return
+ }
+
+ select {
+ case s.initiatorSessionsMap[nodeID][sessionID].receiver <- msg:
+ default:
+ }
+ } else if msg.MessageType == SYNC_PUSH_MESSAGE {
+ pushMessage := msg.MessageBody.(PushMessage)
+
+ if !s.bucketProxyFactory.IncomingBuckets(nodeID)[pushMessage.Bucket] {
+ Log.Errorf("Ignoring push message from %s because this node does not accept incoming pushes from bucket %s from that node", nodeID, pushMessage.Bucket)
+
+ return
+ }
+
+ key := pushMessage.Key
+ value := pushMessage.Value
+ bucketProxy, err := s.bucketProxyFactory.CreateBucketProxy(nodeID, pushMessage.Bucket)
+
+ if err != nil {
+ Log.Errorf("Ignoring push message from %s for bucket %s because an error occurred while creating a bucket proxy: %v", nodeID, pushMessage.Bucket, err)
+
+ return
+ }
+
+ err = bucketProxy.Merge(map[string]*SiblingSet{ key: value })
+
+ if err != nil {
+ Log.Errorf("Unable to merge object from peer %s into key %s in bucket %s: %v", nodeID, key, pushMessage.Bucket, err)
+ } else {
+ Log.Infof("Merged object from peer %s into key %s in bucket %s", nodeID, key, pushMessage.Bucket)
+ }
+ }
+}
+
+func (s *SyncController) runInitiatorSession() {
+ for initiatorSession := range s.initiatorSessions {
+ state := initiatorSession.sessionState.(*InitiatorSyncSession)
+
+ for {
+ var receivedMessage *SyncMessageWrapper
+
+ select {
+ case receivedMessage = <-initiatorSession.receiver:
+ case <-time.After(time.Second * SYNC_SESSION_WAIT_TIMEOUT_SECONDS):
+ Log.Warningf("[%s-%d] timeout", initiatorSession.peerID, initiatorSession.sessionID)
+ }
+
+ initialState := state.State()
+
+ var m *SyncMessageWrapper = state.NextState(receivedMessage)
+
+ m.Direction = REQUEST
+
+ if receivedMessage == nil {
+ Log.Debugf("[%s-%d] nil : (%s -> %s) : %s", initiatorSession.peerID, initiatorSession.sessionID, StateName(initialState), StateName(state.State()), MessageTypeName(m.MessageType))
+ } else {
+ Log.Debugf("[%s-%d] %s : (%s -> %s) : %s", initiatorSession.peerID, initiatorSession.sessionID, MessageTypeName(receivedMessage.MessageType), StateName(initialState), StateName(state.State()), MessageTypeName(m.MessageType))
+ }
+
+ if m != nil {
+ initiatorSession.sender <- m
+ }
+
+ if state.State() == END {
+ break
+ }
+ }
+
+ s.removeInitiatorSession(initiatorSession)
+ }
+}
+
+func (s *SyncController) runResponderSession() {
+ for responderSession := range s.responderSessions {
+ state := 
responderSession.sessionState.(*ResponderSyncSession) + + for { + var receivedMessage *SyncMessageWrapper + + select { + case receivedMessage = <-responderSession.receiver: + case <-time.After(time.Second * SYNC_SESSION_WAIT_TIMEOUT_SECONDS): + Log.Warningf("[%s-%d] timeout", responderSession.peerID, responderSession.sessionID) + } + + initialState := state.State() + + var m *SyncMessageWrapper = state.NextState(receivedMessage) + + m.Direction = RESPONSE + + if receivedMessage == nil { + Log.Debugf("[%s-%d] nil : (%s -> %s) : %s", responderSession.peerID, responderSession.sessionID, StateName(initialState), StateName(state.State()), MessageTypeName(m.MessageType)) + } else { + Log.Debugf("[%s-%d] %s : (%s -> %s) : %s", responderSession.peerID, responderSession.sessionID, MessageTypeName(receivedMessage.MessageType), StateName(initialState), StateName(state.State()), MessageTypeName(m.MessageType)) + } + + if m != nil { + responderSession.sender <- m + } + + if state.State() == END { + break + } + } + + s.removeResponderSession(responderSession) + } +} + +func (s *SyncController) StartInitiatorSessions() { + for i := 0; i < int(s.maxSyncSessions); i += 1 { + go s.runInitiatorSession() + } + + go func() { + for { + peerID, bucketName := s.syncScheduler.Next() + + s.mapMutex.RLock() + + if peerID == "" { + s.mapMutex.RUnlock() + + continue + } + + s.mapMutex.RUnlock() + + if s.addInitiatorSession(peerID, s.nextSessionID, bucketName) { + s.nextSessionID += 1 + } else { + } + } + }() +} + +func (s *SyncController) StartResponderSessions() { + for i := 0; i < int(s.maxSyncSessions); i += 1 { + go s.runResponderSession() + } +} + +func (s *SyncController) Start() { + s.StartInitiatorSessions() + s.StartResponderSessions() +} + +func (s *SyncController) BroadcastUpdate(peerID string, bucket string, update map[string]*SiblingSet, n uint64) { + s.mapMutex.RLock() + defer s.mapMutex.RUnlock() + + for key, value := range update { + msg := &SyncMessageWrapper{ + SessionID: 0, + MessageType: SYNC_PUSH_MESSAGE, + MessageBody: PushMessage{ + Key: key, + Value: value, + Bucket: bucket, + }, + Direction: PUSH, + } + + w := s.peers[peerID] + + if w != nil { + Log.Debugf("Push object at key %s in bucket %s to peer %s", key, bucket, peerID) + w <- msg + } + } +} diff --git a/vendor/github.com/armPelionEdge/devicedb/server/server.go b/vendor/github.com/armPelionEdge/devicedb/server/server.go new file mode 100644 index 0000000..984be16 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/server/server.go @@ -0,0 +1,1299 @@ +package server +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "fmt" + "io" + "io/ioutil" + "net" + "errors" + "net/http" + "crypto/tls" + "crypto/x509" + "encoding/json" + "encoding/hex" + "time" + "strconv" + "github.com/gorilla/mux" + "github.com/gorilla/websocket" + "net/http/pprof" + + . "github.com/armPelionEdge/devicedb/bucket" + . "github.com/armPelionEdge/devicedb/bucket/builtin" + . "github.com/armPelionEdge/devicedb/data" + . "github.com/armPelionEdge/devicedb/merkle" + . "github.com/armPelionEdge/devicedb/shared" + . "github.com/armPelionEdge/devicedb/storage" + . "github.com/armPelionEdge/devicedb/historian" + . "github.com/armPelionEdge/devicedb/alerts" + . "github.com/armPelionEdge/devicedb/error" + . "github.com/armPelionEdge/devicedb/logging" + ddbSync "github.com/armPelionEdge/devicedb/sync" + . "github.com/armPelionEdge/devicedb/site" + . "github.com/armPelionEdge/devicedb/transport" +) + +const ( + defaultNodePrefix = iota + cloudNodePrefix = iota + lwwNodePrefix = iota + localNodePrefix = iota + historianPrefix = iota + alertsMapPrefix = iota +) + +type peerAddress struct { + ID string `json:"id"` + Host string `json:"host"` + Port int `json:"port"` +} + +type cloudAddress struct { + ID string `json:"id"` + Host string `json:"host"` + Port int `json:"port"` + NoValidate bool + URI string `json:"uri"` +} + +type AlertEventData struct { + Metadata interface{} `json:"metadata"` + Status bool `json:"status"` +} + +type ServerConfig struct { + DBFile string + Port int + MerkleDepth uint8 + NodeID string + Hub *Hub + ServerTLS *tls.Config + PeerAddresses map[string]peerAddress + SyncPushBroadcastLimit uint64 + GCInterval uint64 + GCPurgeAge uint64 + Cloud *cloudAddress + History *cloudAddress + Alerts *cloudAddress + HistoryPurgeOnForward bool + HistoryEventLimit uint64 + HistoryEventFloor uint64 + HistoryPurgeBatchSize int + HistoryForwardBatchSize uint64 + HistoryForwardInterval uint64 + HistoryForwardThreshold uint64 + AlertsForwardInterval uint64 + SyncExplorationPathLimit uint32 +} + +func (sc *ServerConfig) LoadFromFile(file string) error { + var ysc YAMLServerConfig + + err := ysc.LoadFromFile(file) + + if err != nil { + return err + } + + sc.GCInterval = ysc.GCInterval + sc.GCPurgeAge = ysc.GCPurgeAge + sc.DBFile = ysc.DBFile + sc.Port = ysc.Port + sc.MerkleDepth = ysc.MerkleDepth + sc.SyncPushBroadcastLimit = ysc.SyncPushBroadcastLimit + sc.SyncExplorationPathLimit = ysc.SyncExplorationPathLimit + sc.PeerAddresses = make(map[string]peerAddress) + for _, yamlPeer := range ysc.Peers { + if _, ok := sc.PeerAddresses[yamlPeer.ID]; ok { + return errors.New(fmt.Sprintf("Duplicate entry for peer %s in config file", yamlPeer.ID)) + } + + sc.PeerAddresses[yamlPeer.ID] = peerAddress{ + ID: yamlPeer.ID, + Host: yamlPeer.Host, + Port: yamlPeer.Port, + } + } + + if ysc.Cloud != nil { + sc.Cloud = &cloudAddress{ + ID: ysc.Cloud.ID, + NoValidate: ysc.Cloud.NoValidate, + URI: ysc.Cloud.URI, + } + + sc.History = &cloudAddress{ + ID: ysc.Cloud.HistoryID, + NoValidate: ysc.Cloud.NoValidate, + URI: ysc.Cloud.HistoryURI, + } + + sc.Alerts = &cloudAddress{ + ID: ysc.Cloud.AlertsID, + NoValidate: ysc.Cloud.NoValidate, + URI: ysc.Cloud.AlertsURI, + } + } + + sc.HistoryPurgeOnForward = ysc.History.PurgeOnForward + sc.HistoryEventLimit = 
ysc.History.EventLimit + sc.HistoryEventFloor = ysc.History.EventFloor + sc.HistoryPurgeBatchSize = ysc.History.PurgeBatchSize + sc.HistoryForwardBatchSize = ysc.History.ForwardBatchSize + sc.HistoryForwardInterval = ysc.History.ForwardInterval + sc.HistoryForwardThreshold = ysc.History.ForwardThreshold + sc.AlertsForwardInterval = ysc.Alerts.ForwardInterval + + var clientTLSConfig *tls.Config = nil + sc.NodeID = ysc.NodeID + + if (YAMLTLSFiles{}) != ysc.TLS { + rootCAs := x509.NewCertPool() + + if !rootCAs.AppendCertsFromPEM([]byte(ysc.TLS.RootCA)) { + return errors.New("Could not append root CA to chain") + } + + clientCertificate, _ := tls.X509KeyPair([]byte(ysc.TLS.ClientCertificate), []byte(ysc.TLS.ClientKey)) + serverCertificate, _ := tls.X509KeyPair([]byte(ysc.TLS.ServerCertificate), []byte(ysc.TLS.ServerKey)) + clientTLSConfig = &tls.Config{ + Certificates: []tls.Certificate{ clientCertificate }, + RootCAs: rootCAs, + GetClientCertificate: func(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { + return &clientCertificate, nil + }, + } + serverTLSConfig := &tls.Config{ + Certificates: []tls.Certificate{ serverCertificate }, + ClientCAs: rootCAs, + } + + sc.ServerTLS = serverTLSConfig + + + clientCertX509, _ := x509.ParseCertificate(clientCertificate.Certificate[0]) + serverCertX509, _ := x509.ParseCertificate(serverCertificate.Certificate[0]) + clientCN := clientCertX509.Subject.CommonName + serverCN := serverCertX509.Subject.CommonName + + if len(clientCN) == 0 { + return errors.New("The common name in the certificate is empty. The node ID must not be empty") + } + + if clientCN != serverCN { + return errors.New(fmt.Sprintf("Server and client certificates have differing common names(%s and %s). This is the string used to uniquely identify the node.", serverCN, clientCN)) + } + if ysc.NodeID == "" { + sc.NodeID = clientCN + } + } else { + if ysc.NodeID == "" { + return errors.New("No TLS certificates provided. Config file must have 'nodeid' field") + } + Log.Infof(" No TLS Config provided. Http mode\n") + } + sc.Hub = NewHub(sc.NodeID, NewSyncController(uint(ysc.MaxSyncSessions), nil, ddbSync.NewPeriodicSyncScheduler(time.Millisecond * time.Duration(ysc.SyncSessionPeriod)), sc.SyncExplorationPathLimit), clientTLSConfig) + return nil +} + +type Server struct { + bucketList *BucketList + httpServer *http.Server + listener net.Listener + storageDriver StorageDriver + port int + upgrader websocket.Upgrader + hub *Hub + serverTLS *tls.Config + id string + syncPushBroadcastLimit uint64 + garbageCollector *GarbageCollector + historian *Historian + alertsMap *AlertMap + merkleDepth uint8 +} + +func NewServer(serverConfig ServerConfig) (*Server, error) { + if serverConfig.MerkleDepth < MerkleMinDepth || serverConfig.MerkleDepth > MerkleMaxDepth { + serverConfig.MerkleDepth = MerkleDefaultDepth + } + + if len(serverConfig.NodeID) == 0 { + serverConfig.NodeID = "Node" + } + + upgrader := websocket.Upgrader{ + ReadBufferSize: 1024, + WriteBufferSize: 1024, + } + + storageDriver := NewLevelDBStorageDriver(serverConfig.DBFile, nil) + nodeID := serverConfig.NodeID + server := &Server{ NewBucketList(), nil, nil, storageDriver, serverConfig.Port, upgrader, serverConfig.Hub, serverConfig.ServerTLS, nodeID, serverConfig.SyncPushBroadcastLimit, nil, nil, nil, serverConfig.MerkleDepth } + err := server.storageDriver.Open() + + if err != nil { + if err != ECorrupted { + Log.Errorf("Error creating server: %v", err.Error()) + + return nil, err + } + + Log.Error("Database is corrupted. 
Attempting automatic recovery now...") + + recoverError := server.recover() + + if recoverError != nil { + Log.Criticalf("Unable to recover corrupted database. Reason: %v", recoverError.Error()) + Log.Critical("Database daemon will now exit") + + return nil, EStorage + } + + Log.Info("Database recovery successful!") + } + + defaultBucket, _ := NewDefaultBucket(nodeID, NewPrefixedStorageDriver([]byte{ defaultNodePrefix }, storageDriver), serverConfig.MerkleDepth) + cloudBucket, _ := NewCloudBucket(nodeID, NewPrefixedStorageDriver([]byte{ cloudNodePrefix }, storageDriver), serverConfig.MerkleDepth, RelayMode) + lwwBucket, _ := NewLWWBucket(nodeID, NewPrefixedStorageDriver([]byte{ lwwNodePrefix }, storageDriver), serverConfig.MerkleDepth) + localBucket, _ := NewLocalBucket(nodeID, NewPrefixedStorageDriver([]byte{ localNodePrefix }, storageDriver), MerkleMinDepth) + + server.historian = NewHistorian(NewPrefixedStorageDriver([]byte{ historianPrefix }, storageDriver), serverConfig.HistoryEventLimit, serverConfig.HistoryEventFloor, serverConfig.HistoryPurgeBatchSize) + server.alertsMap = NewAlertMap(NewAlertStore(NewPrefixedStorageDriver([]byte{ alertsMapPrefix }, storageDriver))) + + server.bucketList.AddBucket(defaultBucket) + server.bucketList.AddBucket(lwwBucket) + server.bucketList.AddBucket(cloudBucket) + server.bucketList.AddBucket(localBucket) + + server.garbageCollector = NewGarbageCollector(server.bucketList, serverConfig.GCInterval, serverConfig.GCPurgeAge) + + if server.hub != nil && server.hub.syncController != nil { + server.hub.historian = server.historian + server.hub.alertsMap = server.alertsMap + server.hub.purgeOnForward = serverConfig.HistoryPurgeOnForward + server.hub.forwardBatchSize = serverConfig.HistoryForwardBatchSize + server.hub.forwardThreshold = serverConfig.HistoryForwardThreshold + server.hub.forwardInterval = serverConfig.HistoryForwardInterval + server.hub.alertsForwardInterval = serverConfig.AlertsForwardInterval + } + + if server.hub != nil && server.hub.syncController != nil { + site := NewRelaySiteReplica(nodeID, server.bucketList) + sitePool := &RelayNodeSitePool{ Site: site } + bucketProxyFactory := &ddbSync.RelayBucketProxyFactory{ SitePool: sitePool } + server.hub.syncController.bucketProxyFactory = bucketProxyFactory + } + + if server.hub != nil && serverConfig.PeerAddresses != nil { + for _, pa := range serverConfig.PeerAddresses { + server.hub.Connect(pa.ID, pa.Host, pa.Port) + } + } + + if server.hub != nil && serverConfig.Cloud != nil { + server.hub.ConnectCloud(serverConfig.Cloud.ID, serverConfig.Cloud.URI, serverConfig.History.ID, serverConfig.History.URI, serverConfig.Alerts.ID, serverConfig.Alerts.URI, serverConfig.Cloud.NoValidate) + } + + return server, nil +} + +func (server *Server) Port() int { + return server.port +} + +func (server *Server) Buckets() *BucketList { + return server.bucketList +} + +func (server *Server) History() *Historian { + return server.historian +} + +func (server *Server) AlertsMap() *AlertMap { + return server.alertsMap +} + +func (server *Server) StartGC() { + server.garbageCollector.Start() +} + +func (server *Server) StopGC() { + server.garbageCollector.Stop() +} + +func (server *Server) recover() error { + recoverError := server.storageDriver.Recover() + + if recoverError != nil { + Log.Criticalf("Unable to recover corrupted database. 
Reason: %v", recoverError.Error())
+
+ return EStorage
+ }
+
+ Log.Infof("Rebuilding merkle trees...")
+
+ for i := 0; i < localNodePrefix; i += 1 {
+ tempBucket, _ := NewDefaultBucket("temp", NewPrefixedStorageDriver([]byte{ byte(i) }, server.storageDriver), server.merkleDepth)
+ rebuildError := tempBucket.RebuildMerkleLeafs()
+
+ if rebuildError != nil {
+ Log.Errorf("Unable to rebuild merkle tree for node %d. Reason: %v", i, rebuildError.Error())
+
+ return rebuildError
+ }
+
+ recordError := tempBucket.RecordMetadata()
+
+ if recordError != nil {
+ Log.Errorf("Unable to rebuild node metadata for node %d. Reason: %v", i, recordError.Error())
+
+ return recordError
+ }
+ }
+
+ return nil
+}
+
+func (server *Server) Start() error {
+ r := mux.NewRouter()
+
+ r.HandleFunc("/{bucket}/merkleRoot", func(w http.ResponseWriter, r *http.Request) {
+ bucket := mux.Vars(r)["bucket"]
+
+ if !server.bucketList.HasBucket(bucket) {
+ Log.Warningf("GET /{bucket}/merkleRoot: Invalid bucket")
+
+ w.Header().Set("Content-Type", "application/json; charset=utf8")
+ w.WriteHeader(http.StatusNotFound)
+ io.WriteString(w, string(EInvalidBucket.JSON()) + "\n")
+
+ return
+ }
+
+ hashBytes := server.bucketList.Get(bucket).MerkleTree().RootHash().Bytes()
+
+ w.Header().Set("Content-Type", "text/plain; charset=utf8")
+ w.WriteHeader(http.StatusOK)
+ io.WriteString(w, hex.EncodeToString(hashBytes[:]))
+ }).Methods("GET")
+
+ r.HandleFunc("/{bucket}/watch", func(w http.ResponseWriter, r *http.Request) {
+ query := r.URL.Query()
+
+ var keys [][]byte = make([][]byte, 0)
+ var prefixes [][]byte = make([][]byte, 0)
+ var lastSerial uint64
+
+ if qKeys, ok := query["key"]; ok {
+ keys = make([][]byte, len(qKeys))
+
+ for i, key := range qKeys {
+ keys[i] = []byte(key)
+ }
+ }
+
+ if qPrefixes, ok := query["prefix"]; ok {
+ prefixes = make([][]byte, len(qPrefixes))
+
+ for i, prefix := range qPrefixes {
+ prefixes[i] = []byte(prefix)
+ }
+ }
+
+ if qLastSerials, ok := query["lastSerial"]; ok {
+ ls, err := strconv.ParseUint(qLastSerials[0], 10, 64)
+
+ if err != nil {
+ Log.Warningf("GET /{bucket}/watch: Invalid lastSerial specified")
+
+ w.Header().Set("Content-Type", "application/json; charset=utf8")
+ w.WriteHeader(http.StatusBadRequest)
+ io.WriteString(w, string(EInvalidKey.JSON()) + "\n")
+
+ return
+ }
+
+ lastSerial = ls
+ }
+
+ bucket := mux.Vars(r)["bucket"]
+
+ if !server.bucketList.HasBucket(bucket) {
+ Log.Warningf("GET /{bucket}/watch: Invalid bucket")
+
+ w.Header().Set("Content-Type", "application/json; charset=utf8")
+ w.WriteHeader(http.StatusNotFound)
+ io.WriteString(w, string(EInvalidBucket.JSON()) + "\n")
+
+ return
+ }
+
+ var ch chan Row = make(chan Row)
+ go server.bucketList.Get(bucket).Watch(r.Context(), keys, prefixes, lastSerial, ch)
+
+ flusher, _ := w.(http.Flusher)
+
+ w.Header().Set("Content-Type", "text/event-stream")
+ w.Header().Set("Cache-Control", "no-cache")
+ w.Header().Set("Connection", "keep-alive")
+
+ // It is important not to break out of this loop early.
+ // If the channel is not read until it is closed, it will
+ // block future updates
+ for update := range ch {
+ // This is a marker intended to indicate
+ // the end of the initial query to find
+ // any missed updates based on the lastSerial
+ // field
+ if update.Key == "" {
+ fmt.Fprintf(w, "data: \n\n")
+ flusher.Flush()
+ continue
+ }
+
+ var transportUpdate TransportRow
+
+ if err := transportUpdate.FromRow(&update); err != nil {
+ Log.Errorf("Encountered an error while converting an update to its transport format: 
%v", err) + continue + } + + encodedUpdate, err := json.Marshal(transportUpdate) + + if err != nil { + Log.Errorf("Encountered an error while encoding an update to JSON: %v", err) + continue + } + + _, err = fmt.Fprintf(w, "data: %s\n\n", string(encodedUpdate)) + + flusher.Flush() + + if err != nil { + Log.Errorf("Encountered an error while writing an update to the event stream for a watcher: %v", err) + continue + } + } + }).Methods("GET") + + r.HandleFunc("/{bucket}/values", func(w http.ResponseWriter, r *http.Request) { + startTime := time.Now() + bucket := mux.Vars(r)["bucket"] + + if !server.bucketList.HasBucket(bucket) { + Log.Warningf("POST /{bucket}/values: Invalid bucket") + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusNotFound) + io.WriteString(w, string(EInvalidBucket.JSON()) + "\n") + + return + } + + var keysArray *[]string + decoder := json.NewDecoder(r.Body) + err := decoder.Decode(&keysArray) + + if err != nil || keysArray == nil { + Log.Warningf("POST /{bucket}/values: %v", err) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusBadRequest) + io.WriteString(w, string(EInvalidKey.JSON()) + "\n") + + return + } + + keys := *keysArray + + if len(keys) == 0 { + siblingSetsJSON, _ := json.Marshal([]*TransportSiblingSet{ }) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusOK) + io.WriteString(w, string(siblingSetsJSON)) + + return + } + + byteKeys := make([][]byte, 0, len(keys)) + + for _, k := range keys { + if len(k) == 0 { + Log.Warningf("POST /{bucket}/values: Empty key") + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusBadRequest) + io.WriteString(w, string(EInvalidKey.JSON()) + "\n") + + return + } + + byteKeys = append(byteKeys, []byte(k)) + } + + siblingSets, err := server.bucketList.Get(bucket).Get(byteKeys) + + if err != nil { + Log.Warningf("POST /{bucket}/values: Internal server error") + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusInternalServerError) + io.WriteString(w, string(err.(DBerror).JSON()) + "\n") + + return + } + + transportSiblingSets := make([]*TransportSiblingSet, 0, len(siblingSets)) + + for _, siblingSet := range siblingSets { + if siblingSet == nil { + transportSiblingSets = append(transportSiblingSets, nil) + + continue + } + + var transportSiblingSet TransportSiblingSet + err := transportSiblingSet.FromSiblingSet(siblingSet) + + if err != nil { + Log.Warningf("POST /{bucket}/values: Internal server error") + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusInternalServerError) + io.WriteString(w, string(err.(DBerror).JSON()) + "\n") + + return + } + + transportSiblingSets = append(transportSiblingSets, &transportSiblingSet) + } + + siblingSetsJSON, _ := json.Marshal(transportSiblingSets) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusOK) + io.WriteString(w, string(siblingSetsJSON)) + + Log.Debugf("Get from bucket %s: %v took %s", bucket, keys, time.Since(startTime)) + }).Methods("POST") + + r.HandleFunc("/{bucket}/matches", func(w http.ResponseWriter, r *http.Request) { + startTime := time.Now() + bucket := mux.Vars(r)["bucket"] + + if !server.bucketList.HasBucket(bucket) { + Log.Warningf("POST /{bucket}/matches: Invalid bucket") + + w.Header().Set("Content-Type", "application/json; charset=utf8") + 
w.WriteHeader(http.StatusNotFound) + io.WriteString(w, string(EInvalidBucket.JSON()) + "\n") + + return + } + + var keysArray *[]string + decoder := json.NewDecoder(r.Body) + err := decoder.Decode(&keysArray) + + if err != nil || keysArray == nil { + Log.Warningf("POST /{bucket}/matches: %v", err) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusBadRequest) + io.WriteString(w, string(EInvalidKey.JSON()) + "\n") + + return + } + + keys := *keysArray + + if len(keys) == 0 { + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusOK) + io.WriteString(w, "\n") + + return + } + + byteKeys := make([][]byte, 0, len(keys)) + + for _, k := range keys { + if len(k) == 0 { + Log.Warningf("POST /{bucket}/matches: Empty key") + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusBadRequest) + io.WriteString(w, string(EInvalidKey.JSON()) + "\n") + + return + } + + byteKeys = append(byteKeys, []byte(k)) + } + + ssIterator, err := server.bucketList.Get(bucket).GetMatches(byteKeys) + + if err != nil { + Log.Warningf("POST /{bucket}/matches: Internal server error") + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusInternalServerError) + io.WriteString(w, string(err.(DBerror).JSON()) + "\n") + + return + } + + defer ssIterator.Release() + + flusher, _ := w.(http.Flusher) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.Header().Set("X-Content-Type-Options", "nosniff") + w.WriteHeader(http.StatusOK) + + for ssIterator.Next() { + key := ssIterator.Key() + prefix := ssIterator.Prefix() + nextSiblingSet := ssIterator.Value() + + if nextSiblingSet.IsTombstoneSet() { + continue + } + + var nextTransportSiblingSet TransportSiblingSet + + err := nextTransportSiblingSet.FromSiblingSet(nextSiblingSet) + + if err != nil { + Log.Warningf("POST /{bucket}/matches: Internal server error") + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusInternalServerError) + io.WriteString(w, string(err.(DBerror).JSON()) + "\n") + + return + } + + siblingSetsJSON, _ := json.Marshal(&nextTransportSiblingSet) + + _, err = fmt.Fprintf(w, "%s\n%s\n%s\n", string(prefix), string(key), string(siblingSetsJSON)) + flusher.Flush() + + if err != nil { + return + } + } + + Log.Debugf("Get matches from bucket %s: %v took %s", bucket, keys, time.Since(startTime)) + }).Methods("POST") + + r.HandleFunc("/events/{sourceID}/{type}", func(w http.ResponseWriter, r *http.Request) { + query := r.URL.Query() + + var category string + var groups []string + + if categories, ok := query["category"]; ok { + category = categories[0] + } + + if _, ok := query["group"]; ok { + groups = query["group"] + } else { + groups = []string{ } + } + + eventType := mux.Vars(r)["type"] + sourceID := mux.Vars(r)["sourceID"] + body, err := ioutil.ReadAll(r.Body) + + if err != nil { + Log.Warningf("PUT /events/{type}/{sourceID}: %v", err) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusBadRequest) + io.WriteString(w, string(EReadBody.JSON()) + "\n") + + return + } + + if len(eventType) == 0 { + Log.Warningf("PUT /events/{type}/{sourceID}: Empty event type") + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusBadRequest) + io.WriteString(w, string(EEmpty.JSON()) + "\n") + + return + } + + if len(sourceID) == 0 { + Log.Warningf("PUT 
/events/{type}/{sourceID}: Source id empty")
+
+ w.Header().Set("Content-Type", "application/json; charset=utf8")
+ w.WriteHeader(http.StatusBadRequest)
+ io.WriteString(w, string(EEmpty.JSON()) + "\n")
+
+ return
+ }
+
+ timestamp := NanoToMilli(uint64(time.Now().UnixNano()))
+
+ if category == "alerts" {
+ var alertData AlertEventData
+
+ err = json.Unmarshal(body, &alertData)
+
+ if err != nil {
+ Log.Warningf("PUT /events/{type}/{sourceID}: Unable to parse alert body %s", body)
+
+ w.Header().Set("Content-Type", "application/json; charset=utf8")
+ w.WriteHeader(http.StatusBadRequest)
+ io.WriteString(w, string(EAlertBody.JSON()) + "\n")
+
+ return
+ }
+
+ err = server.alertsMap.UpdateAlert(Alert{
+ Key: sourceID,
+ Level: eventType,
+ Timestamp: timestamp,
+ Metadata: alertData.Metadata,
+ Status: alertData.Status,
+ })
+
+ server.hub.ForwardAlerts()
+ } else {
+ err = server.historian.LogEvent(&Event{
+ Timestamp: timestamp,
+ SourceID: sourceID,
+ Type: eventType,
+ Data: string(body),
+ Groups: groups,
+ })
+
+ server.hub.ForwardEvents()
+ }
+
+ if err != nil {
+ Log.Warningf("PUT /events/{type}/{sourceID}: Internal server error")
+
+ w.Header().Set("Content-Type", "application/json; charset=utf8")
+ w.WriteHeader(http.StatusInternalServerError)
+ io.WriteString(w, string(err.(DBerror).JSON()) + "\n")
+
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json; charset=utf8")
+ w.WriteHeader(http.StatusOK)
+ io.WriteString(w, "\n")
+ }).Methods("PUT")
+
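+ // A sketch of how a client might log an event against the handler above.
+ // The host, port, client and payload are hypothetical; the route shape is
+ // /events/{sourceID}/{type}, and alerts are submitted the same way with
+ // ?category=alerts and a body matching AlertEventData:
+ //
+ //     body := bytes.NewBufferString(`{"reading": 22.5}`)
+ //     req, _ := http.NewRequest("PUT", "https://relay:9090/events/sensor-1/temperature", body)
+ //     resp, err := client.Do(req)
+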
+ r.HandleFunc("/events", func(w http.ResponseWriter, r *http.Request) {
+ query := r.URL.Query()
+
+ var historyQuery HistoryQuery
+
+ if sources, ok := query["source"]; ok {
+ historyQuery.Sources = make([]string, 0, len(sources))
+
+ for _, source := range sources {
+ if len(source) != 0 {
+ historyQuery.Sources = append(historyQuery.Sources, source)
+ }
+ }
+ } else {
+ historyQuery.Sources = make([]string, 0)
+ }
+
+ if _, ok := query["limit"]; ok {
+ limitString := query.Get("limit")
+ limit, err := strconv.Atoi(limitString)
+
+ if err != nil {
+ Log.Warningf("GET /events: %v", err)
+
+ w.Header().Set("Content-Type", "application/json; charset=utf8")
+ w.WriteHeader(http.StatusBadRequest)
+ io.WriteString(w, string(ERequestQuery.JSON()) + "\n")
+
+ return
+ }
+
+ historyQuery.Limit = limit
+ }
+
+ if _, ok := query["sortOrder"]; ok {
+ sortOrder := query.Get("sortOrder")
+
+ if sortOrder == "desc" || sortOrder == "asc" {
+ historyQuery.Order = sortOrder
+ }
+ }
+
+ if _, ok := query["data"]; ok {
+ data := query.Get("data")
+
+ historyQuery.Data = &data
+ }
+
+ if _, ok := query["maxAge"]; ok {
+ maxAgeString := query.Get("maxAge")
+ maxAge, err := strconv.Atoi(maxAgeString)
+
+ if err != nil {
+ Log.Warningf("GET /events: %v", err)
+
+ w.Header().Set("Content-Type", "application/json; charset=utf8")
+ w.WriteHeader(http.StatusBadRequest)
+ io.WriteString(w, string(ERequestQuery.JSON()) + "\n")
+
+ return
+ }
+
+ if maxAge <= 0 {
+ Log.Warningf("GET /events: Non positive age specified")
+
+ w.Header().Set("Content-Type", "application/json; charset=utf8")
+ w.WriteHeader(http.StatusBadRequest)
+ io.WriteString(w, string(ERequestQuery.JSON()) + "\n")
+
+ return
+ }
+
+ nowMS := NanoToMilli(uint64(time.Now().UnixNano()))
+ historyQuery.After = nowMS - uint64(maxAge)
+ } else {
+ if _, ok := query["afterTime"]; ok {
+ afterString := query.Get("afterTime")
+ after, err := strconv.Atoi(afterString)
+
+ if err != nil {
+ Log.Warningf("GET /events: %v", err)
+
+ w.Header().Set("Content-Type", "application/json; charset=utf8")
+ w.WriteHeader(http.StatusBadRequest)
+ io.WriteString(w, string(ERequestQuery.JSON()) + "\n")
+
+ return
+ }
+
+ if after < 0 {
+ Log.Warningf("GET /events: Negative afterTime specified")
+
+ w.Header().Set("Content-Type", "application/json; charset=utf8")
+ w.WriteHeader(http.StatusBadRequest)
+ io.WriteString(w, string(ERequestQuery.JSON()) + "\n")
+
+ return
+ }
+
+ historyQuery.After = uint64(after)
+ }
+
+ if _, ok := query["beforeTime"]; ok {
+ beforeString := query.Get("beforeTime")
+ before, err := strconv.Atoi(beforeString)
+
+ if err != nil {
+ Log.Warningf("GET /events: %v", err)
+
+ w.Header().Set("Content-Type", "application/json; charset=utf8")
+ w.WriteHeader(http.StatusBadRequest)
+ io.WriteString(w, string(ERequestQuery.JSON()) + "\n")
+
+ return
+ }
+
+ if before < 0 {
+ Log.Warningf("GET /events: Negative beforeTime specified")
+
+ w.Header().Set("Content-Type", "application/json; charset=utf8")
+ w.WriteHeader(http.StatusBadRequest)
+ io.WriteString(w, string(ERequestQuery.JSON()) + "\n")
+
+ return
+ }
+
+ historyQuery.Before = uint64(before)
+ }
+ }
+
+ eventIterator, err := server.historian.Query(&historyQuery)
+
+ if err != nil {
+ Log.Warningf("GET /events: Internal server error")
+
+ w.Header().Set("Content-Type", "application/json; charset=utf8")
+ w.WriteHeader(http.StatusInternalServerError)
+ io.WriteString(w, string(err.(DBerror).JSON()) + "\n")
+
+ return
+ }
+
+ defer eventIterator.Release()
+
+ flusher, _ := w.(http.Flusher)
+
+ w.Header().Set("Content-Type", "application/json; charset=utf8")
+ w.Header().Set("X-Content-Type-Options", "nosniff")
+ w.WriteHeader(http.StatusOK)
+
+ for eventIterator.Next() {
+ event := eventIterator.Event()
+ eventJSON, _ := json.Marshal(&event)
+
+ _, err = fmt.Fprintf(w, "%s\n", string(eventJSON))
+ flusher.Flush()
+
+ if err != nil {
+ return
+ }
+ }
+ }).Methods("GET")
+
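+ // The DELETE registration below accepts the same maxAge/afterTime/beforeTime
+ // filters as the GET variant above but purges the matching events instead of
+ // returning them. Times are millisecond timestamps, so a hypothetical client
+ // could purge everything recorded before a given point in time with:
+ //
+ //     req, _ := http.NewRequest("DELETE", "https://relay:9090/events?beforeTime=1599687581000", nil)
+ //     resp, err := client.Do(req)
+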
+ r.HandleFunc("/events", func(w http.ResponseWriter, r *http.Request) {
+ query := r.URL.Query()
+
+ var historyQuery HistoryQuery
+
+ if _, ok := query["maxAge"]; ok {
+ maxAgeString := query.Get("maxAge")
+ maxAge, err := strconv.Atoi(maxAgeString)
+
+ if err != nil {
+ Log.Warningf("DELETE /events: %v", err)
+
+ w.Header().Set("Content-Type", "application/json; charset=utf8")
+ w.WriteHeader(http.StatusBadRequest)
+ io.WriteString(w, string(ERequestQuery.JSON()) + "\n")
+
+ return
+ }
+
+ if maxAge <= 0 {
+ Log.Warningf("DELETE /events: Non positive age specified")
+
+ w.Header().Set("Content-Type", "application/json; charset=utf8")
+ w.WriteHeader(http.StatusBadRequest)
+ io.WriteString(w, string(ERequestQuery.JSON()) + "\n")
+
+ return
+ }
+
+ nowMS := NanoToMilli(uint64(time.Now().UnixNano()))
+ historyQuery.After = nowMS - uint64(maxAge)
+ } else {
+ if _, ok := query["afterTime"]; ok {
+ afterString := query.Get("afterTime")
+ after, err := strconv.Atoi(afterString)
+
+ if err != nil {
+ Log.Warningf("DELETE /events: %v", err)
+
+ w.Header().Set("Content-Type", "application/json; charset=utf8")
+ w.WriteHeader(http.StatusBadRequest)
+ io.WriteString(w, string(ERequestQuery.JSON()) + "\n")
+
+ return
+ }
+
+ if after < 0 {
+ Log.Warningf("DELETE /events: Negative afterTime specified")
+
+ w.Header().Set("Content-Type", "application/json; charset=utf8")
+ w.WriteHeader(http.StatusBadRequest)
+ io.WriteString(w, string(ERequestQuery.JSON()) + "\n")
+
+ return
+ }
+
+ historyQuery.After = uint64(after)
+ }
+
+ if _, ok := query["beforeTime"]; ok {
+ beforeString := query.Get("beforeTime")
+ before, err := strconv.Atoi(beforeString)
+
+ if err != nil {
+ Log.Warningf("DELETE /events: %v", err)
+
+ w.Header().Set("Content-Type", "application/json; charset=utf8")
+ w.WriteHeader(http.StatusBadRequest)
+ io.WriteString(w, string(ERequestQuery.JSON()) + "\n")
+
+ return
+ }
+
+ if before < 0 {
+ Log.Warningf("DELETE /events: Negative beforeTime specified")
+
+ w.Header().Set("Content-Type", "application/json; charset=utf8")
+ w.WriteHeader(http.StatusBadRequest)
+ io.WriteString(w, string(ERequestQuery.JSON()) + "\n")
+
+ return
+ }
+
+ historyQuery.Before = uint64(before)
+ }
+ }
+
+ err := server.historian.Purge(&historyQuery)
+
+ if err != nil {
+ Log.Warningf("DELETE /events: Internal server error")
+
+ w.Header().Set("Content-Type", "application/json; charset=utf8")
+ w.WriteHeader(http.StatusInternalServerError)
+ io.WriteString(w, string(err.(DBerror).JSON()) + "\n")
+
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json; charset=utf8")
+ w.WriteHeader(http.StatusOK)
+ io.WriteString(w, "\n")
+ }).Methods("DELETE")
+
+ r.HandleFunc("/{bucket}/batch", func(w http.ResponseWriter, r *http.Request) {
+ startTime := time.Now()
+ bucket := mux.Vars(r)["bucket"]
+
+ if !server.bucketList.HasBucket(bucket) {
+ Log.Warningf("POST /{bucket}/batch: Invalid bucket")
+
+ w.Header().Set("Content-Type", "application/json; charset=utf8")
+ w.WriteHeader(http.StatusNotFound)
+ io.WriteString(w, string(EInvalidBucket.JSON()) + "\n")
+
+ return
+ }
+
+ if !server.bucketList.Get(bucket).ShouldAcceptWrites("") {
+ Log.Warningf("POST /{bucket}/batch: Attempted to write to %s bucket", bucket)
+
+ w.Header().Set("Content-Type", "application/json; charset=utf8")
+ w.WriteHeader(http.StatusUnauthorized)
+ io.WriteString(w, string(EUnauthorized.JSON()) + "\n")
+
+ return
+ }
+
+ var updateBatch UpdateBatch
+ var transportUpdateBatch TransportUpdateBatch
+ decoder := json.NewDecoder(r.Body)
+ err := decoder.Decode(&transportUpdateBatch)
+
+ if err != nil {
+ Log.Warningf("POST /{bucket}/batch: %v", err)
+
+ w.Header().Set("Content-Type", "application/json; charset=utf8")
+ w.WriteHeader(http.StatusBadRequest)
+ io.WriteString(w, string(EInvalidBatch.JSON()) + "\n")
+
+ return
+ }
+
+ err = transportUpdateBatch.ToUpdateBatch(&updateBatch)
+
+ if err != nil {
+ Log.Warningf("POST /{bucket}/batch: %v", err)
+
+ w.Header().Set("Content-Type", "application/json; charset=utf8")
+ w.WriteHeader(http.StatusBadRequest)
+ io.WriteString(w, string(EInvalidBatch.JSON()) + "\n")
+
+ return
+ }
+
+ updatedSiblingSets, err := server.bucketList.Get(bucket).Batch(&updateBatch)
+
+ if err != nil {
+ Log.Warningf("POST /{bucket}/batch: Internal server error")
+
+ w.Header().Set("Content-Type", "application/json; charset=utf8")
+ w.WriteHeader(http.StatusInternalServerError)
+ io.WriteString(w, string(err.(DBerror).JSON()) + "\n")
+
+ return
+ }
+
+ if server.hub != nil {
+ server.hub.BroadcastUpdate("", bucket, updatedSiblingSets, server.syncPushBroadcastLimit)
+ }
+
+ w.Header().Set("Content-Type", "application/json; charset=utf8")
+ w.WriteHeader(http.StatusOK)
+ io.WriteString(w, "\n")
+
+ Log.Debugf("Batch update to bucket %s took %s", bucket, time.Since(startTime))
+ }).Methods("POST")
+
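+ // A sketch of polling the endpoint below for peer connection state
+ // (hypothetical client and URL; PeerJSON is the wire format produced
+ // by hub.Peers()):
+ //
+ //     resp, err := client.Get("https://relay:9090/peers")
+ //     if err == nil {
+ //         var peers []PeerJSON
+ //         err = json.NewDecoder(resp.Body).Decode(&peers)
+ //         resp.Body.Close()
+ //     }
+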
+ r.HandleFunc("/peers", func(w http.ResponseWriter, r *http.Request) {
+ // { id: peerID, direction: direction, status: status }
+ var peers []*PeerJSON
+
+ if server.hub != nil {
+ peers = server.hub.Peers()
+ } else {
+ peers = make([]*PeerJSON, 0)
+ }
+
+ peersJSON, _ := json.Marshal(peers)
+
+ w.Header().Set("Content-Type", "application/json; charset=utf8")
+ w.WriteHeader(http.StatusOK)
+ io.WriteString(w, string(peersJSON) + "\n")
+ }).Methods("GET")
+
+ r.HandleFunc("/peers/{peerID}", func(w http.ResponseWriter, r *http.Request) {
+ peerID := mux.Vars(r)["peerID"]
+
+ var pa peerAddress
+ decoder := json.NewDecoder(r.Body)
+ err := decoder.Decode(&pa)
+
+ if err != nil {
+ Log.Warningf("PUT /peers/{peerID}: %v", err)
+
+ w.Header().Set("Content-Type", "application/json; charset=utf8")
+ w.WriteHeader(http.StatusBadRequest)
+ io.WriteString(w, string(EInvalidPeer.JSON()) + "\n")
+
+ return
+ }
+
+ if server.hub != nil {
+ server.hub.Connect(peerID, pa.Host, pa.Port)
+ }
+
+ w.Header().Set("Content-Type", "application/json; charset=utf8")
+ w.WriteHeader(http.StatusOK)
+ io.WriteString(w, "\n")
+ }).Methods("PUT")
+
+ r.HandleFunc("/peers/{peerID}", func(w http.ResponseWriter, r *http.Request) {
+ peerID := mux.Vars(r)["peerID"]
+
+ if server.hub != nil {
+ server.hub.Disconnect(peerID)
+ }
+
+ w.Header().Set("Content-Type", "application/json; charset=utf8")
+ w.WriteHeader(http.StatusOK)
+ io.WriteString(w, "\n")
+ }).Methods("DELETE")
+
+ r.HandleFunc("/sync", func(w http.ResponseWriter, r *http.Request) {
+ if server.hub == nil {
+ // log error
+
+ return
+ }
+
+ conn, err := server.upgrader.Upgrade(w, r, nil)
+
+ if err != nil {
+ return
+ }
+
+ server.hub.Accept(conn, 0, "", "", false)
+ }).Methods("GET")
+
+ r.HandleFunc("/debug/pprof/", pprof.Index)
+ r.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
+ r.HandleFunc("/debug/pprof/profile", pprof.Profile)
+ r.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
+
+ server.httpServer = &http.Server{
+ Handler: r,
+ WriteTimeout: 0,
+ ReadTimeout: 15 * time.Second,
+ }
+
+ var listener net.Listener
+ var err error
+
+ if server.serverTLS == nil {
+ listener, err = net.Listen("tcp", "0.0.0.0:" + strconv.Itoa(server.Port()))
+ } else {
+ server.serverTLS.ClientAuth = tls.VerifyClientCertIfGiven
+ listener, err = tls.Listen("tcp", "0.0.0.0:" + strconv.Itoa(server.Port()), server.serverTLS)
+ }
+
+ if err != nil {
+ Log.Errorf("Error listening on port: %d", server.port)
+
+ server.Stop()
+
+ return err
+ }
+
+ err = server.storageDriver.Open()
+
+ if err != nil {
+ if err != ECorrupted {
+ Log.Errorf("Error opening storage driver: %v", err.Error())
+
+ return EStorage
+ }
+
+ Log.Error("Database is corrupted. Attempting automatic recovery now...")
+
+ recoverError := server.recover()
+
+ if recoverError != nil {
+ Log.Criticalf("Unable to recover corrupted database. Reason: %v", recoverError.Error())
+ Log.Critical("Database daemon will now exit")
+
+ return EStorage
+ }
+ }
+
+ server.listener = listener
+
+ Log.Infof("Node %s listening on port %d", server.id, server.port)
+
+ err = server.httpServer.Serve(server.listener)
+
+ Log.Errorf("Node %s server shutting down. Reason: %v", server.id, err)
+
+ return err
+}
+
+func (server *Server) Stop() error {
+ if server.listener != nil {
+ server.listener.Close()
+ }
+
+ server.storageDriver.Close()
+
+ return nil
+}
\ No newline at end of file
diff --git a/vendor/github.com/armPelionEdge/devicedb/server/sync.go b/vendor/github.com/armPelionEdge/devicedb/server/sync.go
new file mode 100644
index 0000000..dec474f
--- /dev/null
+++ b/vendor/github.com/armPelionEdge/devicedb/server/sync.go
@@ -0,0 +1,881 @@
+package server
+//
+ // Copyright (c) 2019 ARM Limited.
+ //
+ // SPDX-License-Identifier: MIT
+ //
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
+ // of this software and associated documentation files (the "Software"), to
+ // deal in the Software without restriction, including without limitation the
+ // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ // sell copies of the Software, and to permit persons to whom the Software is
+ // furnished to do so, subject to the following conditions:
+ //
+ // The above copyright notice and this permission notice shall be included in all
+ // copies or substantial portions of the Software.
+ //
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ // SOFTWARE.
+ //
+
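+ // This file implements the sync protocol's session state machines. A rough
+ // sketch of how an initiator session is driven (send and await stand in for
+ // the transport and are hypothetical; the sync controller's
+ // runInitiatorSession is the real driver):
+ //
+ //     session := NewInitiatorSyncSession(1, bucketProxy, pathLimit, false)
+ //
+ //     var received *SyncMessageWrapper // a nil message drives START -> HANDSHAKE
+ //
+ //     for {
+ //         msg := session.NextState(received)
+ //
+ //         if msg != nil {
+ //             send(msg)
+ //         }
+ //
+ //         if session.State() == END {
+ //             break
+ //         }
+ //
+ //         received = await()
+ //     }
+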
+ // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "encoding/json" + + . "github.com/armPelionEdge/devicedb/data" + . "github.com/armPelionEdge/devicedb/logging" + ddbSync "github.com/armPelionEdge/devicedb/sync" +) + +const ( + START = iota + HANDSHAKE = iota + ROOT_HASH_COMPARE = iota + LEFT_HASH_COMPARE = iota + RIGHT_HASH_COMPARE = iota + HASH_COMPARE = iota + DB_OBJECT_PUSH = iota + END = iota +) + +func StateName(s int) string { + names := map[int]string{ + START: "START", + HANDSHAKE: "HANDSHAKE", + ROOT_HASH_COMPARE: "ROOT_HASH_COMPARE", + LEFT_HASH_COMPARE: "LEFT_HASH_COMPARE", + RIGHT_HASH_COMPARE: "RIGHT_HASH_COMPARE", + HASH_COMPARE: "HASH_COMPARE", + DB_OBJECT_PUSH: "DB_OBJECT_PUSH", + END: "END", + } + + return names[s] +} + +const PROTOCOL_VERSION uint = 2 + +// the state machine +type InitiatorSyncSession struct { + sessionID uint + currentState int + maxDepth uint8 + theirDepth uint8 + explorationQueue []uint32 + explorationPathLimit uint32 + bucketProxy ddbSync.BucketProxy + replicatesOutgoing bool + currentNodeKeys map[string]bool +} + +func NewInitiatorSyncSession(id uint, bucketProxy ddbSync.BucketProxy, explorationPathLimit uint32, replicatesOutgoing bool) *InitiatorSyncSession { + return &InitiatorSyncSession{ + sessionID: id, + currentState: START, + maxDepth: bucketProxy.MerkleTree().Depth(), + explorationQueue: make([]uint32, 0), + explorationPathLimit: explorationPathLimit, + bucketProxy: bucketProxy, + replicatesOutgoing: replicatesOutgoing, + } +} + +func (syncSession *InitiatorSyncSession) State() int { + return syncSession.currentState +} + +func (syncSession *InitiatorSyncSession) SetState(state int) { + syncSession.currentState = state +} + +func (syncSession *InitiatorSyncSession) SetResponderDepth(d uint8) { + if d < syncSession.maxDepth { + syncSession.maxDepth = d + } + + syncSession.theirDepth = d +} + +func (syncSession *InitiatorSyncSession) ResponderDepth() uint8 { + return syncSession.theirDepth +} + +func (syncSession *InitiatorSyncSession) PushExplorationQueue(n uint32) { + syncSession.explorationQueue = append(syncSession.explorationQueue, n) +} + +func (syncSession *InitiatorSyncSession) PopExplorationQueue() uint32 { + var n uint32 + + if len(syncSession.explorationQueue) != 0 { + n = syncSession.explorationQueue[0] + + syncSession.explorationQueue = syncSession.explorationQueue[1:] + } + + return n +} + +func (syncSession *InitiatorSyncSession) 
PeekExplorationQueue() uint32 { + var n uint32 + + if len(syncSession.explorationQueue) != 0 { + n = syncSession.explorationQueue[0] + } + + return n +} + +func (syncSession *InitiatorSyncSession) ExplorationQueueSize() uint32 { + return uint32(len(syncSession.explorationQueue)) +} + +func (syncSession *InitiatorSyncSession) SetExplorationPathLimit(limit uint32) { + syncSession.explorationPathLimit = limit +} + +func (syncSession *InitiatorSyncSession) ExplorationPathLimit() uint32 { + return syncSession.explorationPathLimit +} + +func (syncSession *InitiatorSyncSession) getNodeKeys() error { + if syncSession.replicatesOutgoing { + return nil + } + + nodeKeys := make(map[string]bool) + iter, err := syncSession.bucketProxy.GetSyncChildren(syncSession.PeekExplorationQueue()) + + if err != nil { + return err + } + + for iter.Next() { + nodeKeys[string(iter.Key())] = true + } + + iter.Release() + + if iter.Error() != nil { + return iter.Error() + } + + syncSession.currentNodeKeys = nodeKeys + + return nil +} + +func (syncSession *InitiatorSyncSession) forgetNonAuthoritativeKeys() error { + if syncSession.replicatesOutgoing { + return nil + } + + nodeKeys := make([][]byte, 0, len(syncSession.currentNodeKeys)) + + for key, _ := range syncSession.currentNodeKeys { + nodeKeys = append(nodeKeys, []byte(key)) + } + + return syncSession.bucketProxy.Forget(nodeKeys) +} + +func (syncSession *InitiatorSyncSession) NextState(syncMessageWrapper *SyncMessageWrapper) *SyncMessageWrapper { + // Once an error occurs in the MerkleTree() the merkle tree remains in the error state, + // so this sync session should be aborted + var messageWrapper *SyncMessageWrapper + + switch syncSession.currentState { + case START: + syncSession.currentState = HANDSHAKE + + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_START, + MessageBody: Start{ + ProtocolVersion: PROTOCOL_VERSION, + MerkleDepth: syncSession.bucketProxy.MerkleTree().Depth(), + Bucket: syncSession.bucketProxy.Name(), + }, + } + + break + case HANDSHAKE: + if syncMessageWrapper == nil || syncMessageWrapper.MessageType != SYNC_START { + syncSession.currentState = END + + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_ABORT, + MessageBody: &Abort{ }, + } + + break + } + + if syncMessageWrapper.MessageBody.(Start).ProtocolVersion != PROTOCOL_VERSION { + Log.Warningf("Initiator Session %d: responder protocol version is at %d which is unsupported by this database peer.
Aborting...", syncSession.sessionID, syncMessageWrapper.MessageBody.(Start).ProtocolVersion) + + syncSession.currentState = END + + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_ABORT, + MessageBody: &Abort{ }, + } + + break + } + + if syncSession.maxDepth > syncMessageWrapper.MessageBody.(Start).MerkleDepth { + syncSession.maxDepth = syncMessageWrapper.MessageBody.(Start).MerkleDepth + } + + syncSession.theirDepth = syncMessageWrapper.MessageBody.(Start).MerkleDepth + syncSession.currentState = ROOT_HASH_COMPARE + syncSession.PushExplorationQueue(syncSession.bucketProxy.MerkleTree().RootNode()) + + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_NODE_HASH, + MessageBody: MerkleNodeHash{ + NodeID: syncSession.bucketProxy.MerkleTree().TranslateNode(syncSession.PeekExplorationQueue(), syncSession.theirDepth), + HashHigh: syncSession.bucketProxy.MerkleTree().NodeHash(syncSession.PeekExplorationQueue()).High(), + HashLow: syncSession.bucketProxy.MerkleTree().NodeHash(syncSession.PeekExplorationQueue()).Low(), + }, + } + + break + case ROOT_HASH_COMPARE: + myHash := syncSession.bucketProxy.MerkleTree().NodeHash(syncSession.PeekExplorationQueue()) + + if syncMessageWrapper == nil || syncMessageWrapper.MessageType != SYNC_NODE_HASH { + syncSession.currentState = END + + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_ABORT, + MessageBody: Abort{ }, + } + + break + } else if syncMessageWrapper.MessageBody.(MerkleNodeHash).HashHigh == myHash.High() && syncMessageWrapper.MessageBody.(MerkleNodeHash).HashLow == myHash.Low() { + syncSession.currentState = END + + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_ABORT, + MessageBody: Abort{ }, + } + + break + } else if syncSession.bucketProxy.MerkleTree().Level(syncSession.PeekExplorationQueue()) != syncSession.maxDepth { + syncSession.currentState = LEFT_HASH_COMPARE + + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_NODE_HASH, + MessageBody: MerkleNodeHash{ + NodeID: syncSession.bucketProxy.MerkleTree().TranslateNode(syncSession.bucketProxy.MerkleTree().LeftChild(syncSession.PeekExplorationQueue()), syncSession.theirDepth), + HashHigh: 0, + HashLow: 0, + }, + } + + break + } else { + syncSession.currentState = DB_OBJECT_PUSH + + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_OBJECT_NEXT, + MessageBody: ObjectNext{ + NodeID: syncSession.bucketProxy.MerkleTree().TranslateNode(syncSession.PeekExplorationQueue(), syncSession.theirDepth), + }, + } + + break + } + case LEFT_HASH_COMPARE: + myLeftChildHash := syncSession.bucketProxy.MerkleTree().NodeHash(syncSession.bucketProxy.MerkleTree().LeftChild(syncSession.PeekExplorationQueue())) + + if syncMessageWrapper == nil || syncMessageWrapper.MessageType != SYNC_NODE_HASH { + syncSession.currentState = END + + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_ABORT, + MessageBody: Abort{ }, + } + + break + } + + if syncMessageWrapper.MessageBody.(MerkleNodeHash).HashHigh != myLeftChildHash.High() || syncMessageWrapper.MessageBody.(MerkleNodeHash).HashLow != myLeftChildHash.Low() { + syncSession.PushExplorationQueue(syncSession.bucketProxy.MerkleTree().LeftChild(syncSession.PeekExplorationQueue())) + } + + syncSession.currentState = RIGHT_HASH_COMPARE + + messageWrapper = &SyncMessageWrapper{ 
+ SessionID: syncSession.sessionID, + MessageType: SYNC_NODE_HASH, + MessageBody: MerkleNodeHash{ + NodeID: syncSession.bucketProxy.MerkleTree().TranslateNode(syncSession.bucketProxy.MerkleTree().RightChild(syncSession.PeekExplorationQueue()), syncSession.theirDepth), + HashHigh: 0, + HashLow: 0, + }, + } + + break + case RIGHT_HASH_COMPARE: + myRightChildHash := syncSession.bucketProxy.MerkleTree().NodeHash(syncSession.bucketProxy.MerkleTree().RightChild(syncSession.PeekExplorationQueue())) + + if syncMessageWrapper == nil || syncMessageWrapper.MessageType != SYNC_NODE_HASH { + syncSession.currentState = END + + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_ABORT, + MessageBody: Abort{ }, + } + + break + } + + if syncMessageWrapper.MessageBody.(MerkleNodeHash).HashHigh != myRightChildHash.High() || syncMessageWrapper.MessageBody.(MerkleNodeHash).HashLow != myRightChildHash.Low() { + if syncSession.ExplorationQueueSize() <= syncSession.ExplorationPathLimit() { + syncSession.PushExplorationQueue(syncSession.bucketProxy.MerkleTree().RightChild(syncSession.PeekExplorationQueue())) + } + } + + syncSession.PopExplorationQueue() + + if syncSession.ExplorationQueueSize() == 0 { + // no more nodes to explore. abort + syncSession.currentState = END + + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_ABORT, + MessageBody: Abort{ }, + } + + break + } else if syncSession.bucketProxy.MerkleTree().Level(syncSession.PeekExplorationQueue()) == syncSession.maxDepth { + // cannot dig any deeper in any paths. need to move to DB_OBJECT_PUSH + err := syncSession.getNodeKeys() + + if err != nil { + syncSession.currentState = END + + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_ABORT, + MessageBody: Abort{ }, + } + + break + } + + syncSession.currentState = DB_OBJECT_PUSH + + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_OBJECT_NEXT, + MessageBody: ObjectNext{ + NodeID: syncSession.bucketProxy.MerkleTree().TranslateNode(syncSession.PeekExplorationQueue(), syncSession.theirDepth), + }, + } + + break + } + + syncSession.currentState = LEFT_HASH_COMPARE + + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_NODE_HASH, + MessageBody: MerkleNodeHash{ + NodeID: syncSession.bucketProxy.MerkleTree().TranslateNode(syncSession.bucketProxy.MerkleTree().LeftChild(syncSession.PeekExplorationQueue()), syncSession.theirDepth), + HashHigh: 0, + HashLow: 0, + }, + } + + break + case DB_OBJECT_PUSH: + if syncMessageWrapper == nil || syncMessageWrapper.MessageType != SYNC_PUSH_MESSAGE && syncMessageWrapper.MessageType != SYNC_PUSH_DONE { + syncSession.currentState = END + + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_ABORT, + MessageBody: Abort{ }, + } + + break + } + + if syncMessageWrapper.MessageType == SYNC_PUSH_DONE { + syncSession.PopExplorationQueue() + + err := syncSession.forgetNonAuthoritativeKeys() + + if err != nil || syncSession.ExplorationQueueSize() == 0 { + syncSession.currentState = END + + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_ABORT, + MessageBody: Abort{ }, + } + + break + } + + err = syncSession.getNodeKeys() + + if err != nil { + syncSession.currentState = END + + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_ABORT, + 
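+ // Note that every terminal transition in the initiator, including the
+ // normal "nothing left to explore" case, ends the session with SYNC_ABORT;
+ // the protocol defines no separate completion message.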
MessageBody: Abort{ }, + } + + break + } + + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_OBJECT_NEXT, + MessageBody: ObjectNext{ + NodeID: syncSession.bucketProxy.MerkleTree().TranslateNode(syncSession.PeekExplorationQueue(), syncSession.theirDepth), + }, + } + + break + } + + var key string = syncMessageWrapper.MessageBody.(PushMessage).Key + var siblingSet *SiblingSet = syncMessageWrapper.MessageBody.(PushMessage).Value + + delete(syncSession.currentNodeKeys, key) + + err := syncSession.bucketProxy.Merge(map[string]*SiblingSet{ key: siblingSet }) + + if err != nil { + syncSession.currentState = END + + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_ABORT, + MessageBody: Abort{ }, + } + + break + } + + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_OBJECT_NEXT, + MessageBody: ObjectNext{ + NodeID: syncSession.bucketProxy.MerkleTree().TranslateNode(syncSession.PeekExplorationQueue(), syncSession.theirDepth), + }, + } + + break + case END: + return nil + } + + if syncSession.bucketProxy.MerkleTree().Error() != nil { + Log.Errorf("Initiator sync session %d encountered a merkle tree error: %v", syncSession.sessionID, syncSession.bucketProxy.MerkleTree().Error()) + + // Encountered a proxy error with the merkle tree + // need to abort + syncSession.currentState = END + + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_ABORT, + MessageBody: Abort{ }, + } + } + + return messageWrapper +} + + // the state machine +type ResponderSyncSession struct { + sessionID uint + currentState int + node uint32 + maxDepth uint8 + theirDepth uint8 + bucketProxy ddbSync.BucketProxy + iter SiblingSetIterator + currentIterationNode uint32 +} + +func NewResponderSyncSession(bucketProxy ddbSync.BucketProxy) *ResponderSyncSession { + return &ResponderSyncSession{ + sessionID: 0, + currentState: START, + node: bucketProxy.MerkleTree().RootNode(), + maxDepth: bucketProxy.MerkleTree().Depth(), + bucketProxy: bucketProxy, + iter: nil, + } +} + +func (syncSession *ResponderSyncSession) State() int { + return syncSession.currentState +} + +func (syncSession *ResponderSyncSession) SetState(state int) { + syncSession.currentState = state +} + +func (syncSession *ResponderSyncSession) SetInitiatorDepth(d uint8) { + syncSession.theirDepth = d +} + +func (syncSession *ResponderSyncSession) InitiatorDepth() uint8 { + return syncSession.theirDepth +} + +func (syncSession *ResponderSyncSession) NextState(syncMessageWrapper *SyncMessageWrapper) *SyncMessageWrapper { + var messageWrapper *SyncMessageWrapper + + switch syncSession.currentState { + case START: + if syncMessageWrapper != nil { + syncSession.sessionID = syncMessageWrapper.SessionID + } + + if syncMessageWrapper == nil || syncMessageWrapper.MessageType != SYNC_START { + syncSession.currentState = END + + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_ABORT, + MessageBody: Abort{ }, + } + + break + } + + if syncMessageWrapper.MessageBody.(Start).ProtocolVersion != PROTOCOL_VERSION { + Log.Warningf("Responder Session %d: initiator protocol version is at %d which is unsupported by this database peer.
Aborting...", syncSession.sessionID, syncMessageWrapper.MessageBody.(Start).ProtocolVersion) + + syncSession.currentState = END + + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_ABORT, + MessageBody: Abort{ }, + } + + break + } + + syncSession.theirDepth = syncMessageWrapper.MessageBody.(Start).MerkleDepth + syncSession.currentState = HASH_COMPARE + + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_START, + MessageBody: Start{ + ProtocolVersion: PROTOCOL_VERSION, + MerkleDepth: syncSession.bucketProxy.MerkleTree().Depth(), + Bucket: syncSession.bucketProxy.Name(), + }, + } + + break + case HASH_COMPARE: + if syncMessageWrapper == nil || syncMessageWrapper.MessageType != SYNC_NODE_HASH && syncMessageWrapper.MessageType != SYNC_OBJECT_NEXT { + syncSession.currentState = END + + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_ABORT, + MessageBody: Abort{ }, + } + + break + } + + if syncMessageWrapper.MessageType == SYNC_NODE_HASH { + nodeID := syncMessageWrapper.MessageBody.(MerkleNodeHash).NodeID + + if nodeID >= syncSession.bucketProxy.MerkleTree().NodeLimit() || nodeID == 0 { + syncSession.currentState = END + + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_ABORT, + MessageBody: Abort{ }, + } + + break + } + + nodeHash := syncSession.bucketProxy.MerkleTree().NodeHash(nodeID) + + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_NODE_HASH, + MessageBody: MerkleNodeHash{ + NodeID: syncSession.bucketProxy.MerkleTree().TranslateNode(nodeID, syncSession.theirDepth), + HashHigh: nodeHash.High(), + HashLow: nodeHash.Low(), + }, + } + + break + } + + // if items to iterate over, send first + nodeID := syncMessageWrapper.MessageBody.(ObjectNext).NodeID + syncSession.currentIterationNode = nodeID + iter, err := syncSession.bucketProxy.GetSyncChildren(nodeID) + + if err != nil { + syncSession.currentState = END + + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_ABORT, + MessageBody: Abort{ }, + } + + break + } + + if !iter.Next() { + err := iter.Error() + iter.Release() + + if err == nil { + syncSession.currentState = DB_OBJECT_PUSH + + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_PUSH_DONE, + MessageBody: PushDone{ }, + } + + break + } + + syncSession.currentState = END + + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_ABORT, + MessageBody: Abort{ }, + } + + break + } + + syncSession.iter = iter + syncSession.currentState = DB_OBJECT_PUSH + + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_PUSH_MESSAGE, + MessageBody: PushMessage{ + Key: string(iter.Key()), + Value: iter.Value(), + }, + } + + break + case DB_OBJECT_PUSH: + if syncMessageWrapper == nil || syncMessageWrapper.MessageType != SYNC_OBJECT_NEXT { + if syncSession.iter != nil { + syncSession.iter.Release() + } + + syncSession.currentState = END + + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_ABORT, + MessageBody: Abort{ }, + } + + break + } + + if syncSession.iter == nil { + syncSession.currentState = END + + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_ABORT, + MessageBody: Abort{ }, + } + + break + } + + if 
syncSession.currentIterationNode != syncMessageWrapper.MessageBody.(ObjectNext).NodeID { + if syncSession.iter != nil { + syncSession.iter.Release() + } + + syncSession.currentIterationNode = syncMessageWrapper.MessageBody.(ObjectNext).NodeID + iter, err := syncSession.bucketProxy.GetSyncChildren(syncSession.currentIterationNode) + + if err != nil { + syncSession.currentState = END + + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_ABORT, + MessageBody: Abort{ }, + } + + break + } + + syncSession.iter = iter + } + + if !syncSession.iter.Next() { + if syncSession.iter != nil { + err := syncSession.iter.Error() + + syncSession.iter.Release() + + if err == nil { + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_PUSH_DONE, + MessageBody: PushDone{ }, + } + + break + } + } + + syncSession.currentState = END + + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_ABORT, + MessageBody: Abort{ }, + } + + break + } + + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_PUSH_MESSAGE, + MessageBody: PushMessage{ + Key: string(syncSession.iter.Key()), + Value: syncSession.iter.Value(), + }, + } + + break + case END: + return nil + } + + if syncSession.bucketProxy.MerkleTree().Error() != nil { + // Encountered a proxy error with the merkle tree + // need to abort + Log.Errorf("Responder sync session %d encountered a merkle tree error: %v", syncSession.sessionID, syncSession.bucketProxy.MerkleTree().Error()) + + syncSession.currentState = END + + messageWrapper = &SyncMessageWrapper{ + SessionID: syncSession.sessionID, + MessageType: SYNC_ABORT, + MessageBody: Abort{ }, + } + } + + return messageWrapper +} + +const ( + SYNC_START = iota + SYNC_ABORT = iota + SYNC_NODE_HASH = iota + SYNC_OBJECT_NEXT = iota + SYNC_PUSH_MESSAGE = iota + REQUEST = iota + RESPONSE = iota + PUSH = iota + SYNC_PUSH_DONE = iota +) + +func MessageTypeName(m int) string { + names := map[int]string{ + SYNC_START: "SYNC_START", + SYNC_ABORT: "SYNC_ABORT", + SYNC_NODE_HASH: "SYNC_NODE_HASH", + SYNC_OBJECT_NEXT: "SYNC_OBJECT_NEXT", + SYNC_PUSH_MESSAGE: "SYNC_PUSH_MESSAGE", + SYNC_PUSH_DONE: "SYNC_PUSH_DONE", + } + + return names[m] +} + +type rawSyncMessageWrapper struct { + SessionID uint `json:"sessionID"` + MessageType int `json:"type"` + MessageBody json.RawMessage `json:"body"` + Direction uint `json:"dir"` + nodeID string +} + +type SyncMessageWrapper struct { + SessionID uint `json:"sessionID"` + MessageType int `json:"type"` + MessageBody interface{ } `json:"body"` + Direction uint `json:"dir"` + nodeID string +} + +type Start struct { + ProtocolVersion uint + MerkleDepth uint8 + Bucket string +} + +type Abort struct { +} + +type MerkleNodeHash struct { + NodeID uint32 + HashHigh uint64 + HashLow uint64 +} + +type ObjectNext struct { + NodeID uint32 +} + +type PushMessage struct { + Bucket string + Key string + Value *SiblingSet +} + +type PushDone struct { +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/shared/config.go b/vendor/github.com/armPelionEdge/devicedb/shared/config.go new file mode 100644 index 0000000..1b4e0c8 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/shared/config.go @@ -0,0 +1,267 @@ +package shared +// + // Copyright (c) 2019 ARM Limited.
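The wrapper types above define the sync protocol's JSON wire format: rawSyncMessageWrapper keeps the body as a json.RawMessage so it can be decoded in a second pass once the message type is known. A minimal sketch of that two-phase decode, together with a loop that could pump an InitiatorSyncSession over some transport; the SyncConn interface and the function names are illustrative assumptions, not part of this patch:

    // SyncConn is an assumed transport abstraction for this sketch.
    type SyncConn interface {
        SendMessage(*SyncMessageWrapper) error
        ReceiveMessage() (*SyncMessageWrapper, error)
    }

    // decodeSyncMessage shows the two-phase decode enabled by rawSyncMessageWrapper.
    func decodeSyncMessage(data []byte) (*SyncMessageWrapper, error) {
        var raw rawSyncMessageWrapper

        if err := json.Unmarshal(data, &raw); err != nil {
            return nil, err
        }

        msg := &SyncMessageWrapper{ SessionID: raw.SessionID, MessageType: raw.MessageType, Direction: raw.Direction }

        // Decode the body now that the message type is known (SYNC_START shown;
        // the other message types decode analogously into their body structs)
        switch raw.MessageType {
        case SYNC_START:
            var body Start

            if err := json.Unmarshal(raw.MessageBody, &body); err != nil {
                return nil, err
            }

            msg.MessageBody = body
        }

        return msg, nil
    }

    // runInitiatorSession drives the state machine until it reaches END.
    func runInitiatorSession(session *InitiatorSyncSession, conn SyncConn) error {
        var incoming *SyncMessageWrapper

        for {
            outgoing := session.NextState(incoming)

            if outgoing == nil {
                // NextState returns nil once the session is in the END state
                return nil
            }

            if err := conn.SendMessage(outgoing); err != nil {
                return err
            }

            if session.State() == END {
                return nil
            }

            var err error

            if incoming, err = conn.ReceiveMessage(); err != nil {
                return err
            }
        }
    }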
+ // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "crypto/tls" + "io/ioutil" + "errors" + "fmt" + "gopkg.in/yaml.v2" + "path/filepath" + + . "github.com/armPelionEdge/devicedb/logging" + . "github.com/armPelionEdge/devicedb/merkle" +) + +type YAMLServerConfig struct { + DBFile string `yaml:"db"` + Port int `yaml:"port"` + MaxSyncSessions int `yaml:"syncSessionLimit"` + SyncSessionPeriod uint64 `yaml:"syncSessionPeriod"` + SyncPushBroadcastLimit uint64 `yaml:"syncPushBroadcastLimit"` + SyncExplorationPathLimit uint32 `yaml:"syncExplorationPathLimit"` + GCInterval uint64 `yaml:"gcInterval"` + GCPurgeAge uint64 `yaml:"gcPurgeAge"` + MerkleDepth uint8 `yaml:"merkleDepth"` + NodeID string `yaml:"nodeid"` + Peers []YAMLPeer `yaml:"peers"` + TLS YAMLTLSFiles `yaml:"tls"` + LogLevel string `yaml:"logLevel"` + Cloud *YAMLCloud `yaml:"cloud"` + History *YAMLHistory `yaml:"history"` + Alerts *YAMLAlerts `yaml:"alerts"` +} + +type YAMLHistory struct { + PurgeOnForward bool `yaml:"purgeOnForward"` + EventLimit uint64 `yaml:"eventLimit"` + EventFloor uint64 `yaml:"eventFloor"` + PurgeBatchSize int `yaml:"purgeBatchSize"` + ForwardInterval uint64 `yaml:"forwardInterval"` + ForwardBatchSize uint64 `yaml:"forwardBatchSize"` + ForwardThreshold uint64 `yaml:"forwardThreshold"` +} + +type YAMLAlerts struct { + ForwardInterval uint64 `yaml:"forwardInterval"` +} + +type YAMLPeer struct { + ID string `yaml:"id"` + Host string `yaml:"host"` + Port int `yaml:"port"` +} + +type YAMLCloud struct { + ID string `yaml:"id"` + URI string `yaml:"uri"` + HistoryID string `yaml:"historyID"` + HistoryURI string `yaml:"historyURI"` + AlertsID string `yaml:"alertsID"` + AlertsURI string `yaml:"alertsURI"` + NoValidate bool `yaml:"noValidate"` +} + +type YAMLTLSFiles struct { + ClientCertificate string `yaml:"clientCertificate"` + ClientKey string `yaml:"clientKey"` + ServerCertificate string `yaml:"serverCertificate"` + ServerKey string `yaml:"serverKey"` + Certificate string `yaml:"certificate"` + Key string `yaml:"key"` + RootCA string `yaml:"rootCA"` +} + +func (ysc *YAMLServerConfig) LoadFromFile(file string) error { + rawConfig, err := ioutil.ReadFile(file) + + if err != nil { + return err + } + + err = yaml.Unmarshal(rawConfig, ysc) + + if err != nil { + return err + } + + if !isValidPort(ysc.Port) { + return errors.New(fmt.Sprintf("%d is an invalid port for the database server", ysc.Port)) + } + + if ysc.MerkleDepth < 
MerkleMinDepth || ysc.MerkleDepth > MerkleMaxDepth { + return errors.New(fmt.Sprintf("Invalid merkle depth specified. Valid ranges are from %d to %d inclusive", MerkleMinDepth, MerkleMaxDepth)) + } + + if ysc.MaxSyncSessions <= 0 { + return errors.New("syncSessionLimit must be at least 1") + } + + if ysc.SyncSessionPeriod == 0 { + return errors.New("syncSessionPeriod must be positive") + } + + if ysc.Peers != nil { + for _, peer := range ysc.Peers { + if len(peer.ID) == 0 { + return errors.New(fmt.Sprintf("Peer ID is empty")) + } + + if len(peer.Host) == 0 { + return errors.New(fmt.Sprintf("The host name is empty for peer %s", peer.ID)) + } + + if !isValidPort(peer.Port) { + return errors.New(fmt.Sprintf("%d is an invalid port to connect to peer %s at %s", peer.Port, peer.ID, peer.Host)) + } + } + } + + if ysc.Cloud != nil { + if len(ysc.Cloud.URI) == 0 { + return errors.New(fmt.Sprintf("The cloud.uri is empty")) + } + + if len(ysc.Cloud.HistoryID) == 0 { + ysc.Cloud.HistoryID = ysc.Cloud.ID + } + + if len(ysc.Cloud.AlertsID) == 0 { + ysc.Cloud.AlertsID = ysc.Cloud.ID + } + } + + if ysc.History == nil { + ysc.History = &YAMLHistory{ } + } + + if ysc.Alerts == nil { + ysc.Alerts = &YAMLAlerts{ } + } + + if ysc.History.ForwardInterval < 1000 { + return errors.New(fmt.Sprintf("history.forwardInterval must be at least 1000")) + } + + if ysc.Alerts.ForwardInterval < 1000 { + return errors.New(fmt.Sprintf("alerts.forwardInterval must be at least 1000")) + } + + if (YAMLTLSFiles{}) != ysc.TLS { + if len(ysc.TLS.ClientCertificate) == 0 { + ysc.TLS.ClientCertificate = ysc.TLS.Certificate + } + + if len(ysc.TLS.ServerCertificate) == 0 { + ysc.TLS.ServerCertificate = ysc.TLS.Certificate + } + + if len(ysc.TLS.ClientKey) == 0 { + ysc.TLS.ClientKey = ysc.TLS.Key + } + + if len(ysc.TLS.ServerKey) == 0 { + ysc.TLS.ServerKey = ysc.TLS.Key + } + + clientCertificate, err := ioutil.ReadFile(resolveFilePath(file, ysc.TLS.ClientCertificate)) + + if err != nil { + return errors.New(fmt.Sprintf("Could not load client certificate from %s", ysc.TLS.ClientCertificate)) + } + + clientKey, err := ioutil.ReadFile(resolveFilePath(file, ysc.TLS.ClientKey)) + + if err != nil { + return errors.New(fmt.Sprintf("Could not load client key from %s", ysc.TLS.ClientKey)) + } + + serverCertificate, err := ioutil.ReadFile(resolveFilePath(file, ysc.TLS.ServerCertificate)) + + if err != nil { + return errors.New(fmt.Sprintf("Could not load server certificate from %s", ysc.TLS.ServerCertificate)) + } + + serverKey, err := ioutil.ReadFile(resolveFilePath(file, ysc.TLS.ServerKey)) + + if err != nil { + return errors.New(fmt.Sprintf("Could not load server key from %s", ysc.TLS.ServerKey)) + } + + rootCA, err := ioutil.ReadFile(resolveFilePath(file, ysc.TLS.RootCA)) + + if err != nil { + return errors.New(fmt.Sprintf("Could not load root CA chain from %s", ysc.TLS.RootCA)) + } + + ysc.TLS.ClientCertificate = string(clientCertificate) + ysc.TLS.ClientKey = string(clientKey) + ysc.TLS.ServerCertificate = string(serverCertificate) + ysc.TLS.ServerKey = string(serverKey) + ysc.TLS.RootCA = string(rootCA) + + _, err = tls.X509KeyPair([]byte(ysc.TLS.ClientCertificate), []byte(ysc.TLS.ClientKey)) + + if err != nil { + return errors.New("The specified client certificate and key represent an invalid public/private key pair") + } + + _, err = tls.X509KeyPair([]byte(ysc.TLS.ServerCertificate), []byte(ysc.TLS.ServerKey)) + + if err != nil { + return errors.New("The specified server certificate and key represent an invalid public/private key pair") 
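Taken together, the checks in LoadFromFile define the envelope a configuration file must satisfy. A minimal sketch of loading one (the file path and values are illustrative; merkleDepth 19 assumes, per the project default, that it falls within [MerkleMinDepth, MerkleMaxDepth]):

    package main

    import (
        "io/ioutil"
        "log"

        "github.com/armPelionEdge/devicedb/shared"
    )

    func main() {
        // Values chosen to satisfy LoadFromFile's validation rules:
        // gcPurgeAge >= 600000, gcInterval >= 300000, syncSessionLimit >= 1,
        // syncSessionPeriod >= 1, history/alerts forwardInterval >= 1000
        config := []byte(`
    db: /tmp/devicedb/data
    port: 9090
    syncSessionLimit: 2
    syncSessionPeriod: 1000
    merkleDepth: 19
    gcInterval: 300000
    gcPurgeAge: 600000
    logLevel: info
    history:
      forwardInterval: 1000
    alerts:
      forwardInterval: 1000
    `)

        if err := ioutil.WriteFile("/tmp/devicedb.yaml", config, 0644); err != nil {
            log.Fatal(err)
        }

        var serverConfig shared.YAMLServerConfig

        if err := serverConfig.LoadFromFile("/tmp/devicedb.yaml"); err != nil {
            log.Fatalf("config rejected: %v", err)
        }

        log.Printf("port=%d merkleDepth=%d", serverConfig.Port, serverConfig.MerkleDepth)
    }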
+ } + } + + // purge age must be at least ten minutes + if ysc.GCPurgeAge < 600000 { + return errors.New("The gc purge age must be at least ten minutes (i.e. gcPurgeAge: 600000)") + } + + if ysc.GCInterval < 300000 { + return errors.New("The gc interval must be at least five minutes (i.e. gcInterval: 300000)") + } + + if ysc.SyncExplorationPathLimit == 0 { + ysc.SyncExplorationPathLimit = 1000 + } + + SetLoggingLevel(ysc.LogLevel) + + return nil +} + +func isValidPort(p int) bool { + return p >= 0 && p < (1 << 16) +} + +func resolveFilePath(configFileLocation, file string) string { + if filepath.IsAbs(file) { + return file + } + + return filepath.Join(filepath.Dir(configFileLocation), file) +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/shared/gc.go b/vendor/github.com/armPelionEdge/devicedb/shared/gc.go new file mode 100644 index 0000000..ff010a3 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/shared/gc.go @@ -0,0 +1,69 @@ +package shared +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "time" + + . "github.com/armPelionEdge/devicedb/logging" + . 
"github.com/armPelionEdge/devicedb/bucket" +) + +type GarbageCollector struct { + buckets *BucketList + gcInterval time.Duration + gcPurgeAge uint64 + done chan bool +} + +func NewGarbageCollector(buckets *BucketList, gcInterval uint64, gcPurgeAge uint64) *GarbageCollector { + return &GarbageCollector{ + buckets: buckets, + gcInterval: time.Millisecond * time.Duration(gcInterval), + gcPurgeAge: gcPurgeAge, + done: make(chan bool), + } +} + +func (garbageCollector *GarbageCollector) Start() { + go func() { + for { + select { + case <-garbageCollector.done: + garbageCollector.done = make(chan bool) + return + case <-time.After(garbageCollector.gcInterval): + for _, bucket := range garbageCollector.buckets.All() { + Log.Infof("Performing garbage collection sweep on %s bucket", bucket.Name()) + bucket.GarbageCollect(garbageCollector.gcPurgeAge) + } + } + } + }() +} + +func (garbageCollector *GarbageCollector) Stop() { + close(garbageCollector.done) +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/site/site.go b/vendor/github.com/armPelionEdge/devicedb/site/site.go new file mode 100644 index 0000000..89c8ee8 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/site/site.go @@ -0,0 +1,124 @@ +package site +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + . 
"github.com/armPelionEdge/devicedb/bucket" +) + +type Site interface { + Buckets() *BucketList + Iterator() SiteIterator + ID() string + LockWrites() + UnlockWrites() + LockReads() + UnlockReads() +} + +type RelaySiteReplica struct { + bucketList *BucketList + id string +} + +func NewRelaySiteReplica(id string, buckets *BucketList) *RelaySiteReplica { + return &RelaySiteReplica{ + id: id, + bucketList: buckets, + } +} + +func (relaySiteReplica *RelaySiteReplica) Buckets() *BucketList { + if relaySiteReplica == nil { + return NewBucketList() + } + + return relaySiteReplica.bucketList +} + +func (relaySiteReplica *RelaySiteReplica) ID() string { + return relaySiteReplica.id +} + +func (relaySiteReplica *RelaySiteReplica) Iterator() SiteIterator { + return &RelaySiteIterator{ } +} + +func (relaySiteReplica *RelaySiteReplica) LockWrites() { +} + +func (relaySiteReplica *RelaySiteReplica) UnlockWrites() { +} + +func (relaySiteReplica *RelaySiteReplica) LockReads() { +} + +func (relaySiteReplica *RelaySiteReplica) UnlockReads() { +} + +type CloudSiteReplica struct { + bucketList *BucketList + id string +} + +func (cloudSiteReplica *CloudSiteReplica) Buckets() *BucketList { + if cloudSiteReplica == nil { + return NewBucketList() + } + + return cloudSiteReplica.bucketList +} + +func (cloudSiteReplica *CloudSiteReplica) ID() string { + return cloudSiteReplica.id +} + +func (cloudSiteReplica *CloudSiteReplica) Iterator() SiteIterator { + return &CloudSiteIterator{ buckets: cloudSiteReplica.bucketList.All() } +} + +func (cloudSiteReplica *CloudSiteReplica) LockWrites() { + for _, bucket := range cloudSiteReplica.bucketList.All() { + bucket.LockWrites() + } +} + +func (cloudSiteReplica *CloudSiteReplica) UnlockWrites() { + for _, bucket := range cloudSiteReplica.bucketList.All() { + bucket.UnlockWrites() + } +} + +func (cloudSiteReplica *CloudSiteReplica) LockReads() { + for _, bucket := range cloudSiteReplica.bucketList.All() { + bucket.LockReads() + } +} + +func (cloudSiteReplica *CloudSiteReplica) UnlockReads() { + for _, bucket := range cloudSiteReplica.bucketList.All() { + bucket.UnlockReads() + } +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/site/site_factory.go b/vendor/github.com/armPelionEdge/devicedb/site/site_factory.go new file mode 100644 index 0000000..10df6bd --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/site/site_factory.go @@ -0,0 +1,113 @@ +package site +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + . "github.com/armPelionEdge/devicedb/bucket" + . "github.com/armPelionEdge/devicedb/bucket/builtin" + . "github.com/armPelionEdge/devicedb/merkle" + . "github.com/armPelionEdge/devicedb/storage" +) + +var keyStorePrefix = []byte{ 0 } + +const ( + defaultNodePrefix = iota + cloudNodePrefix = iota + lwwNodePrefix = iota + localNodePrefix = iota + historianPrefix = iota + alertsLogPrefix = iota +) + +type SiteFactory interface { + CreateSite(siteID string) Site +} + +type RelaySiteFactory struct { + MerkleDepth uint8 + StorageDriver StorageDriver + RelayID string +} + +func (relaySiteFactory *RelaySiteFactory) CreateSite(siteID string) Site { + bucketList := NewBucketList() + + defaultBucket, _ := NewDefaultBucket(relaySiteFactory.RelayID, NewPrefixedStorageDriver([]byte{ defaultNodePrefix }, relaySiteFactory.StorageDriver), relaySiteFactory.MerkleDepth) + cloudBucket, _ := NewCloudBucket(relaySiteFactory.RelayID, NewPrefixedStorageDriver([]byte{ cloudNodePrefix }, relaySiteFactory.StorageDriver), relaySiteFactory.MerkleDepth, RelayMode) + lwwBucket, _ := NewLWWBucket(relaySiteFactory.RelayID, NewPrefixedStorageDriver([]byte{ lwwNodePrefix }, relaySiteFactory.StorageDriver), relaySiteFactory.MerkleDepth) + localBucket, _ := NewLocalBucket(relaySiteFactory.RelayID, NewPrefixedStorageDriver([]byte{ localNodePrefix }, relaySiteFactory.StorageDriver), MerkleMinDepth) + + bucketList.AddBucket(defaultBucket) + bucketList.AddBucket(lwwBucket) + bucketList.AddBucket(cloudBucket) + bucketList.AddBucket(localBucket) + + return &RelaySiteReplica{ + bucketList: bucketList, + id: siteID, + } +} + +type CloudSiteFactory struct { + NodeID string + MerkleDepth uint8 + StorageDriver StorageDriver +} + +func (cloudSiteFactory *CloudSiteFactory) siteBucketStorageDriver(siteID string, bucketPrefix []byte) StorageDriver { + return NewPrefixedStorageDriver(cloudSiteFactory.siteBucketPrefix(siteID, bucketPrefix), cloudSiteFactory.StorageDriver) +} + +func (cloudSiteFactory *CloudSiteFactory) siteBucketPrefix(siteID string, bucketPrefix []byte) []byte { + prefix := make([]byte, 0, len(keyStorePrefix) + len([]byte(siteID)) + len([]byte(".")) + len(bucketPrefix) + len([]byte("."))) + + prefix = append(prefix, keyStorePrefix...) + prefix = append(prefix, []byte(siteID)...) + prefix = append(prefix, []byte(".")...) + prefix = append(prefix, bucketPrefix...) + prefix = append(prefix, []byte(".")...) 
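+ // Illustrative example (not in the original source): with keyStorePrefix
+ // { 0 }, siteID "site1", and bucketPrefix { defaultNodePrefix }, the bytes
+ // built here are { 0 } + "site1" + "." + { 0 } + ".", so every key a site's
+ // bucket writes through its PrefixedStorageDriver lives under its own
+ // namespace in the shared store.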
+ + return prefix +} + +func (cloudSiteFactory *CloudSiteFactory) CreateSite(siteID string) Site { + bucketList := NewBucketList() + + defaultBucket, _ := NewDefaultBucket(cloudSiteFactory.NodeID, cloudSiteFactory.siteBucketStorageDriver(siteID, []byte{ defaultNodePrefix }), cloudSiteFactory.MerkleDepth) + cloudBucket, _ := NewCloudBucket(cloudSiteFactory.NodeID, cloudSiteFactory.siteBucketStorageDriver(siteID, []byte{ cloudNodePrefix }), cloudSiteFactory.MerkleDepth, CloudMode) + lwwBucket, _ := NewLWWBucket(cloudSiteFactory.NodeID, cloudSiteFactory.siteBucketStorageDriver(siteID, []byte{ lwwNodePrefix }), cloudSiteFactory.MerkleDepth) + localBucket, _ := NewLocalBucket(cloudSiteFactory.NodeID, cloudSiteFactory.siteBucketStorageDriver(siteID, []byte{ localNodePrefix }), MerkleMinDepth) + + bucketList.AddBucket(defaultBucket) + bucketList.AddBucket(lwwBucket) + bucketList.AddBucket(cloudBucket) + bucketList.AddBucket(localBucket) + + return &CloudSiteReplica{ + bucketList: bucketList, + id: siteID, + } +} diff --git a/vendor/github.com/armPelionEdge/devicedb/site/site_iterator.go b/vendor/github.com/armPelionEdge/devicedb/site/site_iterator.go new file mode 100644 index 0000000..92c9e84 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/site/site_iterator.go @@ -0,0 +1,159 @@ +package site +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + . "github.com/armPelionEdge/devicedb/bucket" + . 
"github.com/armPelionEdge/devicedb/data" +) + +type SiteIterator interface { + Next() bool + // The site that the current entry belongs to + Bucket() string + // The key of the current entry + Key() string + // The value of the current entry + Value() *SiblingSet + // The checksum of the current entry + Release() + Error() error +} + +type RelaySiteIterator struct { +} + +func (relaySiteIterator *RelaySiteIterator) Next() bool { + return false +} + +func (relaySiteIterator *RelaySiteIterator) Bucket() string { + return "" +} + +func (relaySiteIterator *RelaySiteIterator) Key() string { + return "" +} + +func (relaySiteIterator *RelaySiteIterator) Value() *SiblingSet { + return nil +} + +func (relaySiteIterator *RelaySiteIterator) Release() { +} + +func (relaySiteIterator *RelaySiteIterator) Error() error { + return nil +} + +type CloudSiteIterator struct { + buckets []Bucket + currentIterator SiblingSetIterator + currentBucket string + currentKey string + currentValue *SiblingSet + err error +} + +func (cloudSiteIterator *CloudSiteIterator) Next() bool { + if cloudSiteIterator.currentIterator == nil { + if len(cloudSiteIterator.buckets) == 0 { + return false + } + + nextBucket := cloudSiteIterator.buckets[0] + cloudSiteIterator.currentBucket = nextBucket.Name() + + iter, err := nextBucket.GetAll() + + if err != nil { + cloudSiteIterator.err = err + cloudSiteIterator.Release() + + return false + } + + cloudSiteIterator.currentIterator = iter + cloudSiteIterator.buckets = cloudSiteIterator.buckets[1:] + } + + if !cloudSiteIterator.currentIterator.Next() { + if cloudSiteIterator.currentIterator.Error() != nil { + cloudSiteIterator.err = cloudSiteIterator.currentIterator.Error() + cloudSiteIterator.Release() + + return false + } + + cloudSiteIterator.currentIterator = nil + + return cloudSiteIterator.Next() + } + + cloudSiteIterator.currentKey = string(cloudSiteIterator.currentIterator.Key()) + cloudSiteIterator.currentValue = cloudSiteIterator.currentIterator.Value() + + return true +} + +func (cloudSiteIterator *CloudSiteIterator) Bucket() string { + if cloudSiteIterator == nil { + return "" + } + + return cloudSiteIterator.currentBucket +} + +func (cloudSiteIterator *CloudSiteIterator) Key() string { + if cloudSiteIterator == nil { + return "" + } + + return cloudSiteIterator.currentKey +} + +func (cloudSiteIterator *CloudSiteIterator) Value() *SiblingSet { + if cloudSiteIterator == nil { + return nil + } + + return cloudSiteIterator.currentValue +} + +func (cloudSiteIterator *CloudSiteIterator) Release() { + if cloudSiteIterator.currentIterator != nil { + cloudSiteIterator.currentIterator.Release() + } + + cloudSiteIterator.currentIterator = nil + cloudSiteIterator.buckets = nil + cloudSiteIterator.currentBucket = "" + cloudSiteIterator.currentKey = "" + cloudSiteIterator.currentValue = nil +} + +func (cloudSiteIterator *CloudSiteIterator) Error() error { + return cloudSiteIterator.err +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/site/site_pool.go b/vendor/github.com/armPelionEdge/devicedb/site/site_pool.go new file mode 100644 index 0000000..9d4e6d5 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/site/site_pool.go @@ -0,0 +1,200 @@ +package site +// + // Copyright (c) 2019 ARM Limited. 
+ // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "sync" +) + +type SitePool interface { + // Called when a client needs to access a site. This does not guarantee + // exclusive access; it merely ensures that the site pool does not + // dispose of the underlying site + Acquire(siteID string) Site + // Called when client no longer needs access to a site + Release(siteID string) + // Called when a site should be added to the pool + Add(siteID string) + // Called when a site should be removed from the pool + Remove(siteID string) + // Iterate over all sites that exist in the site pool + Iterator() SitePoolIterator + // Ensure no new writes can occur to any sites in this site pool + LockWrites() + // Ensure writes can occur to sites in this site pool + UnlockWrites() + // Ensure no new reads can occur from any sites in this site pool + LockReads() + // Ensure reads can occur from sites in this site pool + UnlockReads() +} + +// A relay only ever contains one site database +type RelayNodeSitePool struct { + Site Site +} + +func (relayNodeSitePool *RelayNodeSitePool) Acquire(siteID string) Site { + return relayNodeSitePool.Site +} + +func (relayNodeSitePool *RelayNodeSitePool) Release(siteID string) { +} + +func (relayNodeSitePool *RelayNodeSitePool) Add(siteID string) { +} + +func (relayNodeSitePool *RelayNodeSitePool) Remove(siteID string) { +} + +func (relayNodeSitePool *RelayNodeSitePool) Iterator() SitePoolIterator { + return &RelaySitePoolIterator{ } +} + +func (relayNodeSitePool *RelayNodeSitePool) LockWrites() { +} + +func (relayNodeSitePool *RelayNodeSitePool) UnlockWrites() { +} + +func (relayNodeSitePool *RelayNodeSitePool) LockReads() { +} + +func (relayNodeSitePool *RelayNodeSitePool) UnlockReads() { +} + +type CloudNodeSitePool struct { + SiteFactory SiteFactory + lock sync.Mutex + sites map[string]Site + writesLocked bool + readsLocked bool +} + +func (cloudNodeSitePool *CloudNodeSitePool) Acquire(siteID string) Site { + cloudNodeSitePool.lock.Lock() + defer cloudNodeSitePool.lock.Unlock() + + site, ok := cloudNodeSitePool.sites[siteID] + + if !ok { + return nil + } + + if site == nil { + cloudNodeSitePool.sites[siteID] = cloudNodeSitePool.SiteFactory.CreateSite(siteID) + } + + if cloudNodeSitePool.readsLocked { + cloudNodeSitePool.sites[siteID].LockReads() + } + + if cloudNodeSitePool.writesLocked { + cloudNodeSitePool.sites[siteID].LockWrites() + } + + return
cloudNodeSitePool.sites[siteID] +} + +func (cloudNodeSitePool *CloudNodeSitePool) Release(siteID string) { +} + +func (cloudNodeSitePool *CloudNodeSitePool) Add(siteID string) { + cloudNodeSitePool.lock.Lock() + defer cloudNodeSitePool.lock.Unlock() + + if cloudNodeSitePool.sites == nil { + cloudNodeSitePool.sites = make(map[string]Site) + } + + if _, ok := cloudNodeSitePool.sites[siteID]; !ok { + cloudNodeSitePool.sites[siteID] = nil + } +} + +func (cloudNodeSitePool *CloudNodeSitePool) Remove(siteID string) { + cloudNodeSitePool.lock.Lock() + defer cloudNodeSitePool.lock.Unlock() + + delete(cloudNodeSitePool.sites, siteID) +} + +func (cloudNodeSitePool *CloudNodeSitePool) Iterator() SitePoolIterator { + cloudNodeSitePool.lock.Lock() + defer cloudNodeSitePool.lock.Unlock() + + // take a snapshot of the currently available sites + sites := make([]string, 0, len(cloudNodeSitePool.sites)) + + for siteID, _ := range cloudNodeSitePool.sites { + sites = append(sites, siteID) + } + + return &CloudSitePoolterator{ sites: sites, sitePool: cloudNodeSitePool } +} + +func (cloudNodeSitePool *CloudNodeSitePool) LockWrites() { + cloudNodeSitePool.lock.Lock() + defer cloudNodeSitePool.lock.Unlock() + + cloudNodeSitePool.writesLocked = true + + for _, site := range cloudNodeSitePool.sites { + site.LockWrites() + } +} + +func (cloudNodeSitePool *CloudNodeSitePool) UnlockWrites() { + cloudNodeSitePool.lock.Lock() + defer cloudNodeSitePool.lock.Unlock() + + cloudNodeSitePool.writesLocked = false + + for _, site := range cloudNodeSitePool.sites { + site.UnlockWrites() + } +} + +func (cloudNodeSitePool *CloudNodeSitePool) LockReads() { + cloudNodeSitePool.lock.Lock() + defer cloudNodeSitePool.lock.Unlock() + + cloudNodeSitePool.readsLocked = true + + for _, site := range cloudNodeSitePool.sites { + site.LockReads() + } +} + +func (cloudNodeSitePool *CloudNodeSitePool) UnlockReads() { + cloudNodeSitePool.lock.Lock() + defer cloudNodeSitePool.lock.Unlock() + + cloudNodeSitePool.readsLocked = false + + for _, site := range cloudNodeSitePool.sites { + site.UnlockReads() + } +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/site/site_pool_iterator.go b/vendor/github.com/armPelionEdge/devicedb/site/site_pool_iterator.go new file mode 100644 index 0000000..2862566 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/site/site_pool_iterator.go @@ -0,0 +1,168 @@ +package site +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "errors" + + . "github.com/armPelionEdge/devicedb/data" +) + +var EDecodeKey = errors.New("Unable to decode key in store") + +type SitePoolIterator interface { + Next() bool + // The site that the current entry belongs to + Site() string + // The bucket that the current entry belongs to within its site + Bucket() string + // The key of the current entry + Key() string + // The value of the current entry + Value() *SiblingSet + // Release the iterator and any resources it holds + Release() + Error() error +} + +type RelaySitePoolIterator struct { +} + +func (relaySitePoolIterator *RelaySitePoolIterator) Next() bool { + // A relay site pool exposes nothing to iterate over + return false +} + +func (relaySitePoolIterator *RelaySitePoolIterator) Site() string { + return "" +} + +func (relaySitePoolIterator *RelaySitePoolIterator) Bucket() string { + return "" +} + +func (relaySitePoolIterator *RelaySitePoolIterator) Key() string { + return "" +} + +func (relaySitePoolIterator *RelaySitePoolIterator) Value() *SiblingSet { + return nil +} + +func (relaySitePoolIterator *RelaySitePoolIterator) Release() { +} + +func (relaySitePoolIterator *RelaySitePoolIterator) Error() error { + return nil +} + +type CloudSitePoolterator struct { + currentSite string + currentSiteIterator SiteIterator + sites []string + sitePool SitePool + err error +} + +func (cloudSitePoolIterator *CloudSitePoolterator) Next() bool { + for { + if cloudSitePoolIterator.currentSiteIterator == nil { + if len(cloudSitePoolIterator.sites) == 0 { + cloudSitePoolIterator.Release() + + return false + } + + nextSiteID := cloudSitePoolIterator.sites[0] + nextSite := cloudSitePoolIterator.sitePool.Acquire(nextSiteID) + cloudSitePoolIterator.sites = cloudSitePoolIterator.sites[1:] + + if nextSite == nil { + // This site must have been removed since iteration started + // try again with the next site + continue + } + + cloudSitePoolIterator.currentSite = nextSite.ID() + cloudSitePoolIterator.currentSiteIterator = nextSite.Iterator() + } + + if !cloudSitePoolIterator.currentSiteIterator.Next() { + cloudSitePoolIterator.currentSiteIterator.Release() + cloudSitePoolIterator.sitePool.Release(cloudSitePoolIterator.currentSite) + cloudSitePoolIterator.currentSite = "" + + if cloudSitePoolIterator.currentSiteIterator.Error() != nil { + cloudSitePoolIterator.err = cloudSitePoolIterator.currentSiteIterator.Error() + cloudSitePoolIterator.currentSiteIterator = nil + cloudSitePoolIterator.Release() + + return false + } + + cloudSitePoolIterator.currentSiteIterator = nil + + continue + } + + return true + } +} + +func (cloudSitePoolIterator *CloudSitePoolterator) Site() string { + return cloudSitePoolIterator.currentSite +} + +func (cloudSitePoolIterator *CloudSitePoolterator) Bucket() string { + return cloudSitePoolIterator.currentSiteIterator.Bucket() +} + +func (cloudSitePoolIterator *CloudSitePoolterator) Key() string { + return cloudSitePoolIterator.currentSiteIterator.Key() +} + +func (cloudSitePoolIterator *CloudSitePoolterator) Value() *SiblingSet { + return cloudSitePoolIterator.currentSiteIterator.Value() +} + +func (cloudSitePoolIterator *CloudSitePoolterator) Release() { + if cloudSitePoolIterator.currentSiteIterator != nil { + cloudSitePoolIterator.sitePool.Release(cloudSitePoolIterator.currentSite) +
cloudSitePoolIterator.currentSiteIterator.Release() + } + + cloudSitePoolIterator.currentSiteIterator = nil + cloudSitePoolIterator.currentSite = "" + + for _, site := range cloudSitePoolIterator.sites { + cloudSitePoolIterator.sitePool.Release(site) + } + + cloudSitePoolIterator.sites = nil + cloudSitePoolIterator.sitePool = nil +} + +func (cloudSitePoolIterator *CloudSitePoolterator) Error() error { + return cloudSitePoolIterator.err +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/storage/metrics.go b/vendor/github.com/armPelionEdge/devicedb/storage/metrics.go new file mode 100644 index 0000000..3da5404 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/storage/metrics.go @@ -0,0 +1,50 @@ +package storage +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "github.com/prometheus/client_golang/prometheus" +) + +var ( + prometheusStorageErrors = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "devicedb_storage_errors", + Help: "Counts the number of errors encountered in the disk storage layer", + }, []string{ + "operation", + "path", + }) +) + +func init() { + prometheus.MustRegister(prometheusStorageErrors) +} + +func prometheusRecordStorageError(operation, path string) { + prometheusStorageErrors.With(prometheus.Labels{ + "operation": operation, + "path": path, + }).Inc() +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/storage/storageEngine.go b/vendor/github.com/armPelionEdge/devicedb/storage/storageEngine.go new file mode 100644 index 0000000..7751c7e --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/storage/storageEngine.go @@ -0,0 +1,808 @@ +package storage +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. 
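Since metrics.go registers devicedb_storage_errors with the default Prometheus registry at package init, exposing it only requires serving the default handler somewhere in the process. A minimal sketch (the listen address is illustrative):

    package main

    import (
        "net/http"

        "github.com/prometheus/client_golang/prometheus/promhttp"

        _ "github.com/armPelionEdge/devicedb/storage" // init() registers the counter
    )

    func main() {
        http.Handle("/metrics", promhttp.Handler())
        http.ListenAndServe(":2112", nil)
    }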
+ //
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ // SOFTWARE.
+ //
+
+
+import (
+    "bytes"
+    "strings"
+    "errors"
+    "sort"
+
+    "github.com/syndtr/goleveldb/leveldb"
+    "github.com/syndtr/goleveldb/leveldb/opt"
+    "github.com/syndtr/goleveldb/leveldb/iterator"
+    "github.com/syndtr/goleveldb/leveldb/util"
+    levelErrors "github.com/syndtr/goleveldb/leveldb/errors"
+
+    . "github.com/armPelionEdge/devicedb/error"
+    . "github.com/armPelionEdge/devicedb/logging"
+)
+
+const (
+    PUT = iota
+    DEL = iota
+    FORWARD = iota
+    BACKWARD = iota
+)
+
+var (
+    CopyBatchSize = 1000
+    CopyBatchMaxBytes = 5 * 1024 * 1024 // 5 MB
+)
+
+type Op struct {
+    OpType int `json:"type"`
+    OpKey []byte `json:"key"`
+    OpValue []byte `json:"value"`
+}
+
+func (o *Op) IsDelete() bool {
+    return o.OpType == DEL
+}
+
+func (o *Op) IsPut() bool {
+    return o.OpType == PUT
+}
+
+func (o *Op) Key() []byte {
+    return o.OpKey
+}
+
+func (o *Op) Value() []byte {
+    return o.OpValue
+}
+
+type OpList []Op
+
+func (opList OpList) Len() int {
+    return len(opList)
+}
+
+func (opList OpList) Less(i, j int) bool {
+    // Order ops by their keys, bytewise
+    return bytes.Compare(opList[i].Key(), opList[j].Key()) < 0
+}
+
+func (opList OpList) Swap(i, j int) {
+    opList[i], opList[j] = opList[j], opList[i]
+}
+
+type Batch struct {
+    BatchOps map[string]Op `json:"ops"`
+}
+
+func NewBatch() *Batch {
+    return &Batch{ make(map[string]Op) }
+}
+
+func (batch *Batch) Size() int {
+    return len(batch.BatchOps)
+}
+
+func (batch *Batch) Put(key []byte, value []byte) *Batch {
+    batch.BatchOps[string(key)] = Op{ PUT, key, value }
+
+    return batch
+}
+
+func (batch *Batch) Delete(key []byte) *Batch {
+    batch.BatchOps[string(key)] = Op{ DEL, key, nil }
+
+    return batch
+}
+
+func (batch *Batch) Ops() map[string]Op {
+    return batch.BatchOps
+}
+
+func (batch *Batch) SortedOps() []Op {
+    opList := make([]Op, 0, len(batch.BatchOps))
+
+    for _, op := range batch.BatchOps {
+        opList = append(opList, op)
+    }
+
+    sort.Sort(OpList(opList))
+
+    return opList
+}
+
+type StorageIterator interface {
+    Next() bool
+    Prefix() []byte
+    Key() []byte
+    Value() []byte
+    Release()
+    Error() error
+}
+
+type PrefixedStorageDriver struct {
+    prefix []byte
+    storageDriver StorageDriver
+}
+
+func NewPrefixedStorageDriver(prefix []byte, storageDriver StorageDriver) *PrefixedStorageDriver {
+    return &PrefixedStorageDriver{ prefix, storageDriver }
+}
+
+func (psd *PrefixedStorageDriver) Open() error {
+    return nil
+}
+
+func (psd *PrefixedStorageDriver) Close() error {
+    return nil
+}
+
+func (psd *PrefixedStorageDriver) Recover() error {
+    return psd.storageDriver.Recover()
+}
+
+func (psd *PrefixedStorageDriver) Compact() error {
+    return psd.storageDriver.Compact()
+}
+
+func (psd *PrefixedStorageDriver) addPrefix(k []byte) []byte {
+    result := make([]byte, 0, len(psd.prefix) + len(k))
+
+    result = append(result, psd.prefix...)
+    result = append(result, k...)
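+    // The stored key is the concatenation prefix||k; PrefixedIterator
+    // strips the prefix off again when keys are read back out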
+ + return result +} + +func (psd *PrefixedStorageDriver) Get(keys [][]byte) ([][]byte, error) { + prefixKeys := make([][]byte, len(keys)) + + for i, _ := range keys { + prefixKeys[i] = psd.addPrefix(keys[i]) + } + + return psd.storageDriver.Get(prefixKeys) +} + +func (psd *PrefixedStorageDriver) GetMatches(keys [][]byte) (StorageIterator, error) { + prefixKeys := make([][]byte, len(keys)) + + for i, _ := range keys { + prefixKeys[i] = psd.addPrefix(keys[i]) + } + + iter, err := psd.storageDriver.GetMatches(prefixKeys) + + if err != nil { + return nil, err + } + + return &PrefixedIterator{ psd.prefix, iter }, nil +} + +func (psd *PrefixedStorageDriver) GetRange(start []byte, end []byte) (StorageIterator, error) { + iter, err := psd.storageDriver.GetRange(psd.addPrefix(start), psd.addPrefix(end)) + + if err != nil { + return nil, err + } + + return &PrefixedIterator{ psd.prefix, iter }, nil +} + +func (psd *PrefixedStorageDriver) GetRanges(ranges [][2][]byte, direction int) (StorageIterator, error) { + var prefixedRanges = make([][2][]byte, len(ranges)) + + for i := 0; i < len(ranges); i += 1 { + prefixedRanges[i] = [2][]byte{ psd.addPrefix(ranges[i][0]), psd.addPrefix(ranges[i][1]) } + } + + iter, err := psd.storageDriver.GetRanges(prefixedRanges, direction) + + if err != nil { + return nil, err + } + + return &PrefixedIterator{ psd.prefix, iter }, nil +} + +func (psd *PrefixedStorageDriver) Batch(batch *Batch) error { + newBatch := NewBatch() + + for key, op := range batch.BatchOps { + op.OpKey = psd.addPrefix([]byte(key)) + newBatch.BatchOps[string(psd.addPrefix([]byte(key)))] = op + } + + return psd.storageDriver.Batch(newBatch) +} + +func (psd *PrefixedStorageDriver) Snapshot(snapshotDirectory string, metadataPrefix []byte, metadata map[string]string) error { + return psd.storageDriver.Snapshot(snapshotDirectory, metadataPrefix, metadata) +} + +func (psd *PrefixedStorageDriver) OpenSnapshot(snapshotDirectory string) (StorageDriver, error) { + return psd.storageDriver.OpenSnapshot(snapshotDirectory) +} + +func (psd *PrefixedStorageDriver) Restore(storageDriver StorageDriver) error { + return psd.storageDriver.Restore(storageDriver) +} + +type PrefixedIterator struct { + prefix []byte + iterator StorageIterator +} + +func NewPrefixedIterator(iter StorageIterator, prefix []byte) *PrefixedIterator { + return &PrefixedIterator{ prefix, iter } +} + +func (prefixedIterator *PrefixedIterator) Next() bool { + return prefixedIterator.iterator.Next() +} + +func (prefixedIterator *PrefixedIterator) Prefix() []byte { + return prefixedIterator.iterator.Prefix()[len(prefixedIterator.prefix):] +} + +func (prefixedIterator *PrefixedIterator) Key() []byte { + return prefixedIterator.iterator.Key()[len(prefixedIterator.prefix):] +} + +func (prefixedIterator *PrefixedIterator) Value() []byte { + return prefixedIterator.iterator.Value() +} + +func (prefixedIterator *PrefixedIterator) Release() { + prefixedIterator.iterator.Release() +} + +func (prefixedIterator *PrefixedIterator) Error() error { + return prefixedIterator.iterator.Error() +} + +type StorageDriver interface { + Open() error + Close() error + Recover() error + Compact() error + Get([][]byte) ([][]byte, error) + GetMatches([][]byte) (StorageIterator, error) + GetRange([]byte, []byte) (StorageIterator, error) + GetRanges([][2][]byte, int) (StorageIterator, error) + Batch(*Batch) error + Snapshot(snapshotDirectory string, metadataPrefix []byte, metadata map[string]string) error + OpenSnapshot(snapshotDirectory string) (StorageDriver, error) + 
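+    // Restore copies every record from the given driver (typically one
+    // returned by OpenSnapshot) into this driver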
Restore(storageDriver StorageDriver) error +} + +type LevelDBIterator struct { + snapshot *leveldb.Snapshot + it iterator.Iterator + ranges []*util.Range + prefix []byte + err error + direction int +} + +func (it *LevelDBIterator) Next() bool { + if it.it == nil { + if len(it.ranges) == 0 { + return false + } + + it.prefix = it.ranges[0].Start + it.it = it.snapshot.NewIterator(it.ranges[0], nil) + it.ranges = it.ranges[1:] + + if it.direction == BACKWARD { + if it.it.Last() { + return true + } + + if it.it.Error() != nil { + it.err = it.it.Error() + it.ranges = []*util.Range{ } + } + + it.it.Release() + it.it = nil + it.prefix = nil + + return false + } + } + + if it.direction == BACKWARD { + if it.it.Prev() { + return true + } + } else { + if it.it.Next() { + return true + } + } + + if it.it.Error() != nil { + prometheusRecordStorageError("iterator.next()", "") + it.err = it.it.Error() + it.ranges = []*util.Range{ } + } + + it.it.Release() + it.it = nil + it.prefix = nil + + return it.Next() +} + +func (it *LevelDBIterator) Prefix() []byte { + return it.prefix +} + +func (it *LevelDBIterator) Key() []byte { + if it.it == nil || it.err != nil { + return nil + } + + return it.it.Key() +} + +func (it *LevelDBIterator) Value() []byte { + if it.it == nil || it.err != nil { + return nil + } + + return it.it.Value() +} + +func (it *LevelDBIterator) Release() { + it.prefix = nil + it.ranges = []*util.Range{ } + it.snapshot.Release() + + if it.it == nil { + return + } + + it.it.Release() + it.it = nil +} + +func (it *LevelDBIterator) Error() error { + return it.err +} + +type LevelDBStorageDriver struct { + file string + options *opt.Options + db *leveldb.DB +} + +func NewLevelDBStorageDriver(file string, options *opt.Options) *LevelDBStorageDriver { + return &LevelDBStorageDriver{ file, options, nil } +} + +func (levelDriver *LevelDBStorageDriver) Open() error { + levelDriver.Close() + + db, err := leveldb.OpenFile(levelDriver.file, levelDriver.options) + + if err != nil { + prometheusRecordStorageError("open()", levelDriver.file) + + if levelErrors.IsCorrupted(err) { + Log.Criticalf("LevelDB database is corrupted: %v", err.Error()) + + return ECorrupted + } + + return err + } + + levelDriver.db = db + + return nil +} + +func (levelDriver *LevelDBStorageDriver) Close() error { + if levelDriver.db == nil { + return nil + } + + err := levelDriver.db.Close() + + levelDriver.db = nil + + return err +} + +func (levelDriver *LevelDBStorageDriver) Recover() error { + levelDriver.Close() + + db, err := leveldb.RecoverFile(levelDriver.file, levelDriver.options) + + if err != nil { + prometheusRecordStorageError("recover()", levelDriver.file) + + return err + } + + levelDriver.db = db + + return nil +} + +func (levelDriver *LevelDBStorageDriver) Compact() error { + if levelDriver.db == nil { + return errors.New("Driver is closed") + } + + err := levelDriver.db.CompactRange(util.Range{ }) + + if err != nil { + prometheusRecordStorageError("compact()", levelDriver.file) + + return err + } + + return nil +} + +func (levelDriver *LevelDBStorageDriver) Get(keys [][]byte) ([][]byte, error) { + if levelDriver.db == nil { + return nil, errors.New("Driver is closed") + } + + if keys == nil { + return [][]byte{ }, nil + } + + snapshot, err := levelDriver.db.GetSnapshot() + + defer snapshot.Release() + + if err != nil { + prometheusRecordStorageError("get()", levelDriver.file) + + return nil, err + } + + values := make([][]byte, len(keys)) + + for i, key := range keys { + if key == nil { + values[i] = nil + } else { 
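+            // Read through the snapshot so every key in this call sees a
+            // consistent view; a missing key yields a nil value rather than
+            // an error (see the not-found check below)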
+ values[i], err = snapshot.Get(key, &opt.ReadOptions{ false, opt.DefaultStrict }) + + if err != nil { + if err.Error() != "leveldb: not found" { + prometheusRecordStorageError("get()", levelDriver.file) + + return nil, err + } else { + values[i] = nil + } + } + } + } + + return values, nil +} + +func consolidateKeys(keys [][]byte) [][]byte { + if keys == nil { + return [][]byte{ } + } + + s := make([]string, 0, len(keys)) + + for _, key := range keys { + if key == nil { + continue + } + + s = append(s, string([]byte(key))) + } + + sort.Strings(s) + + result := make([][]byte, 0, len(s)) + + for i := 0; i < len(s); i += 1 { + if i == 0 { + result = append(result, []byte(s[i])) + continue + } + + if !strings.HasPrefix(s[i], s[i - 1]) { + result = append(result, []byte(s[i])) + } else { + s[i] = s[i - 1] + } + } + + return result +} + +func (levelDriver *LevelDBStorageDriver) GetMatches(keys [][]byte) (StorageIterator, error) { + if levelDriver.db == nil { + return nil, errors.New("Driver is closed") + } + + keys = consolidateKeys(keys) + snapshot, err := levelDriver.db.GetSnapshot() + + if err != nil { + prometheusRecordStorageError("getMatches()", levelDriver.file) + + snapshot.Release() + + return nil, err + } + + ranges := make([]*util.Range, 0, len(keys)) + + if keys == nil { + return &LevelDBIterator{ snapshot, nil, ranges, nil, nil, FORWARD }, nil + } + + for _, key := range keys { + if key == nil { + continue + } else { + ranges = append(ranges, util.BytesPrefix(key)) + } + } + + return &LevelDBIterator{ snapshot, nil, ranges, nil, nil, FORWARD }, nil +} + +func (levelDriver *LevelDBStorageDriver) GetRange(min, max []byte) (StorageIterator, error) { + if levelDriver.db == nil { + return nil, errors.New("Driver is closed") + } + + snapshot, err := levelDriver.db.GetSnapshot() + + if err != nil { + prometheusRecordStorageError("getRange()", levelDriver.file) + + snapshot.Release() + + return nil, err + } + + ranges := []*util.Range{ &util.Range{ min, max } } + + return &LevelDBIterator{ snapshot, nil, ranges, nil, nil, FORWARD }, nil +} + +func (levelDriver *LevelDBStorageDriver) GetRanges(ranges [][2][]byte, direction int) (StorageIterator, error) { + if levelDriver.db == nil { + return nil, errors.New("Driver is closed") + } + + snapshot, err := levelDriver.db.GetSnapshot() + + if err != nil { + prometheusRecordStorageError("getRanges()", levelDriver.file) + + snapshot.Release() + + return nil, err + } + + var levelRanges = make([]*util.Range, len(ranges)) + + for i := 0; i < len(ranges); i += 1 { + levelRanges[i] = &util.Range{ ranges[i][0], ranges[i][1] } + } + + return &LevelDBIterator{ snapshot, nil, levelRanges, nil, nil, direction }, nil +} + +func (levelDriver *LevelDBStorageDriver) Batch(batch *Batch) error { + if levelDriver.db == nil { + return errors.New("Driver is closed") + } + + if batch == nil { + return nil + } + + b := new(leveldb.Batch) + ops := batch.Ops() + + for _, op := range ops { + if op.OpType == PUT { + b.Put(op.Key(), op.Value()) + } else if op.OpType == DEL { + b.Delete(op.Key()) + } + } + + err := levelDriver.db.Write(b, nil) + + if err != nil { + prometheusRecordStorageError("batch()", levelDriver.file) + } + + return err +} + + +func (levelDriver *LevelDBStorageDriver) Snapshot(snapshotDirectory string, metadataPrefix []byte, metadata map[string]string) error { + if levelDriver.db == nil { + return errors.New("Driver is closed") + } + + snapshotDB, err := leveldb.OpenFile(snapshotDirectory, &opt.Options{ }) + + if err != nil { + 
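+        // Count the failure in the devicedb_storage_errors metric before
+        // giving up on the snapshot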
prometheusRecordStorageError("snapshot()", levelDriver.file) + + Log.Errorf("Can't create snapshot because %s could not be opened for writing: %v", snapshotDirectory, err) + + return err + } + + Log.Debugf("Copying database contents to snapshot at %s", snapshotDirectory) + + if err := levelCopy(snapshotDB, levelDriver.db); err != nil { + prometheusRecordStorageError("snapshot()", levelDriver.file) + + Log.Errorf("Can't create snapshot because there was an error while copying the keys: %v", err) + + return err + } + + var metaBatch *leveldb.Batch = &leveldb.Batch{} + + Log.Debugf("Recording snapshot metadata: %v", metadata) + + // Now write the snapshot metadata + for metaKey, metaValue := range metadata { + var key []byte = make([]byte, len(metadataPrefix) + len([]byte(metaKey))) + + copy(key, metadataPrefix) + copy(key[len(metadataPrefix):], []byte(metaKey)) + + metaBatch.Put(key, []byte(metaValue)) + } + + if err := snapshotDB.Write(metaBatch, &opt.WriteOptions{ Sync: true }); err != nil { + prometheusRecordStorageError("snapshot()", levelDriver.file) + + Log.Errorf("Can't create snapshot because there was a problem recording the snapshot metadata: %v", err) + + return err + } + + if err := snapshotDB.Close(); err != nil { + prometheusRecordStorageError("snapshot()", levelDriver.file) + + Log.Errorf("Can't create snapshot because there was an error while closing the snapshot database at %s: %v", snapshotDirectory, err) + + return err + } + + Log.Debugf("Created snapshot at %s", snapshotDirectory) + + return nil +} + +func levelCopy(dest *leveldb.DB, src *leveldb.DB) error { + iter := src.NewIterator(&util.Range{}, &opt.ReadOptions{ DontFillCache: true }) + + defer iter.Release() + + var batch *leveldb.Batch = &leveldb.Batch{} + var batchSizeBytes int + var totalKeys uint64 + + for iter.Next() { + totalKeys++ + batch.Put(iter.Key(), iter.Value()) + batchSizeBytes += len(iter.Key()) + len(iter.Value()) + + if batchSizeBytes >= CopyBatchMaxBytes || batch.Len() >= CopyBatchSize { + Log.Debugf("Writing next copy chunk (batch.Len() = %d, batchSizeBytes = %d, totalKeys = %d)", batch.Len(), batchSizeBytes, totalKeys) + + if err := dest.Write(batch, &opt.WriteOptions{ Sync: true }); err != nil { + Log.Errorf("Can't create copy because there was a problem writing the next chunk to destination: %v", err) + + return err + } + + batchSizeBytes = 0 + batch.Reset() + } + } + + if iter.Error() != nil { + Log.Errorf("Can't create copy because there was an iterator error: %v", iter.Error()) + + return iter.Error() + } + + // Write the rest of the records in one last batch + if batch.Len() > 0 { + Log.Debugf("Writing next copy chunk (batch.Len() = %d, batchSizeBytes = %d, totalKeys = %d)", batch.Len(), batchSizeBytes, totalKeys) + + if err := dest.Write(batch, &opt.WriteOptions{ Sync: true }); err != nil { + Log.Errorf("Can't create copy because there was a problem writing the next chunk to destination: %v", err) + + return err + } + } + + return nil +} + +func (levelDriver *LevelDBStorageDriver) OpenSnapshot(snapshotDirectory string) (StorageDriver, error) { + snapshotDB := NewLevelDBStorageDriver(snapshotDirectory, &opt.Options{ ErrorIfMissing: true, ReadOnly: true }) + + if err := snapshotDB.Open(); err != nil { + prometheusRecordStorageError("openSnapshot()", snapshotDirectory) + + return nil, err + } + + return snapshotDB, nil +} + +func (levelDriver *LevelDBStorageDriver) Restore(storageDriver StorageDriver) error { + Log.Debugf("Restoring storage state from snapshot...") + + if otherLevelDriver, ok 
:= storageDriver.(*LevelDBStorageDriver); ok { + err := levelDriver.restoreLevel(otherLevelDriver) + + if err != nil { + prometheusRecordStorageError("restore()", levelDriver.file) + } + + return err + } + + return errors.New("Snapshot source format not supported") +} + +func (levelDriver *LevelDBStorageDriver) restoreLevel(otherLevelDriver *LevelDBStorageDriver) error { + if err := levelCopy(levelDriver.db, otherLevelDriver.db); err != nil { + Log.Errorf("Unable to copy snapshot data to primary node storage: %v", err) + + return err + } + + Log.Debugf("Copied snapshot data to node storage successfully") + + return nil +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/sync/bucket_proxy.go b/vendor/github.com/armPelionEdge/devicedb/sync/bucket_proxy.go new file mode 100644 index 0000000..30b1435 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/sync/bucket_proxy.go @@ -0,0 +1,362 @@ +package sync +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "context" + "errors" + "math/rand" + + . "github.com/armPelionEdge/devicedb/bucket" + . "github.com/armPelionEdge/devicedb/client" + . "github.com/armPelionEdge/devicedb/cluster" + . "github.com/armPelionEdge/devicedb/clusterio" + . "github.com/armPelionEdge/devicedb/data" + . "github.com/armPelionEdge/devicedb/partition" + . "github.com/armPelionEdge/devicedb/site" + . "github.com/armPelionEdge/devicedb/raft" + rest "github.com/armPelionEdge/devicedb/rest" + . 
"github.com/armPelionEdge/devicedb/merkle" +) + +var ENoLocalBucket = errors.New("No such bucket exists locally") + +type BucketProxyFactory interface { + // Return a set of buckets for which updates can be + // pushed from the given node to this node/cluster + IncomingBuckets(peerID string) map[string]bool + // Return a set of buckets for which updates can be + // pushed from this node/cluster to the given node + OutgoingBuckets(peerID string) map[string]bool + // Create a bucket proxy to the bucket specified in the site + // that the peer belongs to + CreateBucketProxy(peerID string, bucket string) (BucketProxy, error) +} + +type RelayBucketProxyFactory struct { + // The site pool for this node + SitePool SitePool +} + +func (relayBucketProxyFactory *RelayBucketProxyFactory) CreateBucketProxy(peerID string, bucketName string) (BucketProxy, error) { + site := relayBucketProxyFactory.SitePool.Acquire("") + + if site.Buckets().Get(bucketName) == nil { + return nil, ENoLocalBucket + } + + return &RelayBucketProxy{ + Bucket: site.Buckets().Get(bucketName), + SitePool: relayBucketProxyFactory.SitePool, + SiteID: "", + }, nil +} + +func (relayBucketProxyFactory *RelayBucketProxyFactory) IncomingBuckets(peerID string) map[string]bool { + var buckets map[string]bool = make(map[string]bool) + + site := relayBucketProxyFactory.SitePool.Acquire("") + + for _, bucket := range site.Buckets().Incoming(peerID) { + buckets[bucket.Name()] = true + } + + return buckets +} + +func (relayBucketProxyFactory *RelayBucketProxyFactory) OutgoingBuckets(peerID string) map[string]bool { + var buckets map[string]bool = make(map[string]bool) + + site := relayBucketProxyFactory.SitePool.Acquire("") + + for _, bucket := range site.Buckets().Outgoing(peerID) { + buckets[bucket.Name()] = true + } + + return buckets +} + +type CloudBucketProxyFactory struct { + // An intra-cluster client + Client Client + // The cluster controller for this node + ClusterController *ClusterController + // The partition pool for this node + PartitionPool PartitionPool + // The cluster io agent for this node + ClusterIOAgent ClusterIOAgent +} + +func (cloudBucketProxyFactory *CloudBucketProxyFactory) CreateBucketProxy(peerID string, bucketName string) (BucketProxy, error) { + siteID := cloudBucketProxyFactory.ClusterController.RelaySite(peerID) + partitionNumber := cloudBucketProxyFactory.ClusterController.Partition(siteID) + nodeIDs := cloudBucketProxyFactory.ClusterController.PartitionOwners(partitionNumber) + + if len(nodeIDs) == 0 { + return nil, errors.New("No node owns this partition") + } + + // Choose a node at random from the nodes that own this site database + nodeID := nodeIDs[int(rand.Uint32() % uint32(len(nodeIDs)))] + + if cloudBucketProxyFactory.ClusterController.LocalNodeID == nodeID { + partition := cloudBucketProxyFactory.PartitionPool.Get(partitionNumber) + + if partition == nil { + return nil, ENoLocalBucket + } + + site := partition.Sites().Acquire(siteID) + + if site == nil || site.Buckets().Get(bucketName) == nil { + return nil, ENoLocalBucket + } + + localBucket := &CloudLocalBucketProxy{ + Bucket: site.Buckets().Get(bucketName), + SitePool: partition.Sites(), + SiteID: siteID, + ClusterIOAgent: cloudBucketProxyFactory.ClusterIOAgent, + } + + return localBucket, nil + } + + return &CloudRemoteBucketProxy{ + Client: cloudBucketProxyFactory.Client, + PeerAddress: cloudBucketProxyFactory.ClusterController.ClusterMemberAddress(nodeID), + SiteID: siteID, + BucketName: bucketName, + ClusterIOAgent: 
cloudBucketProxyFactory.ClusterIOAgent, + }, nil +} + +func (cloudBucketProxyFactory *CloudBucketProxyFactory) IncomingBuckets(peerID string) map[string]bool { + return map[string]bool{ "default": true, "lww": true } +} + +func (cloudBucketProxyFactory *CloudBucketProxyFactory) OutgoingBuckets(peerID string) map[string]bool { + return map[string]bool{ "default": true, "lww": true, "cloud": true } +} + +type BucketProxy interface { + Name() string + MerkleTree() MerkleTreeProxy + GetSyncChildren(nodeID uint32) (SiblingSetIterator, error) + Merge(mergedKeys map[string]*SiblingSet) error + Forget(keys [][]byte) error + Close() +} + +type RelayBucketProxy struct { + Bucket Bucket + SiteID string + SitePool SitePool +} + +func (relayBucketProxy *RelayBucketProxy) Name() string { + return relayBucketProxy.Bucket.Name() +} + +func (relayBucketProxy *RelayBucketProxy) MerkleTree() MerkleTreeProxy { + return &DirectMerkleTreeProxy{ + merkleTree: relayBucketProxy.Bucket.MerkleTree(), + } +} + +func (relayBucketProxy *RelayBucketProxy) GetSyncChildren(nodeID uint32) (SiblingSetIterator, error) { + return relayBucketProxy.Bucket.GetSyncChildren(nodeID) +} + +func (relayBucketProxy *RelayBucketProxy) Close() { + relayBucketProxy.SitePool.Release(relayBucketProxy.SiteID) +} + +func (relayBucketProxy *RelayBucketProxy) Merge(mergedKeys map[string]*SiblingSet) error { + return relayBucketProxy.Bucket.Merge(mergedKeys) +} + +func (relayBucketProxy *RelayBucketProxy) Forget(keys [][]byte) error { + return relayBucketProxy.Bucket.Forget(keys) +} + +type CloudResponderMerkleNodeIterator struct { + MerkleKeys rest.MerkleKeys + CurrentIndex int +} + +func (iter *CloudResponderMerkleNodeIterator) Next() bool { + if iter.CurrentIndex >= len(iter.MerkleKeys.Keys) - 1 { + iter.CurrentIndex = len(iter.MerkleKeys.Keys) + + return false + } + + iter.CurrentIndex++ + + return true +} + +func (iter *CloudResponderMerkleNodeIterator) Prefix() []byte { + return nil +} + +func (iter *CloudResponderMerkleNodeIterator) Key() []byte { + if iter.CurrentIndex < 0 || len(iter.MerkleKeys.Keys) == 0 || iter.CurrentIndex >= len(iter.MerkleKeys.Keys) { + return nil + } + + return []byte(iter.MerkleKeys.Keys[iter.CurrentIndex].Key) +} + +func (iter *CloudResponderMerkleNodeIterator) Value() *SiblingSet { + if iter.CurrentIndex < 0 || len(iter.MerkleKeys.Keys) == 0 || iter.CurrentIndex >= len(iter.MerkleKeys.Keys) { + return nil + } + + return iter.MerkleKeys.Keys[iter.CurrentIndex].Value +} + +func (iter *CloudResponderMerkleNodeIterator) LocalVersion() uint64 { + return 0 +} + +func (iter *CloudResponderMerkleNodeIterator) Release() { +} + +func (iter *CloudResponderMerkleNodeIterator) Error() error { + return nil +} + +type CloudLocalBucketProxy struct { + Bucket Bucket + SiteID string + SitePool SitePool + ClusterIOAgent ClusterIOAgent +} + +func (bucketProxy *CloudLocalBucketProxy) Name() string { + return bucketProxy.Bucket.Name() +} + +func (bucketProxy *CloudLocalBucketProxy) MerkleTree() MerkleTreeProxy { + return &DirectMerkleTreeProxy{ + merkleTree: bucketProxy.Bucket.MerkleTree(), + } +} + +func (bucketProxy *CloudLocalBucketProxy) GetSyncChildren(nodeID uint32) (SiblingSetIterator, error) { + return bucketProxy.Bucket.GetSyncChildren(nodeID) +} + +func (bucketProxy *CloudLocalBucketProxy) Merge(mergedKeys map[string]*SiblingSet) error { + _, _, err := bucketProxy.ClusterIOAgent.Merge(context.TODO(), bucketProxy.SiteID, bucketProxy.Bucket.Name(), mergedKeys) + + return err +} + +func (bucketProxy *CloudLocalBucketProxy) 
Forget(keys [][]byte) error { + return nil +} + +func (bucketProxy *CloudLocalBucketProxy) Close() { + bucketProxy.SitePool.Release(bucketProxy.SiteID) +} + +type CloudRemoteBucketProxy struct { + Client Client + PeerAddress PeerAddress + SiteID string + BucketName string + ClusterIOAgent ClusterIOAgent + merkleTreeProxy MerkleTreeProxy +} + +func (bucketProxy *CloudRemoteBucketProxy) Name() string { + return bucketProxy.BucketName +} + +func (bucketProxy *CloudRemoteBucketProxy) MerkleTree() MerkleTreeProxy { + if bucketProxy.merkleTreeProxy != nil { + return bucketProxy.merkleTreeProxy + } + + merkleTreeStats, err := bucketProxy.Client.MerkleTreeStats(context.TODO(), bucketProxy.PeerAddress, bucketProxy.SiteID, bucketProxy.BucketName) + + if err != nil { + bucketProxy.merkleTreeProxy = &CloudResponderMerkleTreeProxy{ + err: err, + } + + return bucketProxy.merkleTreeProxy + } + + dummyMerkleTree, err := NewDummyMerkleTree(merkleTreeStats.Depth) + + if err != nil { + bucketProxy.merkleTreeProxy = &CloudResponderMerkleTreeProxy{ + err: err, + } + + return bucketProxy.merkleTreeProxy + } + + bucketProxy.merkleTreeProxy = &CloudResponderMerkleTreeProxy{ + err: nil, + client: bucketProxy.Client, + peerAddress: bucketProxy.PeerAddress, + siteID: bucketProxy.SiteID, + bucketName: bucketProxy.BucketName, + merkleTree: dummyMerkleTree, + } + + return bucketProxy.merkleTreeProxy +} + +func (bucketProxy *CloudRemoteBucketProxy) GetSyncChildren(nodeID uint32) (SiblingSetIterator, error) { + merkleKeys, err := bucketProxy.Client.MerkleTreeNodeKeys(context.TODO(), bucketProxy.PeerAddress, bucketProxy.SiteID, bucketProxy.BucketName, nodeID) + + if err != nil { + return nil, err + } + + return &CloudResponderMerkleNodeIterator{ + MerkleKeys: merkleKeys, + CurrentIndex: -1, + }, nil +} + +func (bucketProxy *CloudRemoteBucketProxy) Merge(mergedKeys map[string]*SiblingSet) error { + _, _, err := bucketProxy.ClusterIOAgent.Merge(context.TODO(), bucketProxy.SiteID, bucketProxy.BucketName, mergedKeys) + + return err +} + +func (bucketProxy *CloudRemoteBucketProxy) Forget(keys [][]byte) error { + return nil +} + +func (bucketProxy *CloudRemoteBucketProxy) Close() { +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/sync/bucket_sync_http.go b/vendor/github.com/armPelionEdge/devicedb/sync/bucket_sync_http.go new file mode 100644 index 0000000..1e32683 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/sync/bucket_sync_http.go @@ -0,0 +1,278 @@ +package sync +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + . "github.com/armPelionEdge/devicedb/cluster" + . "github.com/armPelionEdge/devicedb/error" + . "github.com/armPelionEdge/devicedb/logging" + . "github.com/armPelionEdge/devicedb/rest" + . "github.com/armPelionEdge/devicedb/partition" + + "io" + "net/http" + "strconv" + "encoding/json" + "github.com/gorilla/mux" +) + +type BucketSyncHTTP struct { + PartitionPool PartitionPool + ClusterConfigController ClusterConfigController +} + +func (bucketSync *BucketSyncHTTP) Attach(router *mux.Router) { + router.HandleFunc("/sites/{siteID}/buckets/{bucket}/merkle", func(w http.ResponseWriter, r *http.Request) { + siteID := mux.Vars(r)["siteID"] + bucketName := mux.Vars(r)["bucket"] + partitionNumber := bucketSync.ClusterConfigController.ClusterController().Partition(siteID) + partition := bucketSync.PartitionPool.Get(partitionNumber) + + if partition == nil { + Log.Warningf("GET /sites/{siteID}/buckets/{bucket}/merkle: Site does not exist at this node", siteID, bucketName) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusNotFound) + io.WriteString(w, string(ESiteDoesNotExist.JSON()) + "\n") + + return + } + + site := partition.Sites().Acquire(siteID) + defer partition.Sites().Release(siteID) + + if site == nil { + Log.Warningf("GET /sites/{siteID}/buckets/{bucket}/merkle: Site does not exist at this node", siteID, bucketName) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusNotFound) + io.WriteString(w, string(ESiteDoesNotExist.JSON()) + "\n") + + return + } + + if site.Buckets().Get(bucketName) == nil { + Log.Warningf("GET /sites/{siteID}/buckets/{bucket}/merkle: Bucket does not exist at this site", siteID, bucketName) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusNotFound) + io.WriteString(w, string(EBucketDoesNotExist.JSON()) + "\n") + + return + } + + responseMerkleDepth := MerkleTree{ + Depth: site.Buckets().Get(bucketName).MerkleTree().Depth(), + } + + body, _ := json.Marshal(&responseMerkleDepth) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusOK) + io.WriteString(w, string(body)) + }).Methods("GET") + + router.HandleFunc("/sites/{siteID}/buckets/{bucket}/merkle/nodes/{nodeID}/keys", func(w http.ResponseWriter, r *http.Request) { + siteID := mux.Vars(r)["siteID"] + bucketName := mux.Vars(r)["bucket"] + partitionNumber := bucketSync.ClusterConfigController.ClusterController().Partition(siteID) + partition := bucketSync.PartitionPool.Get(partitionNumber) + + if partition == nil { + Log.Warningf("GET /sites/{siteID}/buckets/{bucket}/merkle: Site does not exist at this node", siteID, bucketName) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusNotFound) + io.WriteString(w, string(ESiteDoesNotExist.JSON()) + "\n") + + return + } + + site := partition.Sites().Acquire(siteID) + defer partition.Sites().Release(siteID) + + if site == nil { + Log.Warningf("GET /sites/{siteID}/buckets/{bucket}/merkle: Site does not exist at this node", siteID, bucketName) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusNotFound) + 
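+            // The 404 body is the JSON-encoded error object so that syncing
+            // peers can tell a missing site apart from other failures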
io.WriteString(w, string(ESiteDoesNotExist.JSON()) + "\n") + + return + } + + if site.Buckets().Get(bucketName) == nil { + Log.Warningf("GET /sites/{siteID}/buckets/{bucket}/merkle: Bucket does not exist at this site", siteID, bucketName) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusNotFound) + io.WriteString(w, string(EBucketDoesNotExist.JSON()) + "\n") + + return + } + + nodeID, err := strconv.ParseUint(mux.Vars(r)["nodeID"], 10, 32) + + if err != nil { + Log.Warningf("GET /sites/{siteID}/buckets/{bucket}/merkle/nodes/{nodeID}/keys: nodeID was not properly formatted", siteID, bucketName, mux.Vars(r)["nodeID"]) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusBadRequest) + io.WriteString(w, string(EMerkleRange.JSON()) + "\n") + + return + } + + siblingSetIter, err := site.Buckets().Get(bucketName).GetSyncChildren(uint32(nodeID)) + + if err != nil { + Log.Warningf("GET /sites/{siteID}/buckets/{bucket}/merkle/nodes/{nodeID}/keys: %v", siteID, bucketName, mux.Vars(r)["nodeID"], err.Error()) + + var code int + var body string + + if err == EMerkleRange { + code = http.StatusBadRequest + body = string(EMerkleRange.JSON()) + } else if err == EStorage { + code = http.StatusInternalServerError + body = string(EStorage.JSON()) + } else { + code = http.StatusInternalServerError + body = string(EStorage.JSON()) + } + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(code) + io.WriteString(w, body + "\n") + + return + } + + responseMerkleKeys := MerkleKeys{ + Keys: make([]Key, 0), + } + + defer siblingSetIter.Release() + + for siblingSetIter.Next() { + responseMerkleKeys.Keys = append(responseMerkleKeys.Keys, Key{ + Key: string(siblingSetIter.Key()), + Value: siblingSetIter.Value(), + }) + } + + if siblingSetIter.Error() != nil { + Log.Warningf("GET /sites/{siteID}/buckets/{bucket}/merkle/nodes/{nodeID}/keys: Sibling set iterator error: %v", siteID, bucketName, mux.Vars(r)["nodeID"], siblingSetIter.Error()) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusInternalServerError) + io.WriteString(w, string(EStorage.JSON()) + "\n") + + return + } + + body, _ := json.Marshal(&responseMerkleKeys) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusOK) + io.WriteString(w, string(body)) + }).Methods("GET") + + router.HandleFunc("/sites/{siteID}/buckets/{bucket}/merkle/nodes/{nodeID}", func(w http.ResponseWriter, r *http.Request) { + // Get the hash of a node + siteID := mux.Vars(r)["siteID"] + bucketName := mux.Vars(r)["bucket"] + partitionNumber := bucketSync.ClusterConfigController.ClusterController().Partition(siteID) + partition := bucketSync.PartitionPool.Get(partitionNumber) + + if partition == nil { + Log.Warningf("GET /sites/{siteID}/buckets/{bucket}/merkle: Site does not exist at this node", siteID, bucketName) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusNotFound) + io.WriteString(w, string(ESiteDoesNotExist.JSON()) + "\n") + + return + } + + site := partition.Sites().Acquire(siteID) + defer partition.Sites().Release(siteID) + + if site == nil { + Log.Warningf("GET /sites/{siteID}/buckets/{bucket}/merkle: Site does not exist at this node", siteID, bucketName) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusNotFound) + io.WriteString(w, string(ESiteDoesNotExist.JSON()) + "\n") + + 
return + } + + if site.Buckets().Get(bucketName) == nil { + Log.Warningf("GET /sites/{siteID}/buckets/{bucket}/merkle: Bucket does not exist at this site", siteID, bucketName) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusNotFound) + io.WriteString(w, string(EBucketDoesNotExist.JSON()) + "\n") + + return + } + + nodeID, err := strconv.ParseUint(mux.Vars(r)["nodeID"], 10, 32) + + if err != nil { + Log.Warningf("GET /sites/{siteID}/buckets/{bucket}/merkle/nodes/{nodeID}/keys: nodeID was not properly formatted", siteID, bucketName, mux.Vars(r)["nodeID"]) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusBadRequest) + io.WriteString(w, string(EMerkleRange.JSON()) + "\n") + + return + } + + if nodeID >= uint64(site.Buckets().Get(bucketName).MerkleTree().NodeLimit()) { + Log.Warningf("GET /sites/{siteID}/buckets/{bucket}/merkle/nodes/{nodeID}/keys: nodeID out of range", siteID, bucketName, mux.Vars(r)["nodeID"]) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusBadRequest) + io.WriteString(w, string(EMerkleRange.JSON()) + "\n") + + return + } + + nodeHash := site.Buckets().Get(bucketName).MerkleTree().NodeHash(uint32(nodeID)) + + responseMerkleNodeHash := MerkleNode{ + Hash: nodeHash, + } + + body, _ := json.Marshal(&responseMerkleNodeHash) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusOK) + io.WriteString(w, string(body)) + }).Methods("GET") +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/sync/merkle_proxy.go b/vendor/github.com/armPelionEdge/devicedb/sync/merkle_proxy.go new file mode 100644 index 0000000..54b9b0a --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/sync/merkle_proxy.go @@ -0,0 +1,175 @@ +package sync +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "context" + + . "github.com/armPelionEdge/devicedb/client" + . "github.com/armPelionEdge/devicedb/data" + . "github.com/armPelionEdge/devicedb/merkle" + . 
"github.com/armPelionEdge/devicedb/raft" +) + +type MerkleTreeProxy interface { + RootNode() uint32 + Depth() uint8 + NodeLimit() uint32 + Level(nodeID uint32) uint8 + LeftChild(nodeID uint32) uint32 + RightChild(nodeID uint32) uint32 + NodeHash(nodeID uint32) Hash + TranslateNode(nodeID uint32, depth uint8) uint32 + Error() error +} + +type DirectMerkleTreeProxy struct { + merkleTree *MerkleTree +} + +func (directMerkleProxy *DirectMerkleTreeProxy) MerkleTree() *MerkleTree { + return directMerkleProxy.merkleTree +} + +func (directMerkleProxy *DirectMerkleTreeProxy) RootNode() uint32 { + return directMerkleProxy.merkleTree.RootNode() +} + +func (directMerkleProxy *DirectMerkleTreeProxy) Depth() uint8 { + return directMerkleProxy.merkleTree.Depth() +} + +func (directMerkleProxy *DirectMerkleTreeProxy) NodeLimit() uint32 { + return directMerkleProxy.merkleTree.NodeLimit() +} + +func (directMerkleProxy *DirectMerkleTreeProxy) Level(nodeID uint32) uint8 { + return directMerkleProxy.merkleTree.Level(nodeID) +} + +func (directMerkleProxy *DirectMerkleTreeProxy) LeftChild(nodeID uint32) uint32 { + return directMerkleProxy.merkleTree.LeftChild(nodeID) +} + +func (directMerkleProxy *DirectMerkleTreeProxy) RightChild(nodeID uint32) uint32 { + return directMerkleProxy.merkleTree.RightChild(nodeID) +} + +func (directMerkleProxy *DirectMerkleTreeProxy) NodeHash(nodeID uint32) Hash { + return directMerkleProxy.merkleTree.NodeHash(nodeID) +} + +func (directMerkleProxy *DirectMerkleTreeProxy) TranslateNode(nodeID uint32, depth uint8) uint32 { + return directMerkleProxy.merkleTree.TranslateNode(nodeID, depth) +} + +func (directMerkleProxy *DirectMerkleTreeProxy) Error() error { + return nil +} + +type CloudResponderMerkleTreeProxy struct { + err error + client Client + peerAddress PeerAddress + siteID string + bucketName string + merkleTree *MerkleTree +} + +func (cloudResponderMerkleProxy *CloudResponderMerkleTreeProxy) RootNode() uint32 { + if cloudResponderMerkleProxy.err != nil { + return 0 + } + + return cloudResponderMerkleProxy.merkleTree.RootNode() +} + +func (cloudResponderMerkleProxy *CloudResponderMerkleTreeProxy) Depth() uint8 { + if cloudResponderMerkleProxy.err != nil { + return 0 + } + + return cloudResponderMerkleProxy.merkleTree.Depth() +} + +func (cloudResponderMerkleProxy *CloudResponderMerkleTreeProxy) NodeLimit() uint32 { + if cloudResponderMerkleProxy.err != nil { + return 0 + } + + return cloudResponderMerkleProxy.merkleTree.NodeLimit() +} + +func (cloudResponderMerkleProxy *CloudResponderMerkleTreeProxy) Level(nodeID uint32) uint8 { + if cloudResponderMerkleProxy.err != nil { + return 0 + } + + return cloudResponderMerkleProxy.merkleTree.Level(nodeID) +} + +func (cloudResponderMerkleProxy *CloudResponderMerkleTreeProxy) LeftChild(nodeID uint32) uint32 { + if cloudResponderMerkleProxy.err != nil { + return 0 + } + + return cloudResponderMerkleProxy.merkleTree.LeftChild(nodeID) +} + +func (cloudResponderMerkleProxy *CloudResponderMerkleTreeProxy) RightChild(nodeID uint32) uint32 { + if cloudResponderMerkleProxy.err != nil { + return 0 + } + + return cloudResponderMerkleProxy.merkleTree.RightChild(nodeID) +} + +func (cloudResponderMerkleProxy *CloudResponderMerkleTreeProxy) NodeHash(nodeID uint32) Hash { + if cloudResponderMerkleProxy.err != nil { + return Hash{} + } + + merkleNode, err := cloudResponderMerkleProxy.client.MerkleTreeNode(context.TODO(), cloudResponderMerkleProxy.peerAddress, cloudResponderMerkleProxy.siteID, cloudResponderMerkleProxy.bucketName, nodeID) + + if err != 
nil { + cloudResponderMerkleProxy.err = err + + return Hash{} + } + + return merkleNode.Hash +} + +func (cloudResponderMerkleProxy *CloudResponderMerkleTreeProxy) TranslateNode(nodeID uint32, depth uint8) uint32 { + if cloudResponderMerkleProxy.err != nil { + return 0 + } + + return cloudResponderMerkleProxy.merkleTree.TranslateNode(nodeID, depth) +} + +func (cloudResponderMerkleProxy *CloudResponderMerkleTreeProxy) Error() error { + return cloudResponderMerkleProxy.err +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/sync/sync_scheduler.go b/vendor/github.com/armPelionEdge/devicedb/sync/sync_scheduler.go new file mode 100644 index 0000000..ced944d --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/sync/sync_scheduler.go @@ -0,0 +1,305 @@ +package sync +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. 
+ //
+
+
+import (
+    "container/heap"
+    "sync"
+    "time"
+)
+
+type Peer struct {
+    id string
+    buckets []string
+    nextBucket int
+    nextSyncTime time.Time
+}
+
+func NewPeer(id string, buckets []string) *Peer {
+    return &Peer{
+        id: id,
+        buckets: buckets,
+        nextBucket: 0,
+    }
+}
+
+func (peer *Peer) NextBucket() string {
+    if len(peer.buckets) == 0 {
+        return ""
+    }
+
+    return peer.buckets[peer.nextBucket]
+}
+
+func (peer *Peer) Advance() {
+    if len(peer.buckets) == 0 {
+        peer.nextBucket = 0
+
+        return
+    }
+
+    peer.nextBucket = (peer.nextBucket + 1) % len(peer.buckets)
+}
+
+type PeerHeap []*Peer
+
+func (h PeerHeap) Len() int {
+    return len(h)
+}
+
+func (h PeerHeap) Less(i, j int) bool {
+    return h[i].nextSyncTime.Before(h[j].nextSyncTime)
+}
+
+func (h PeerHeap) Swap(i, j int) {
+    h[i], h[j] = h[j], h[i]
+}
+
+func (h *PeerHeap) Push(x interface{}) {
+    *h = append(*h, x.(*Peer))
+}
+
+func (h *PeerHeap) Pop() interface{} {
+    old := *h
+    n := len(old)
+    x := old[n - 1]
+    *h = old[0 : n - 1]
+
+    return x
+}
+
+type SyncScheduler interface {
+    AddPeer(peerID string, buckets []string)
+    RemovePeer(peerID string)
+    Next() (string, string)
+    Advance()
+    Schedule(peerID string)
+}
+
+// Sync queue optimized for relays
+// that provides a new sync partner
+// at a fixed rate
+type PeriodicSyncScheduler struct {
+    syncPeriod time.Duration
+    peers map[string]*Peer
+    queue []*Peer
+    mu sync.Mutex
+}
+
+func NewPeriodicSyncScheduler(syncPeriod time.Duration) *PeriodicSyncScheduler {
+    return &PeriodicSyncScheduler{
+        syncPeriod: syncPeriod,
+        peers: make(map[string]*Peer),
+        queue: make([]*Peer, 0),
+    }
+}
+
+func (syncScheduler *PeriodicSyncScheduler) AddPeer(peerID string, buckets []string) {
+    syncScheduler.mu.Lock()
+    defer syncScheduler.mu.Unlock()
+
+    if _, ok := syncScheduler.peers[peerID]; ok {
+        return
+    }
+
+    syncScheduler.peers[peerID] = NewPeer(peerID, buckets)
+}
+
+func (syncScheduler *PeriodicSyncScheduler) RemovePeer(peerID string) {
+    syncScheduler.mu.Lock()
+    defer syncScheduler.mu.Unlock()
+
+    newQueue := make([]*Peer, 0, len(syncScheduler.queue))
+
+    for _, peer := range syncScheduler.queue {
+        if peer.id == peerID {
+            continue
+        }
+
+        newQueue = append(newQueue, peer)
+    }
+
+    syncScheduler.queue = newQueue
+    delete(syncScheduler.peers, peerID)
+}
+
+func (syncScheduler *PeriodicSyncScheduler) Next() (string, string) {
+    <-time.After(syncScheduler.syncPeriod)
+
+    syncScheduler.mu.Lock()
+    defer syncScheduler.mu.Unlock()
+
+    if len(syncScheduler.queue) == 0 {
+        return "", ""
+    }
+
+    return syncScheduler.queue[0].id, syncScheduler.queue[0].NextBucket()
+}
+
+func (syncScheduler *PeriodicSyncScheduler) Advance() {
+    syncScheduler.mu.Lock()
+    defer syncScheduler.mu.Unlock()
+
+    if len(syncScheduler.queue) == 0 {
+        return
+    }
+
+    peer := syncScheduler.queue[0]
+    syncScheduler.queue = syncScheduler.queue[1:]
+    peer.Advance()
+}
+
+func (syncScheduler *PeriodicSyncScheduler) Schedule(peerID string) {
+    syncScheduler.mu.Lock()
+    defer syncScheduler.mu.Unlock()
+
+    if _, ok := syncScheduler.peers[peerID]; !ok {
+        return
+    }
+
+    syncScheduler.queue = append(syncScheduler.queue, syncScheduler.peers[peerID])
+}
+
+// Optimized for cloud servers that need
+// to coordinate sync sessions with hundreds
+// or thousands of relays at once. Tries to ensure
+// that, for any particular peer, sync sessions with
+// that peer are scheduled periodically while
+// minimizing jitter between the scheduled time
+// and the actual time.
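+//
+// A sketch of the loop that drives a scheduler (illustrative only, not
+// code from this package; syncWithPeer is a hypothetical stand-in for
+// running a sync session):
+//
+//     scheduler := NewMultiSyncScheduler(30 * time.Second)
+//     scheduler.AddPeer("relay-a", []string{"default", "lww"})
+//     scheduler.Schedule("relay-a") // next session due in 30s
+//
+//     for {
+//         peerID, bucket := scheduler.Next() // blocks until a peer is due
+//
+//         if peerID == "" {
+//             continue // nothing scheduled yet
+//         }
+//
+//         syncWithPeer(peerID, bucket)
+//         scheduler.Advance()        // pop the peer and rotate its bucket
+//         scheduler.Schedule(peerID) // queue its next session
+//     }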
+type MultiSyncScheduler struct { + syncPeriod time.Duration + peers map[string]*Peer + heap *PeerHeap + mu sync.Mutex + lastPeer *Peer +} + +func NewMultiSyncScheduler(syncPeriod time.Duration) *MultiSyncScheduler { + peerHeap := &PeerHeap{ } + heap.Init(peerHeap) + + return &MultiSyncScheduler{ + syncPeriod: syncPeriod, + peers: make(map[string]*Peer), + heap: peerHeap, + } +} + +func (syncScheduler *MultiSyncScheduler) AddPeer(peerID string, buckets []string) { + syncScheduler.mu.Lock() + defer syncScheduler.mu.Unlock() + + if _, ok := syncScheduler.peers[peerID]; ok { + return + } + + syncScheduler.peers[peerID] = NewPeer(peerID, buckets) +} + +func (syncScheduler *MultiSyncScheduler) RemovePeer(peerID string) { + syncScheduler.mu.Lock() + defer syncScheduler.mu.Unlock() + + for i := 0; i < syncScheduler.heap.Len(); i++ { + h := *syncScheduler.heap + peer := h[i] + + if peer.id == peerID { + heap.Remove(syncScheduler.heap, i) + } + } + + delete(syncScheduler.peers, peerID) + syncScheduler.lastPeer = nil +} + +func (syncScheduler *MultiSyncScheduler) Next() (string, string) { + syncScheduler.mu.Lock() + + if syncScheduler.heap.Len() == 0 { + syncScheduler.mu.Unlock() + + // Wait for the default timeout + <-time.After(syncScheduler.syncPeriod) + + return "", "" + } + + + h := *syncScheduler.heap + peer := h[0] + now := time.Now() + syncTime := peer.nextSyncTime + + // Was Next() called again before calling Advance()? + if syncScheduler.lastPeer == peer { + syncScheduler.mu.Unlock() + + <-time.After(syncScheduler.syncPeriod) + + return peer.id, peer.NextBucket() + } + + syncScheduler.lastPeer = peer + + // unlock the mutex. It is needed only + // to synchronize access to the heap + // and peers map + syncScheduler.mu.Unlock() + + // If we need to wait a while before + // returning, do so + if now.Before(syncTime) { + <-time.After(syncTime.Sub(now)) + } + + return peer.id, peer.NextBucket() +} + +func (syncScheduler *MultiSyncScheduler) Advance() { + syncScheduler.mu.Lock() + defer syncScheduler.mu.Unlock() + + if syncScheduler.heap.Len() == 0 { + return + } + + h := *syncScheduler.heap + h[0].Advance() + heap.Pop(syncScheduler.heap) + syncScheduler.lastPeer = nil +} + +func (syncScheduler *MultiSyncScheduler) Schedule(peerID string) { + syncScheduler.mu.Lock() + defer syncScheduler.mu.Unlock() + + if _, ok := syncScheduler.peers[peerID]; !ok { + return + } + + // Schedule the next sync with this peer after syncPeriod duration + syncScheduler.peers[peerID].nextSyncTime = time.Now().Add(syncScheduler.syncPeriod) + heap.Push(syncScheduler.heap, syncScheduler.peers[peerID]) +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/transfer/canceler.go b/vendor/github.com/armPelionEdge/devicedb/transfer/canceler.go new file mode 100644 index 0000000..c0d54da --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/transfer/canceler.go @@ -0,0 +1,29 @@ +package transfer +// + // Copyright (c) 2019 ARM Limited. 
+ // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +type Canceler struct { + Cancel func() +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/transfer/chunk.go b/vendor/github.com/armPelionEdge/devicedb/transfer/chunk.go new file mode 100644 index 0000000..960bc5b --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/transfer/chunk.go @@ -0,0 +1,48 @@ +package transfer +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + . "github.com/armPelionEdge/devicedb/data" +) + +const DefaultChunkSize = 100 + +type Entry struct { + Site string + Bucket string + Key string + Value *SiblingSet +} + +type PartitionChunk struct { + Index uint64 + Entries []Entry + Checksum Hash +} + +func (partitionChunk *PartitionChunk) IsEmpty() bool { + return len(partitionChunk.Entries) == 0 +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/transfer/downloader.go b/vendor/github.com/armPelionEdge/devicedb/transfer/downloader.go new file mode 100644 index 0000000..ce96eaa --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/transfer/downloader.go @@ -0,0 +1,380 @@ +package transfer +// + // Copyright (c) 2019 ARM Limited. 
+ // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "context" + "errors" + "io" + "sync" + "time" + + . "github.com/armPelionEdge/devicedb/cluster" + . "github.com/armPelionEdge/devicedb/data" + . "github.com/armPelionEdge/devicedb/logging" + . "github.com/armPelionEdge/devicedb/partition" +) + +type PartitionDownloader interface { + // Starts the download process for a partition if it is not yet downloaded and + // there isn't yet a download occurring for that partition. + // Returns a channel that closes when the download is complete + // If the download is successful all future calls to Download for that partition + // should return that closed channel until CancelDownload is called + // which resets it + Download(partition uint64) <-chan int + // Resets the downloader's internal state for this partition. Next time Download() is called + // for this partition it should start a new download + Reset(partition uint64) + // Returns a boolean indicating whether or not a download is in progress + // for this partition + IsDownloading(partition uint64) bool + // Cancels any download in progress. Resets internal state so next + // call to Download for a partition will start a new download and + // return a new after channel + CancelDownload(partition uint64) +} + +type Downloader struct { + transferTransport PartitionTransferTransport + transferPartnerStrategy PartitionTransferPartnerStrategy + transferFactory PartitionTransferFactory + partitionPool PartitionPool + configController ClusterConfigController + downloadCancelers map[uint64]*Canceler + currentDownloads map[uint64]chan int + downloadStopCB func(uint64) + panicCB func(p interface{}) + lock sync.Mutex +} + +func NewDownloader(configController ClusterConfigController, transferTransport PartitionTransferTransport, transferPartnerStrategy PartitionTransferPartnerStrategy, transferFactory PartitionTransferFactory, partitionPool PartitionPool) *Downloader { + return &Downloader{ + downloadCancelers: make(map[uint64]*Canceler, 0), + currentDownloads: make(map[uint64]chan int, 0), + configController: configController, + transferTransport: transferTransport, + transferPartnerStrategy: transferPartnerStrategy, + transferFactory: transferFactory, + partitionPool: partitionPool, + } +} + +// A callback that will be invoked after a download for a partition is cancelled +// or completed. 
Used only for tooling in order to test the flow of the downloader +// code +func (downloader *Downloader) OnDownloadStop(cb func(partition uint64)) { + downloader.downloadStopCB = cb +} +// A callback that will be invoked if there is a panic that occurs while writing +// keys from a transfer. Used only for tooling in order to test the flow of the downloader +// code +func (downloader *Downloader) OnPanic(cb func(p interface{})) { + downloader.panicCB = cb +} + +func (downloader *Downloader) notifyDownloadStop(partition uint64) { + if downloader.downloadStopCB != nil { + downloader.downloadStopCB(partition) + } +} + +func (downloader *Downloader) Reset(partition uint64) { + downloader.lock.Lock() + defer downloader.lock.Unlock() + + delete(downloader.currentDownloads, partition) +} + +func (downloader *Downloader) Download(partition uint64) <-chan int { + downloader.lock.Lock() + defer downloader.lock.Unlock() + + Log.Errorf("Node %d starting download process for %d", downloader.configController.ClusterController().LocalNodeID, partition) + // A download is already underway for this partition + if _, ok := downloader.currentDownloads[partition]; ok { + Log.Errorf("Node %d aborting download process for %d: it already has a download going", downloader.configController.ClusterController().LocalNodeID, partition) + return downloader.currentDownloads[partition] + } + + node := downloader.configController.ClusterController().State.Nodes[downloader.configController.ClusterController().LocalNodeID] + done := make(chan int) + + // Since this node is already a holder of this partition there is no need to + // start a download. Just propose any pending transfers straight away + if _, ok := node.PartitionReplicas[partition]; ok { + Log.Errorf("Node %d aborting download process for %d: it already holds a replica of this partition", downloader.configController.ClusterController().LocalNodeID, partition) + close(done) + + return done + } + + ctx, cancel := context.WithCancel(context.Background()) + canceler := &Canceler{ Cancel: cancel } + downloader.downloadCancelers[partition] = canceler + downloader.currentDownloads[partition] = done + + go func() { + defer func() { + downloader.lock.Lock() + defer downloader.lock.Unlock() + + if _, ok := downloader.downloadCancelers[partition]; !ok { + return + } + + if downloader.downloadCancelers[partition] == canceler { + delete(downloader.downloadCancelers, partition) + } + + downloader.notifyDownloadStop(partition) + + if r := recover(); r != nil { + if downloader.panicCB == nil { + panic(r) + } + + downloader.panicCB(r) + } + }() + + retryTimeoutSeconds := 0 + + Log.Infof("Local node (id = %d) starting transfer to obtain a replica of partition %d", downloader.configController.ClusterController().LocalNodeID, partition) + + for { + if retryTimeoutSeconds != 0 { + Log.Infof("Local node (id = %d) will attempt to obtain a replica of partition %d again in %d seconds", downloader.configController.ClusterController().LocalNodeID, partition, retryTimeoutSeconds) + + select { + case <-time.After(time.Second * time.Duration(retryTimeoutSeconds)): + case <-ctx.Done(): + Log.Infof("Local node (id = %d) cancelled all transfers for partition %d. Cancelling download.", downloader.configController.ClusterController().LocalNodeID, partition) + return + } + } + + partnerID := downloader.transferPartnerStrategy.ChooseTransferPartner(partition) + + if partnerID == 0 { + // No other node holds a replica of this partition. 
Move onto the phase where we propose + // a transfer in the raft log + break + } + + Log.Infof("Local node (id = %d) starting transfer of partition %d from node %d", downloader.configController.ClusterController().LocalNodeID, partition, partnerID) + reader, closeReader, err := downloader.transferTransport.Get(partnerID, partition) + + if err != nil { + Log.Warningf("Local node (id = %d) unable to obtain a replica of partition %d from node %d: %v", downloader.configController.ClusterController().LocalNodeID, partition, partnerID, err.Error()) + + if retryTimeoutSeconds == 0 { + retryTimeoutSeconds = 1 + } else if retryTimeoutSeconds != RetryTimeoutMax { + retryTimeoutSeconds *= 2 + } + + continue + } + + retryTimeoutSeconds = 0 + partitionTransfer := downloader.transferFactory.CreateIncomingTransfer(reader) + chunks := make(chan PartitionChunk) + errors := make(chan error) + finished := make(chan int) + + go func() { + for { + nextChunk, err := partitionTransfer.NextChunk() + + if !nextChunk.IsEmpty() { + select { + case chunks <- nextChunk: + case <-finished: + break + } + } + + if err != nil { + if err == EEntryChecksum { + Log.Errorf("Local node (id = %d) received a corrupted chunk of partition %d from node %d. It was unable to verify the checksum of the chunk", downloader.configController.ClusterController().LocalNodeID, partition, partnerID) + } else if err != io.EOF { + Log.Errorf("Local node (id = %d) was unable to obtain the next chunk of partition %d from node %d: %v", downloader.configController.ClusterController().LocalNodeID, partition, partnerID, err.Error()) + } + + select { + case errors <- err: + case <-finished: + } + + break + } + } + + close(errors) + }() + + retry := false + + func() { + defer func() { + // Drain errors from above goroutine + partitionTransfer.Cancel() + close(finished) + }() + + for { + select { + case chunk := <-chunks: + Log.Debugf("Local node (id = %d) received chunk %d of partition %d from node %d", downloader.configController.ClusterController().LocalNodeID, chunk.Index, partition, partnerID) + + if err := downloader.mergeChunk(partition, chunk); err != nil { + retry = true + return + } + case err := <-errors: + // Stop running this loop and retry the download only if + // The error is not io.EOF. io.EOF indicates the end of a stream + // which means a successful download + retry = (err != io.EOF) + return + case <-ctx.Done(): + // The download was cancelled externally + retry = false + return + } + } + }() + + closeReader() + + if !retry { + // The download was successful + break + } + + // Need to try again + if retryTimeoutSeconds == 0 { + retryTimeoutSeconds = 1 + } else if retryTimeoutSeconds != RetryTimeoutMax { + retryTimeoutSeconds *= 2 + } + } + + if ctx.Err() != context.Canceled { + // closing done signals to any pending replica transfer + // proposers that the data transfer has finished and now + // is time to propose the raft log transfer. It should only + // be closed if the download completed successfully and was + // not cancelled externally + close(done) + } + }() + + return done +} + +func (downloader *Downloader) mergeChunk(partition uint64, chunk PartitionChunk) error { + partitionReplica := downloader.partitionPool.Get(partition) + + if partitionReplica == nil { + var msg string = + "This represents a major flaw in the coordination between the downloader and the partition pool " + + "and is non-recoverable. 
This should not happen since a precondition for invoking a download of a partition on a node " +
+            "is to have initialized that partition in the partition pool, and a precondition for removing a partition from the partition " +
+            "pool is having cancelled any transfers or downloads for that partition."
+
+        Log.Panicf("Local node (id = %d) is trying to download data to partition %d which is not initialized\n\n %s", downloader.configController.ClusterController().LocalNodeID, partition, msg)
+
+        return errors.New("Partition is not registered")
+    }
+
+    for _, entry := range chunk.Entries {
+        site := partitionReplica.Sites().Acquire(entry.Site)
+
+        if site == nil {
+            // This represents a case where the nodes disagree about which sites exist. This node is unaware
+            // of this site while the partner node thinks this site exists. This means one of two cases is true:
+            // 1) The site used to exist and was since deleted and the local node is further ahead in the log than the remote node
+            // 2) The site was recently created and the remote node is further ahead in the log than the local node
+            // The solution in both cases is to wait and try again later when the two nodes are both caught up in the log
+            // If case 1 is true:
+            //    The remote node will eventually catch up in the log and will either filter out data from the deleted site while
+            //    performing the transfer or delete that data entirely from its local storage
+            // If case 2 is true:
+            //    The local node will eventually catch up and will accept the entries for this site on the next transfer
+            Log.Warningf("Local node (id = %d) is trying to download data to site %s in partition %d and doesn't think that site exists.", downloader.configController.ClusterController().LocalNodeID, entry.Site, partition)
+
+            return errors.New("Site does not exist")
+        }
+
+        bucket := site.Buckets().Get(entry.Bucket)
+
+        if bucket == nil {
+            // Since the bucket names are entirely built in and normalized across nodes this should not happen
+            // If it does it represents an unrecoverable error and should be looked into
+            Log.Panicf("Local node (id = %d) is trying to download data to bucket %s in site %s in partition %d and that bucket doesn't exist at that site.", downloader.configController.ClusterController().LocalNodeID, entry.Bucket, entry.Site, partition)
+
+            return errors.New("Bucket does not exist")
+        }
+
+        err := bucket.Merge(map[string]*SiblingSet{ entry.Key: entry.Value })
+
+        if err != nil {
+            // A storage error like this probably represents some sort of disk or machine failure and should be reported in a way that stands out
+            Log.Criticalf("Local node (id = %d) encountered an error while calling Merge() for key %s in bucket %s in site %s in partition %d: %v", downloader.configController.ClusterController().LocalNodeID, entry.Key, entry.Bucket, entry.Site, partition, err.Error())
+
+            return errors.New("Merge error")
+        }
+    }
+
+    return nil
+}
+
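+// exampleAwaitDownload is a minimal usage sketch and is not called anywhere in
+// this package: it shows how the channel returned by Download is meant to be
+// consumed. The channel closes only once the partition replica has downloaded
+// successfully, so a receive acts as a completion barrier.
+func exampleAwaitDownload(downloader *Downloader, partition uint64) {
+    done := downloader.Download(partition)
+
+    // Blocks until the download finishes. A cancelled download never closes
+    // this channel; calling CancelDownload and then Download starts fresh.
+    <-done
+}
+
+// Important! 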
+// This should only be called by a transfer agent if all transfer +// proposals waiting for this download have been cancelled first +func (downloader *Downloader) CancelDownload(partition uint64) { + downloader.lock.Lock() + defer downloader.lock.Unlock() + + // Cancel current download (if any) for this partition + if canceler, ok := downloader.downloadCancelers[partition]; ok { + Log.Infof("Local node (id = %d) is cancelling download of partition %d", downloader.configController.ClusterController().LocalNodeID, partition) + canceler.Cancel() + + delete(downloader.downloadCancelers, partition) + delete(downloader.currentDownloads, partition) + } +} + +func (downloader *Downloader) IsDownloading(partition uint64) bool { + downloader.lock.Lock() + defer downloader.lock.Unlock() + + _, ok := downloader.downloadCancelers[partition] + + return ok +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/transfer/transfer.go b/vendor/github.com/armPelionEdge/devicedb/transfer/transfer.go new file mode 100644 index 0000000..c563010 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/transfer/transfer.go @@ -0,0 +1,206 @@ +package transfer +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "bufio" + "io" + "errors" + "encoding/json" + "math" + + . "github.com/armPelionEdge/devicedb/data" + . "github.com/armPelionEdge/devicedb/logging" + . 
"github.com/armPelionEdge/devicedb/partition" +) + +var ETransferCancelled = errors.New("Cancelled") +var EEntryChecksum = errors.New("Unable to reproduce the checksum for the entry in the partition chunk") + +const ( + DefaultScanBufferSize = 100 +) + +func ChecksumEntries(entries []Entry) Hash { + hash := Hash{ } + + for _, entry := range entries { + hash = hash.Xor(entry.Value.Hash([]byte(entry.Key))) + } + + return hash +} + +type EntryFilter func(Entry) bool + +type PartitionTransfer interface { + NextChunk() (PartitionChunk, error) + UseFilter(EntryFilter) + Cancel() +} + +type IncomingTransfer struct { + scanner *bufio.Scanner + err error +} + +func NewIncomingTransfer(reader io.Reader) *IncomingTransfer { + scanner := bufio.NewScanner(reader) + scanner.Buffer(make([]byte, DefaultScanBufferSize), math.MaxInt32) + + return &IncomingTransfer{ + scanner: scanner, + } +} + +func (transfer *IncomingTransfer) UseFilter(entryFilter EntryFilter) { +} + +func (transfer *IncomingTransfer) NextChunk() (PartitionChunk, error) { + if transfer.err != nil { + return PartitionChunk{}, transfer.err + } + + if !transfer.scanner.Scan() { + if transfer.scanner.Err() != nil { + return PartitionChunk{}, transfer.scanner.Err() + } + + return PartitionChunk{}, io.EOF + } + + encoded := transfer.scanner.Bytes() + var nextPartitionChunk PartitionChunk + + if err := json.Unmarshal(encoded, &nextPartitionChunk); err != nil { + transfer.err = err + + return PartitionChunk{}, transfer.err + } + + checksum := ChecksumEntries(nextPartitionChunk.Entries) + + if checksum.High() != nextPartitionChunk.Checksum.High() || checksum.Low() != nextPartitionChunk.Checksum.Low() { + Log.Criticalf("Checksums don't match received high: %d calculated high: %d received low: %d calculated low: %d", nextPartitionChunk.Checksum.High(), checksum.High(), nextPartitionChunk.Checksum.Low(), checksum.Low()) + + return PartitionChunk{}, EEntryChecksum + } + + return nextPartitionChunk, nil +} + +func (transfer *IncomingTransfer) Cancel() { + transfer.err = ETransferCancelled +} + +type OutgoingTransfer struct { + partitionIterator PartitionIterator + chunkSize int + nextChunkIndex uint64 + entryFilter EntryFilter + err error +} + +func NewOutgoingTransfer(partition Partition, chunkSize int) *OutgoingTransfer { + if chunkSize <= 0 { + chunkSize = DefaultChunkSize + } + + return &OutgoingTransfer{ + partitionIterator: partition.Iterator(), + chunkSize: chunkSize, + nextChunkIndex: 1, + } +} + +func (transfer *OutgoingTransfer) UseFilter(entryFilter EntryFilter) { + transfer.entryFilter = entryFilter +} + +func (transfer *OutgoingTransfer) NextChunk() (PartitionChunk, error) { + if transfer.err != nil { + return PartitionChunk{}, transfer.err + } + + entries := make([]Entry, 0, transfer.chunkSize) + + for transfer.partitionIterator.Next() { + entry := Entry{ + Site: transfer.partitionIterator.Site(), + Bucket: transfer.partitionIterator.Bucket(), + Key: transfer.partitionIterator.Key(), + Value: transfer.partitionIterator.Value(), + } + + // See if this value should be allowed or if it should be filtered out + if transfer.entryFilter != nil && !transfer.entryFilter(entry) { + continue + } + + entries = append(entries, entry) + + if len(entries) == transfer.chunkSize { + index := transfer.nextChunkIndex + transfer.nextChunkIndex++ + + return PartitionChunk{ + Index: index, + Entries: entries, + Checksum: ChecksumEntries(entries), + }, nil + } + } + + transfer.partitionIterator.Release() + + if transfer.partitionIterator.Error() != nil { + 
transfer.err = transfer.partitionIterator.Error() + + return PartitionChunk{}, transfer.err + } + + transfer.err = io.EOF + + if len(entries) == 0 { + return PartitionChunk{}, transfer.err + } + + index := transfer.nextChunkIndex + transfer.nextChunkIndex++ + + return PartitionChunk{ + Index: index, + Entries: entries, + Checksum: ChecksumEntries(entries), + }, nil +} + +func (transfer *OutgoingTransfer) Cancel() { + if transfer.err == nil { + transfer.err = ETransferCancelled + } + + transfer.partitionIterator.Release() +} diff --git a/vendor/github.com/armPelionEdge/devicedb/transfer/transfer_agent.go b/vendor/github.com/armPelionEdge/devicedb/transfer/transfer_agent.go new file mode 100644 index 0000000..e3c1feb --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/transfer/transfer_agent.go @@ -0,0 +1,270 @@ +package transfer +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "io" + "net/http" + "strconv" + "sync" + + . "github.com/armPelionEdge/devicedb/cluster" + . "github.com/armPelionEdge/devicedb/logging" + . 
"github.com/armPelionEdge/devicedb/partition" + + "github.com/gorilla/mux" +) + +const RetryTimeoutMax = 32 + +type PartitionTransferAgent interface { + // Tell the partition transfer agent to start the holdership transfer process for this partition replica + StartTransfer(partition uint64, replica uint64) + // Tell the partition transfer agent to stop any holdership transfer processes for this partition replica + StopTransfer(partition uint64, replica uint64) + // Stop all holdership transfers for all partition replicas + StopAllTransfers() + // Allow downloads of this partition from this node + EnableOutgoingTransfers(partition uint64) + // Disallow future downloads of this partition from this node and cancel any currently running ones + DisableOutgoingTransfers(partition uint64) + // Disallow future downloads of all partition from this node and cancel any currently running ones + DisableAllOutgoingTransfers() +} + +type HTTPTransferAgent struct { + configController ClusterConfigController + transferProposer PartitionTransferProposer + partitionDownloader PartitionDownloader + transferFactory PartitionTransferFactory + partitionPool PartitionPool + transferrablePartitions map[uint64]bool + outgoingTransfers map[uint64]map[PartitionTransfer]bool + lock sync.Mutex +} + +// An easy constructor +func NewDefaultHTTPTransferAgent(configController ClusterConfigController, partitionPool PartitionPool) *HTTPTransferAgent { + transferTransport := NewHTTPTransferTransport(configController, &http.Client{ }) + transferPartnerStrategy := NewRandomTransferPartnerStrategy(configController) + transferFactory := &TransferFactory{ } + + return &HTTPTransferAgent{ + configController: configController, + transferProposer: NewTransferProposer(configController), + partitionDownloader: NewDownloader(configController, transferTransport, transferPartnerStrategy, transferFactory, partitionPool), + transferFactory: transferFactory, + partitionPool: partitionPool, + transferrablePartitions: make(map[uint64]bool, 0), + outgoingTransfers: make(map[uint64]map[PartitionTransfer]bool, 0), + } +} + +func NewHTTPTransferAgent(configController ClusterConfigController, transferProposer PartitionTransferProposer, partitionDownloader PartitionDownloader, transferFactory PartitionTransferFactory, partitionPool PartitionPool) *HTTPTransferAgent { + return &HTTPTransferAgent{ + configController: configController, + transferProposer: transferProposer, + partitionDownloader: partitionDownloader, + transferFactory: transferFactory, + partitionPool: partitionPool, + transferrablePartitions: make(map[uint64]bool, 0), + outgoingTransfers: make(map[uint64]map[PartitionTransfer]bool, 0), + } +} + +func (transferAgent *HTTPTransferAgent) StartTransfer(partition uint64, replica uint64) { + transferAgent.lock.Lock() + defer transferAgent.lock.Unlock() + + transferAgent.transferProposer.QueueTransferProposal(partition, replica, transferAgent.partitionDownloader.Download(partition)) +} + +func (transferAgent *HTTPTransferAgent) StopTransfer(partition uint64, replica uint64) { + transferAgent.lock.Lock() + defer transferAgent.lock.Unlock() + + transferAgent.stopTransfer(partition, replica) +} + +func (transferAgent *HTTPTransferAgent) stopTransfer(partition, replica uint64) { + transferAgent.transferProposer.CancelTransferProposal(partition, replica) + + if transferAgent.transferProposer.PendingProposals(partition) == 0 { + transferAgent.partitionDownloader.CancelDownload(partition) + transferAgent.partitionDownloader.Reset(partition) + } +} + 
+func (transferAgent *HTTPTransferAgent) StopAllTransfers() { + transferAgent.lock.Lock() + defer transferAgent.lock.Unlock() + + queuedProposals := transferAgent.transferProposer.QueuedProposals() + + for partition, replicas := range queuedProposals { + for replica, _ := range replicas { + transferAgent.stopTransfer(partition, replica) + } + } +} + +func (transferAgent *HTTPTransferAgent) EnableOutgoingTransfers(partition uint64) { + transferAgent.lock.Lock() + defer transferAgent.lock.Unlock() + + transferAgent.transferrablePartitions[partition] = true +} + +func (transferAgent *HTTPTransferAgent) DisableOutgoingTransfers(partition uint64) { + transferAgent.lock.Lock() + defer transferAgent.lock.Unlock() + + transferAgent.disableOutgoingTransfers(partition) +} + +func (transferAgent *HTTPTransferAgent) disableOutgoingTransfers(partition uint64) { + delete(transferAgent.transferrablePartitions, partition) + + for transfer, _ := range transferAgent.outgoingTransfers[partition] { + transfer.Cancel() + + delete(transferAgent.outgoingTransfers[partition], transfer) + } + + delete(transferAgent.outgoingTransfers, partition) +} + +func (transferAgent *HTTPTransferAgent) DisableAllOutgoingTransfers() { + transferAgent.lock.Lock() + defer transferAgent.lock.Unlock() + + for partition, _ := range transferAgent.transferrablePartitions { + transferAgent.disableOutgoingTransfers(partition) + } +} + +func (transferAgent *HTTPTransferAgent) partitionIsTransferrable(partition uint64) bool { + _, ok := transferAgent.transferrablePartitions[partition] + + return ok +} + +func (transferAgent *HTTPTransferAgent) registerOutgoingTransfer(partition uint64, transfer PartitionTransfer) { + if _, ok := transferAgent.outgoingTransfers[partition]; !ok { + transferAgent.outgoingTransfers[partition] = make(map[PartitionTransfer]bool, 0) + } + + transferAgent.outgoingTransfers[partition][transfer] = true +} + +func (transferAgent *HTTPTransferAgent) unregisterOutgoingTransfer(partition uint64, transfer PartitionTransfer) { + if _, ok := transferAgent.outgoingTransfers[partition]; ok { + delete(transferAgent.outgoingTransfers[partition], transfer) + } + + if len(transferAgent.outgoingTransfers[partition]) == 0 { + delete(transferAgent.outgoingTransfers, partition) + } +} + +func (transferAgent *HTTPTransferAgent) Attach(router *mux.Router) { + router.HandleFunc("/partitions/{partition}/keys", func(w http.ResponseWriter, req *http.Request) { + partitionNumber, err := strconv.ParseUint(mux.Vars(req)["partition"], 10, 64) + + if err != nil { + Log.Warningf("Invalid partition number specified in partition transfer HTTP request. Value cannot be parsed as uint64: %s", mux.Vars(req)["partition"]) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusBadRequest) + io.WriteString(w, "\n") + + + return + } + + transferAgent.lock.Lock() + partition := transferAgent.partitionPool.Get(partitionNumber) + + if partition == nil || !transferAgent.partitionIsTransferrable(partitionNumber) { + Log.Warningf("The specified partition (%d) does not exist at this node. 
Unable to fulfill transfer request", partitionNumber) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusNotFound) + io.WriteString(w, "\n") + + transferAgent.lock.Unlock() + + return + } + + + transfer, _ := transferAgent.transferFactory.CreateOutgoingTransfer(partition) + transfer.UseFilter(func(entry Entry) bool { + if !transferAgent.configController.ClusterController().SiteExists(entry.Site) { + Log.Debugf("Transfer of partition %d ignoring entry from site %s since that site was removed", partitionNumber, entry.Site) + + return false + } + + return true + }) + + transferAgent.registerOutgoingTransfer(partitionNumber, transfer) + transferAgent.lock.Unlock() + + defer func() { + transferAgent.lock.Lock() + transferAgent.unregisterOutgoingTransfer(partitionNumber, transfer) + transferAgent.lock.Unlock() + }() + + transferEncoder := NewTransferEncoder(transfer) + r, err := transferEncoder.Encode() + + if err != nil { + Log.Warningf("An error occurred while encoding partition %d. Unable to fulfill transfer request: %v", partitionNumber, err.Error()) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusInternalServerError) + io.WriteString(w, "\n") + + return + } + + Log.Infof("Start sending partition %d to remote node...", partitionNumber) + + w.Header().Set("Content-Type", "application/json; charset=utf8") + w.WriteHeader(http.StatusOK) + written, err := io.Copy(w, r) + + if err != nil { + Log.Errorf("An error occurred while sending partition %d to requesting node after sending %d bytes: %v", partitionNumber, written, err.Error()) + + return + } + + Log.Infof("Done sending partition %d to remote node. Bytes written: %d", partitionNumber, written) + }).Methods("GET") +} diff --git a/vendor/github.com/armPelionEdge/devicedb/transfer/transfer_encoding.go b/vendor/github.com/armPelionEdge/devicedb/transfer/transfer_encoding.go new file mode 100644 index 0000000..8c13ff7 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/transfer/transfer_encoding.go @@ -0,0 +1,130 @@ +package transfer +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. 
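+ //
+
+// Encoding note, sketched from the code below: TransferEncoder streams a
+// partition as newline-delimited JSON, one PartitionChunk per line, which is
+// why the receiving side (see NewIncomingTransfer) can decode it incrementally
+// with a line scanner. Field names follow Go's default JSON encoding; the
+// entry and checksum bodies here are elided for illustration:
+//
+//     {"Index":1,"Entries":[{"Site":"...","Bucket":"...","Key":"...","Value":{...}}],"Checksum":{...}}
+//     {"Index":2,"Entries":[...],"Checksum":{...}}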
+
+
+import (
+    "io"
+    "encoding/json"
+)
+
+type PartitionTransferEncoder interface {
+    Encode() (io.Reader, error)
+}
+
+type PartitionTransferDecoder interface {
+    Decode() (PartitionTransfer, error)
+}
+
+type TransferEncoder struct {
+    transfer PartitionTransfer
+    reader io.Reader
+}
+
+func NewTransferEncoder(transfer PartitionTransfer) *TransferEncoder {
+    return &TransferEncoder{
+        transfer: transfer,
+    }
+}
+
+func (encoder *TransferEncoder) Encode() (io.Reader, error) {
+    if encoder.reader != nil {
+        return encoder.reader, nil
+    }
+
+    encoder.reader = &JSONPartitionReader{
+        PartitionTransfer: encoder.transfer,
+    }
+
+    return encoder.reader, nil
+}
+
+type JSONPartitionReader struct {
+    PartitionTransfer PartitionTransfer
+    needsDelimiter bool
+    currentChunk []byte
+}
+
+func (partitionReader *JSONPartitionReader) Read(p []byte) (n int, err error) {
+    for len(p) > 0 {
+        if len(partitionReader.currentChunk) == 0 {
+            chunk, err := partitionReader.nextChunk()
+
+            if err != nil {
+                return n, err
+            }
+
+            if chunk == nil {
+                return n, io.EOF
+            }
+
+            partitionReader.currentChunk = chunk
+        }
+
+        if partitionReader.needsDelimiter {
+            nCopied := copy(p, []byte("\n"))
+            p = p[nCopied:]
+            n += nCopied
+            partitionReader.needsDelimiter = false
+        }
+
+        nCopied := copy(p, partitionReader.currentChunk)
+        p = p[nCopied:]
+        n += nCopied
+        partitionReader.currentChunk = partitionReader.currentChunk[nCopied:]
+
+        // if we have copied a whole chunk the next character needs to be a delimiter
+        partitionReader.needsDelimiter = len(partitionReader.currentChunk) == 0
+    }
+
+    return n, nil
+}
+
+func (partitionReader *JSONPartitionReader) nextChunk() ([]byte, error) {
+    nextChunk, err := partitionReader.PartitionTransfer.NextChunk()
+
+    if err != nil {
+        return nil, err
+    }
+
+    if nextChunk.IsEmpty() {
+        return nil, nil
+    }
+
+    return json.Marshal(nextChunk)
+}
+
+type TransferDecoder struct {
+    transfer PartitionTransfer
+}
+
+func NewTransferDecoder(reader io.Reader) *TransferDecoder {
+    return &TransferDecoder{
+        transfer: NewIncomingTransfer(reader),
+    }
+}
+
+func (decoder *TransferDecoder) Decode() (PartitionTransfer, error) {
+    return decoder.transfer, nil
+} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/transfer/transfer_factory.go b/vendor/github.com/armPelionEdge/devicedb/transfer/transfer_factory.go new file mode 100644 index 0000000..907d543 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/transfer/transfer_factory.go @@ -0,0 +1,47 @@ +package transfer +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "io" + + . "github.com/armPelionEdge/devicedb/partition" +) + +type PartitionTransferFactory interface { + CreateIncomingTransfer(reader io.Reader) PartitionTransfer + CreateOutgoingTransfer(partition Partition) (PartitionTransfer, error) +} + +type TransferFactory struct { +} + +func (transferFactory *TransferFactory) CreateIncomingTransfer(reader io.Reader) PartitionTransfer { + return NewIncomingTransfer(reader) +} + +func (transferFactory *TransferFactory) CreateOutgoingTransfer(partition Partition) (PartitionTransfer, error) { + return NewOutgoingTransfer(partition, 0), nil +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/transfer/transfer_partner_strategy.go b/vendor/github.com/armPelionEdge/devicedb/transfer/transfer_partner_strategy.go new file mode 100644 index 0000000..62309bb --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/transfer/transfer_partner_strategy.go @@ -0,0 +1,62 @@ +package transfer +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "math/rand" + + . 
"github.com/armPelionEdge/devicedb/cluster" +) + +type PartitionTransferPartnerStrategy interface { + ChooseTransferPartner(partition uint64) uint64 +} + +type RandomTransferPartnerStrategy struct { + configController ClusterConfigController +} + +func NewRandomTransferPartnerStrategy(configController ClusterConfigController) *RandomTransferPartnerStrategy { + return &RandomTransferPartnerStrategy{ + configController: configController, + } +} + +func (partnerStrategy *RandomTransferPartnerStrategy) ChooseTransferPartner(partition uint64) uint64 { + holders := partnerStrategy.configController.ClusterController().PartitionHolders(partition) + + if len(holders) == 0 { + return 0 + } + + // randomly choose a holder to transfer from + return holders[rand.Int() % len(holders)] +} + +// Choose a partner +// The node that needs to perform a partition transfer prioritizes transfer +// partners like so from best candidate to worst: +// 1) A node that is a holder of a replica that this node now owns +// 2) A node that is a holder of some replica of this partition but not one that overlaps with us diff --git a/vendor/github.com/armPelionEdge/devicedb/transfer/transfer_proposer.go b/vendor/github.com/armPelionEdge/devicedb/transfer/transfer_proposer.go new file mode 100644 index 0000000..bbf849c --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/transfer/transfer_proposer.go @@ -0,0 +1,160 @@ +package transfer +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "context" + "sync" + + . 
"github.com/armPelionEdge/devicedb/cluster" +) + +type PartitionTransferProposer interface { + QueueTransferProposal(partition uint64, replica uint64, after <-chan int) <-chan error + CancelTransferProposal(partition uint64, replica uint64) + CancelTransferProposals(partition uint64) + PendingProposals(partition uint64) int + QueuedProposals() map[uint64]map[uint64]bool +} + +type TransferProposer struct { + configController ClusterConfigController + transferCancelers map[uint64]map[uint64]*Canceler + lock sync.Mutex +} + +func NewTransferProposer(configController ClusterConfigController) *TransferProposer { + return &TransferProposer{ + configController: configController, + transferCancelers: make(map[uint64]map[uint64]*Canceler, 0), + } +} + +func (transferProposer *TransferProposer) QueueTransferProposal(partition uint64, replica uint64, after <-chan int) <-chan error { + transferProposer.lock.Lock() + defer transferProposer.lock.Unlock() + + result := make(chan error, 1) + ctx, cancel := context.WithCancel(context.Background()) + canceler := &Canceler{ Cancel: cancel } + + if _, ok := transferProposer.transferCancelers[partition]; !ok { + transferProposer.transferCancelers[partition] = make(map[uint64]*Canceler) + } + + transferProposer.transferCancelers[partition][replica] = canceler + + go func() { + // wait until the preceding operation finishes or context is cancelled + select { + case <-after: + case <-ctx.Done(): + return + } + + // Problem: All transfers are queued and proposed but ClusterCommand() does not return for some of them + err := transferProposer.configController.ClusterCommand(ctx, ClusterTakePartitionReplicaBody{ NodeID: transferProposer.configController.ClusterController().LocalNodeID, Partition: partition, Replica: replica }) + + transferProposer.lock.Lock() + + defer func() { + transferProposer.lock.Unlock() + result <- err + }() + + if _, ok := transferProposer.transferCancelers[partition]; !ok { + return + } + + // It is possible that this proposal was cancelled but then one for the + // same replica was started before this cleanup function was called. In + // this case the map might contain a new canceller for a new proposal for + // the same replica. 
This requires an equality check so this proposal doesn't + // step on the toes of another one + if transferProposer.transferCancelers[partition][replica] == canceler { + delete(transferProposer.transferCancelers[partition], replica) + + if len(transferProposer.transferCancelers[partition]) == 0 { + delete(transferProposer.transferCancelers, partition) + } + } + }() + + return result +} + +func (transferProposer *TransferProposer) CancelTransferProposal(partition uint64, replica uint64) { + transferProposer.lock.Lock() + defer transferProposer.lock.Unlock() + + transferProposer.cancelTransferProposal(partition, replica) +} + +func (transferProposer *TransferProposer) cancelTransferProposal(partition uint64, replica uint64) { + if cancelers, ok := transferProposer.transferCancelers[partition]; ok { + if canceler, ok := cancelers[replica]; ok { + canceler.Cancel() + } + + delete(cancelers, replica) + + if len(cancelers) == 0 { + delete(transferProposer.transferCancelers, partition) + } + } +} + +func (transferProposer *TransferProposer) CancelTransferProposals(partition uint64) { + transferProposer.lock.Lock() + defer transferProposer.lock.Unlock() + + for replica, _ := range transferProposer.transferCancelers[partition] { + transferProposer.cancelTransferProposal(partition, replica) + } +} + +func (transferProposer *TransferProposer) PendingProposals(partition uint64) int { + transferProposer.lock.Lock() + defer transferProposer.lock.Unlock() + + return len(transferProposer.transferCancelers[partition]) +} + +func (transferProposer *TransferProposer) QueuedProposals() map[uint64]map[uint64]bool { + transferProposer.lock.Lock() + defer transferProposer.lock.Unlock() + + allProposals := make(map[uint64]map[uint64]bool, 0) + + for partition, replicas := range transferProposer.transferCancelers { + allProposals[partition] = make(map[uint64]bool, 0) + + for replica, _ := range replicas { + allProposals[partition][replica] = true + } + } + + return allProposals +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/transfer/transfer_transport.go b/vendor/github.com/armPelionEdge/devicedb/transfer/transfer_transport.go new file mode 100644 index 0000000..8f3f349 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/transfer/transfer_transport.go @@ -0,0 +1,107 @@ +package transfer +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. 
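+ //
+
+// Usage sketch (illustrative; assumes a wired ClusterConfigController and a
+// reachable peer): fetch a partition stream from a peer and decode it chunk
+// by chunk. io.EOF from NextChunk marks a cleanly terminated stream, and
+// process stands in for application logic.
+//
+//     transport := NewHTTPTransferTransport(configController, &http.Client{ })
+//     reader, closeStream, err := transport.Get(peerID, partition)
+//
+//     if err == nil {
+//         defer closeStream()
+//
+//         transfer := NewIncomingTransfer(reader)
+//
+//         for {
+//             chunk, err := transfer.NextChunk()
+//
+//             if err != nil {
+//                 break
+//             }
+//
+//             process(chunk)
+//         }
+//     }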
+
+
+import (
+    "context"
+    "errors"
+    "fmt"
+    "net/http"
+    "io"
+
+    . "github.com/armPelionEdge/devicedb/cluster"
+)
+
+const DefaultEndpointURL = "/partitions/%d/keys"
+var EBadResponse = errors.New("Node responded with a bad response")
+
+type PartitionTransferTransport interface {
+    Get(nodeID uint64, partition uint64) (io.Reader, func(), error)
+}
+
+type HTTPTransferTransport struct {
+    httpClient *http.Client
+    configController ClusterConfigController
+    endpointURL string
+}
+
+func NewHTTPTransferTransport(configController ClusterConfigController, httpClient *http.Client) *HTTPTransferTransport {
+    return &HTTPTransferTransport{
+        httpClient: httpClient,
+        configController: configController,
+        endpointURL: DefaultEndpointURL,
+    }
+}
+
+func (transferTransport *HTTPTransferTransport) SetEndpointURL(endpointURL string) *HTTPTransferTransport {
+    transferTransport.endpointURL = endpointURL
+
+    return transferTransport
+}
+
+func (transferTransport *HTTPTransferTransport) Get(nodeID uint64, partition uint64) (io.Reader, func(), error) {
+    peerAddress := transferTransport.configController.ClusterController().ClusterMemberAddress(nodeID)
+
+    if peerAddress.IsEmpty() {
+        return nil, nil, ENoSuchNode
+    }
+
+    endpointURL := peerAddress.ToHTTPURL(fmt.Sprintf(transferTransport.endpointURL, partition))
+    request, err := http.NewRequest("GET", endpointURL, nil)
+
+    if err != nil {
+        return nil, nil, err
+    }
+
+    ctx, cancel := context.WithCancel(context.Background())
+    // WithContext returns a shallow copy of the request; the copy must be
+    // used or cancel() has no effect on the request in flight
+    request = request.WithContext(ctx)
+
+    resp, err := transferTransport.httpClient.Do(request)
+
+    if err != nil {
+        cancel()
+
+        if resp != nil && resp.Body != nil {
+            resp.Body.Close()
+        }
+
+        return nil, nil, err
+    }
+
+    if resp.StatusCode != http.StatusOK {
+        cancel()
+        resp.Body.Close()
+
+        return nil, nil, EBadResponse
+    }
+
+    close := func() {
+        // should do any cleanup on behalf of this request
+        cancel()
+        resp.Body.Close()
+    }
+
+    return resp.Body, close, nil
+} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/transport/transport.go b/vendor/github.com/armPelionEdge/devicedb/transport/transport.go new file mode 100644 index 0000000..7544ba2 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/transport/transport.go @@ -0,0 +1,223 @@ +package transport +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "encoding/json" + "encoding/base64" + + . 
"github.com/armPelionEdge/devicedb/data" + . "github.com/armPelionEdge/devicedb/error" + . "github.com/armPelionEdge/devicedb/bucket" + . "github.com/armPelionEdge/devicedb/logging" +) + +type TransportRow struct { + Key string `json:"key"` + LocalVersion uint64 `json:"serial"` + Context string `json:"context"` + Siblings []string `json:"siblings"` +} + +func (tr *TransportRow) FromRow(row *Row) error { + if row == nil || row.Siblings == nil { + return nil + } + + context, err := EncodeContext(row.Siblings.Join()) + + if err != nil { + Log.Warningf("Unable to encode context: %v", err) + + return EInvalidContext + } + + tr.LocalVersion = row.LocalVersion + tr.Key = row.Key + tr.Context = context + tr.Siblings = make([]string, 0, row.Siblings.Size()) + + for sibling := range row.Siblings.Iter() { + if !sibling.IsTombstone() { + tr.Siblings = append(tr.Siblings, string(sibling.Value())) + } + } + + return nil +} + +type TransportSiblingSet struct { + Siblings []string `json:"siblings"` + Context string `json:"context"` +} + +func (tss *TransportSiblingSet) FromSiblingSet(siblingSet *SiblingSet) error { + if siblingSet == nil { + tss.Siblings = nil + tss.Context = "" + + return nil + } + + context, err := EncodeContext(siblingSet.Join()) + + if err != nil { + Log.Warningf("Unable to encode context: %v", err) + + return EInvalidContext + } + + tss.Context = context + tss.Siblings = make([]string, 0, siblingSet.Size()) + + for sibling := range siblingSet.Iter() { + if !sibling.IsTombstone() { + tss.Siblings = append(tss.Siblings, string(sibling.Value())) + } + } + + return nil +} + +func EncodeContext(context map[string]uint64) (string, error) { + var encodedContext string + + rawJSON, err := json.Marshal(context) + + if err != nil { + return "", err + } + + encodedContext = base64.StdEncoding.EncodeToString(rawJSON) + + return encodedContext, nil +} + +func DecodeContext(context string) (map[string]uint64, error) { + var decodedContext map[string]uint64 + + rawJSON, err := base64.StdEncoding.DecodeString(context) + + if err != nil { + return nil, err + } + + err = json.Unmarshal(rawJSON, &decodedContext) + + if err != nil { + return nil, err + } + + return decodedContext, nil +} + +type TransportUpdateBatch []TransportUpdateOp + +type TransportUpdateOp struct { + Type string `json:"type"` + Key string `json:"key"` + Value string `json:"value"` + Context string `json:"context"` +} + +func (tub TransportUpdateBatch) ToUpdateBatch(updateBatch *UpdateBatch) error { + var tempUpdateBatch = NewUpdateBatch() + + for _, tuo := range tub { + if tuo.Type != "put" && tuo.Type != "delete" { + Log.Warningf("%s is not a valid operation", tuo.Type) + + return EInvalidOp + } + + var context map[string]uint64 + var err error + + if len(tuo.Context) != 0 { + context, err = DecodeContext(tuo.Context) + + if err != nil { + Log.Warningf("Could not decode context string %s in update operation: %v", tuo.Context, err) + + return EInvalidContext + } + } + + if tuo.Type == "put" { + _, err = tempUpdateBatch.Put([]byte(tuo.Key), []byte(tuo.Value), NewDVV(NewDot("", 0), context)) + } else { + _, err = tempUpdateBatch.Delete([]byte(tuo.Key), NewDVV(NewDot("", 0), context)) + } + + if err != nil { + return err + } + } + + updateBatch.RawBatch = tempUpdateBatch.RawBatch + updateBatch.Contexts = tempUpdateBatch.Contexts + + return nil +} + +func (tub TransportUpdateBatch) FromUpdateBatch(updateBatch *UpdateBatch) error { + if len(tub) != len(updateBatch.Batch().Ops()) { + Log.Warningf("Transport update batch is not the same 
size as the update batch") + + return ELength + } + + index := 0 + + for k, op := range updateBatch.Batch().Ops() { + context, ok := updateBatch.Context()[k] + + if !ok || context == nil { + context = NewDVV(NewDot("", 0), map[string]uint64{ }) + } + + encodedContext, _ := EncodeContext(context.Context()) + + if op.IsDelete() { + tub[index] = TransportUpdateOp{ + Type: "delete", + Key: k, + Value: "", + Context: encodedContext, + } + } else { + tub[index] = TransportUpdateOp{ + Type: "put", + Key: k, + Value: string(op.Value()), + Context: encodedContext, + } + } + + index += 1 + } + + return nil +} diff --git a/vendor/github.com/armPelionEdge/devicedb/util/multilock.go b/vendor/github.com/armPelionEdge/devicedb/util/multilock.go new file mode 100644 index 0000000..1e9371c --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/util/multilock.go @@ -0,0 +1,88 @@ +package util +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. 
+ // + + + + +import ( + "sync" +) + +type MultiLock struct { + mapLock sync.Mutex + conditionMap map[string]*sync.RWMutex + countMap map[string]uint64 +} + +func NewMultiLock() *MultiLock { + var ml MultiLock + + ml.conditionMap = make(map[string]*sync.RWMutex) + ml.countMap = make(map[string]uint64) + + return &ml +} + +func (multiLock *MultiLock) Lock(partitioningKey []byte) { + multiLock.mapLock.Lock() + + lock, ok := multiLock.conditionMap[string(partitioningKey)] + + if !ok { + lock = new(sync.RWMutex) + + multiLock.conditionMap[string(partitioningKey)] = lock + multiLock.countMap[string(partitioningKey)] = 0 + } + + multiLock.countMap[string(partitioningKey)] += 1 + + multiLock.mapLock.Unlock() + + lock.Lock() +} + +func (multiLock *MultiLock) Unlock(partitioningKey []byte) { + multiLock.mapLock.Lock() + + lock, ok := multiLock.conditionMap[string(partitioningKey)] + + if !ok { + multiLock.mapLock.Unlock() + + return + } + + multiLock.countMap[string(partitioningKey)] -= 1 + + if multiLock.countMap[string(partitioningKey)] == 0 { + delete(multiLock.conditionMap, string(partitioningKey)) + delete(multiLock.countMap, string(partitioningKey)) + } + + multiLock.mapLock.Unlock() + + lock.Unlock() +} diff --git a/vendor/github.com/armPelionEdge/devicedb/util/new_storage_driver.go b/vendor/github.com/armPelionEdge/devicedb/util/new_storage_driver.go new file mode 100644 index 0000000..c5db9a2 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/util/new_storage_driver.go @@ -0,0 +1,33 @@ +package util +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + . "github.com/armPelionEdge/devicedb/storage" +) + +func MakeNewStorageDriver() StorageDriver { + return NewLevelDBStorageDriver("/tmp/testdb-" + RandomString(), nil) +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/util/random_string.go b/vendor/github.com/armPelionEdge/devicedb/util/random_string.go new file mode 100644 index 0000000..7296a5f --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/util/random_string.go @@ -0,0 +1,41 @@ +package util +// + // Copyright (c) 2019 ARM Limited. 
+ // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "fmt" + "encoding/binary" + "crypto/rand" +) + +func RandomString() string { + randomBytes := make([]byte, 16) + rand.Read(randomBytes) + + high := binary.BigEndian.Uint64(randomBytes[:8]) + low := binary.BigEndian.Uint64(randomBytes[8:]) + + return fmt.Sprintf("%05x%05x", high, low) +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/util/request.go b/vendor/github.com/armPelionEdge/devicedb/util/request.go new file mode 100644 index 0000000..9273bc4 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/util/request.go @@ -0,0 +1,67 @@ +package util +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. 
+ // + + +import ( + "sync" +) + +type RequestMap struct { + lock sync.Mutex + requests map[uint64]chan interface{} +} + +func NewRequestMap() *RequestMap { + return &RequestMap{ + requests: make(map[uint64]chan interface{}), + } +} + +func (rm *RequestMap) MakeRequest(id uint64) <-chan interface{} { + rm.lock.Lock() + defer rm.lock.Unlock() + + if _, ok := rm.requests[id]; ok { + return nil + } + + requestChan := make(chan interface{}, 1) + rm.requests[id] = requestChan + + return requestChan +} + +func (rm *RequestMap) Respond(id uint64, r interface{}) { + rm.lock.Lock() + defer rm.lock.Unlock() + + if _, ok := rm.requests[id]; !ok { + return + } + + rm.requests[id] <- r + close(rm.requests[id]) + delete(rm.requests, id) +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/util/rw_try_lock.go b/vendor/github.com/armPelionEdge/devicedb/util/rw_try_lock.go new file mode 100644 index 0000000..f7b2758 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/util/rw_try_lock.go @@ -0,0 +1,72 @@ +package util +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "sync" +) + +type RWTryLock struct { + mu sync.Mutex + rwMu sync.RWMutex + writeLocked bool +} + +func (lock *RWTryLock) WLock() { + lock.mu.Lock() + lock.writeLocked = true + lock.mu.Unlock() + + lock.rwMu.Lock() + + lock.mu.Lock() + lock.writeLocked = true + lock.mu.Unlock() +} + +func (lock *RWTryLock) WUnlock() { + lock.mu.Lock() + defer lock.mu.Unlock() + + lock.writeLocked = false + lock.rwMu.Unlock() +} + +func (lock *RWTryLock) TryRLock() bool { + lock.mu.Lock() + defer lock.mu.Unlock() + + if lock.writeLocked { + return false + } + + lock.rwMu.RLock() + + return true +} + +func (lock *RWTryLock) RUnlock() { + lock.rwMu.RUnlock() +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/util/uuid.go b/vendor/github.com/armPelionEdge/devicedb/util/uuid.go new file mode 100644 index 0000000..dd6ae5e --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/util/uuid.go @@ -0,0 +1,48 @@ +package util +// + // Copyright (c) 2019 ARM Limited. 
+ // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. + // + + +import ( + "encoding/binary" + "crypto/rand" + "github.com/google/uuid" +) + +func UUID64() uint64 { + randomBytes := make([]byte, 8) + rand.Read(randomBytes) + + return binary.BigEndian.Uint64(randomBytes[:8]) +} + +func UUID() (string, error) { + newUUID, err := uuid.NewRandom() + + if err != nil { + return "", err + } + + return newUUID.String(), nil +} \ No newline at end of file diff --git a/vendor/github.com/armPelionEdge/devicedb/version/version.go b/vendor/github.com/armPelionEdge/devicedb/version/version.go new file mode 100644 index 0000000..a88bb48 --- /dev/null +++ b/vendor/github.com/armPelionEdge/devicedb/version/version.go @@ -0,0 +1,32 @@ +package version +// + // Copyright (c) 2019 ARM Limited. + // + // SPDX-License-Identifier: MIT + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + // SOFTWARE. 
+ // + + +// Semantic versioning +// x.y.z +// Increment x when you make incompatible API changes +// Increment y when you add functionality in a backwards-compatible manner +// Increment z when you make backwards-compatible bug fixes +var DEVICEDB_VERSION = "1.9.4" diff --git a/vendor/github.com/armpelionedge/edge-go-logger/logging/env_watcher.go b/vendor/github.com/armpelionedge/edge-go-logger/logging/env_watcher.go deleted file mode 100644 index 7b84632..0000000 --- a/vendor/github.com/armpelionedge/edge-go-logger/logging/env_watcher.go +++ /dev/null @@ -1,65 +0,0 @@ -package logging -// -// Copyright (c) 2018, Arm Limited and affiliates. -// SPDX-License-Identifier: Apache-2.0 -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - - - -import ( - "io/ioutil" - "os" - "time" -) - -const ( - // debug, info, notice, warning, error, critical - LogLevelEnvironmentVariable string = "WIGWAG_LOG_LEVEL" - LogLevelSyncPeriodSeconds int = 1 -) - -// Checks the log level environment variable periodically for changes -// update running log level if necessary -func watchLoggingConfig() { - var logLevelSetting string - - for { - time.Sleep(time.Second * time.Duration(LogLevelSyncPeriodSeconds)) - - var logLevelSettingFile string = os.Getenv(LogLevelEnvironmentVariable) - - if logLevelSettingFile == "" { - continue - } - - contents, err := ioutil.ReadFile(logLevelSettingFile) - - if err != nil { - Log.Errorf("Unable to retrieve log level from %s: %v", logLevelSettingFile, err) - - continue - } - - var newLogLevelSetting string = string(contents) - - if logLevelSetting != newLogLevelSetting && LogLevelIsValid(newLogLevelSetting) { - Log.Debugf("Setting logging level to %s", newLogLevelSetting) - - logLevelSetting = newLogLevelSetting - - SetLoggingLevel(newLogLevelSetting) - } - } -} diff --git a/vendor/github.com/armpelionedge/edge-go-logger/logging/logging.go b/vendor/github.com/armpelionedge/edge-go-logger/logging/logging.go deleted file mode 100644 index b628fcd..0000000 --- a/vendor/github.com/armpelionedge/edge-go-logger/logging/logging.go +++ /dev/null @@ -1,95 +0,0 @@ -package logging -// -// Copyright (c) 2018, Arm Limited and affiliates. -// SPDX-License-Identifier: Apache-2.0 -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - - -import ( - "os" - "github.com/op/go-logging" - "strings" - "sync" -) - -type splitLogBackend struct { - rwMu sync.RWMutex - outLogBackend logging.LeveledBackend - errLogBackend logging.LeveledBackend -} - -func newSplitLogBackend(outLogBackend, errLogBackend logging.LeveledBackend) *splitLogBackend { - return &splitLogBackend{ - outLogBackend: outLogBackend, - errLogBackend: errLogBackend, - } -} - -func (slb *splitLogBackend) Log(level logging.Level, calldepth int, rec *logging.Record) error { - // Uses RWMutex so that calls to Log can happen concurrently with each other but not - // with updates to the log level - slb.rwMu.RLock() - defer slb.rwMu.RUnlock() - - if level <= logging.WARNING { - return slb.errLogBackend.Log(level, calldepth + 2, rec) - } - - return slb.outLogBackend.Log(level, calldepth + 2, rec) -} - -func (slb *splitLogBackend) SetLevel(level logging.Level, module string) { - slb.rwMu.Lock() - defer slb.rwMu.Unlock() - - slb.outLogBackend.SetLevel(level, module) - slb.errLogBackend.SetLevel(level, module) -} - -var Log = logging.MustGetLogger("") -var log = Log -var loggingBackend *splitLogBackend - -func init() { - var format = logging.MustStringFormatter(`%{color}%{time:15:04:05.000} ▶ %{level:.4s} %{shortfile}%{color:reset} %{message}`) - var outBackend = logging.NewLogBackend(os.Stdout, "", 0) - var outBackendFormatter = logging.NewBackendFormatter(outBackend, format) - var outLogBackend = logging.AddModuleLevel(outBackendFormatter) - var errBackend = logging.NewLogBackend(os.Stderr, "", 0) - var errBackendFormatter = logging.NewBackendFormatter(errBackend, format) - var errLogBackend = logging.AddModuleLevel(errBackendFormatter) - - loggingBackend = newSplitLogBackend(outLogBackend, errLogBackend) - - logging.SetBackend(loggingBackend) - - go watchLoggingConfig() -} - -func LogLevelIsValid(ll string) bool { - _, err := logging.LogLevel(strings.ToUpper(ll)) - - return err == nil -} - -func SetLoggingLevel(ll string) { - logLevel, err := logging.LogLevel(strings.ToUpper(ll)) - - if err != nil { - logLevel = logging.ERROR - } - - loggingBackend.SetLevel(logLevel, "") -} diff --git a/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/.travis.yml b/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/.travis.yml deleted file mode 100644 index 70e012b..0000000 --- a/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/.travis.yml +++ /dev/null @@ -1,6 +0,0 @@ -language: go - -go: - - 1.0 - - 1.1 - - tip diff --git a/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/CHANGELOG.md b/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/CHANGELOG.md deleted file mode 100644 index 4b7d233..0000000 --- a/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/CHANGELOG.md +++ /dev/null @@ -1,19 +0,0 @@ -# Changelog - -## 2.0.0-rc1 (2016-02-11) - -Time flies and it has been three years since this package was first released. -There have been a couple of API changes I have wanted to do for some time but -I've tried to maintain backwards compatibility. Some inconsistencies in the -API have started to show, proper vendor support in Go out of the box and -the fact that `go vet` will give warnings -- I have decided to bump the major -version. - -* Make eg. `Info` and `Infof` do different things. You want to change all calls - to `Info` with a string format go to `Infof` etc. In many cases, `go vet` will - guide you. 
-* `Id` in `Record` is now called `ID` - -## 1.0.0 (2013-02-21) - -Initial release diff --git a/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/CONTRIBUTORS b/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/CONTRIBUTORS deleted file mode 100644 index 958416e..0000000 --- a/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/CONTRIBUTORS +++ /dev/null @@ -1,5 +0,0 @@ -Alec Thomas -Guilhem Lettron -Ivan Daniluk -Nimi Wariboko Jr -Róbert Selvek diff --git a/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/README.md b/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/README.md deleted file mode 100644 index 0a7326b..0000000 --- a/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/README.md +++ /dev/null @@ -1,93 +0,0 @@ -## Golang logging library - -[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/op/go-logging) [![build](https://img.shields.io/travis/op/go-logging.svg?style=flat)](https://travis-ci.org/op/go-logging) - -Package logging implements a logging infrastructure for Go. Its output format -is customizable and supports different logging backends like syslog, file and -memory. Multiple backends can be utilized with different log levels per backend -and logger. - -**_NOTE:_** backwards compatibility promise have been dropped for master. Please -vendor this package or use `gopkg.in/op/go-logging.v1` for previous version. See -[changelog](CHANGELOG.md) for details. - -## Example - -Let's have a look at an [example](examples/example.go) which demonstrates most -of the features found in this library. - -[![Example Output](examples/example.png)](examples/example.go) - -```go -package main - -import ( - "os" - - "github.com/op/go-logging" -) - -var log = logging.MustGetLogger("example") - -// Example format string. Everything except the message has a custom color -// which is dependent on the log level. Many fields have a custom output -// formatting too, eg. the time returns the hour down to the milli second. -var format = logging.MustStringFormatter( - `%{color}%{time:15:04:05.000} %{shortfunc} ▶ %{level:.4s} %{id:03x}%{color:reset} %{message}`, -) - -// Password is just an example type implementing the Redactor interface. Any -// time this is logged, the Redacted() function will be called. -type Password string - -func (p Password) Redacted() interface{} { - return logging.Redact(string(p)) -} - -func main() { - // For demo purposes, create two backend for os.Stderr. - backend1 := logging.NewLogBackend(os.Stderr, "", 0) - backend2 := logging.NewLogBackend(os.Stderr, "", 0) - - // For messages written to backend2 we want to add some additional - // information to the output, including the used log level and the name of - // the function. - backend2Formatter := logging.NewBackendFormatter(backend2, format) - - // Only errors and more severe messages should be sent to backend1 - backend1Leveled := logging.AddModuleLevel(backend1) - backend1Leveled.SetLevel(logging.ERROR, "") - - // Set the backends to be used. - logging.SetBackend(backend1Leveled, backend2Formatter) - - log.Debugf("debug %s", Password("secret")) - log.Info("info") - log.Notice("notice") - log.Warning("warning") - log.Error("err") - log.Critical("crit") -} -``` - -## Installing - -### Using *go get* - - $ go get github.com/op/go-logging - -After this command *go-logging* is ready to use. 
Its source will be in: - - $GOPATH/src/pkg/github.com/op/go-logging - -You can use `go get -u` to update the package. - -## Documentation - -For docs, see http://godoc.org/github.com/op/go-logging or run: - - $ godoc github.com/op/go-logging - -## Additional resources - -* [wslog](https://godoc.org/github.com/cryptix/exp/wslog) -- exposes log messages through a WebSocket. diff --git a/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/backend.go b/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/backend.go deleted file mode 100644 index 74d9201..0000000 --- a/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/backend.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2013, Örjan Persson. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package logging - -// defaultBackend is the backend used for all logging calls. -var defaultBackend LeveledBackend - -// Backend is the interface which a log backend need to implement to be able to -// be used as a logging backend. -type Backend interface { - Log(Level, int, *Record) error -} - -// SetBackend replaces the backend currently set with the given new logging -// backend. -func SetBackend(backends ...Backend) LeveledBackend { - var backend Backend - if len(backends) == 1 { - backend = backends[0] - } else { - backend = MultiLogger(backends...) - } - - defaultBackend = AddModuleLevel(backend) - return defaultBackend -} - -// SetLevel sets the logging level for the specified module. The module -// corresponds to the string specified in GetLogger. -func SetLevel(level Level, module string) { - defaultBackend.SetLevel(level, module) -} - -// GetLevel returns the logging level for the specified module. -func GetLevel(module string) Level { - return defaultBackend.GetLevel(module) -} diff --git a/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/format.go b/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/format.go deleted file mode 100644 index 7160674..0000000 --- a/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/format.go +++ /dev/null @@ -1,414 +0,0 @@ -// Copyright 2013, Örjan Persson. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package logging - -import ( - "bytes" - "errors" - "fmt" - "io" - "os" - "path" - "path/filepath" - "regexp" - "runtime" - "strconv" - "strings" - "sync" - "time" -) - -// TODO see Formatter interface in fmt/print.go -// TODO try text/template, maybe it have enough performance -// TODO other template systems? -// TODO make it possible to specify formats per backend? -type fmtVerb int - -const ( - fmtVerbTime fmtVerb = iota - fmtVerbLevel - fmtVerbID - fmtVerbPid - fmtVerbProgram - fmtVerbModule - fmtVerbMessage - fmtVerbLongfile - fmtVerbShortfile - fmtVerbLongpkg - fmtVerbShortpkg - fmtVerbLongfunc - fmtVerbShortfunc - fmtVerbCallpath - fmtVerbLevelColor - - // Keep last, there are no match for these below. 
- fmtVerbUnknown - fmtVerbStatic -) - -var fmtVerbs = []string{ - "time", - "level", - "id", - "pid", - "program", - "module", - "message", - "longfile", - "shortfile", - "longpkg", - "shortpkg", - "longfunc", - "shortfunc", - "callpath", - "color", -} - -const rfc3339Milli = "2006-01-02T15:04:05.999Z07:00" - -var defaultVerbsLayout = []string{ - rfc3339Milli, - "s", - "d", - "d", - "s", - "s", - "s", - "s", - "s", - "s", - "s", - "s", - "s", - "0", - "", -} - -var ( - pid = os.Getpid() - program = filepath.Base(os.Args[0]) -) - -func getFmtVerbByName(name string) fmtVerb { - for i, verb := range fmtVerbs { - if name == verb { - return fmtVerb(i) - } - } - return fmtVerbUnknown -} - -// Formatter is the required interface for a custom log record formatter. -type Formatter interface { - Format(calldepth int, r *Record, w io.Writer) error -} - -// formatter is used by all backends unless otherwise overriden. -var formatter struct { - sync.RWMutex - def Formatter -} - -func getFormatter() Formatter { - formatter.RLock() - defer formatter.RUnlock() - return formatter.def -} - -var ( - // DefaultFormatter is the default formatter used and is only the message. - DefaultFormatter = MustStringFormatter("%{message}") - - // GlogFormatter mimics the glog format - GlogFormatter = MustStringFormatter("%{level:.1s}%{time:0102 15:04:05.999999} %{pid} %{shortfile}] %{message}") -) - -// SetFormatter sets the default formatter for all new backends. A backend will -// fetch this value once it is needed to format a record. Note that backends -// will cache the formatter after the first point. For now, make sure to set -// the formatter before logging. -func SetFormatter(f Formatter) { - formatter.Lock() - defer formatter.Unlock() - formatter.def = f -} - -var formatRe = regexp.MustCompile(`%{([a-z]+)(?::(.*?[^\\]))?}`) - -type part struct { - verb fmtVerb - layout string -} - -// stringFormatter contains a list of parts which explains how to build the -// formatted string passed on to the logging backend. -type stringFormatter struct { - parts []part -} - -// NewStringFormatter returns a new Formatter which outputs the log record as a -// string based on the 'verbs' specified in the format string. -// -// The verbs: -// -// General: -// %{id} Sequence number for log message (uint64). -// %{pid} Process id (int) -// %{time} Time when log occurred (time.Time) -// %{level} Log level (Level) -// %{module} Module (string) -// %{program} Basename of os.Args[0] (string) -// %{message} Message (string) -// %{longfile} Full file name and line number: /a/b/c/d.go:23 -// %{shortfile} Final file name element and line number: d.go:23 -// %{callpath} Callpath like main.a.b.c...c "..." meaning recursive call ~. meaning truncated path -// %{color} ANSI color based on log level -// -// For normal types, the output can be customized by using the 'verbs' defined -// in the fmt package, eg. '%{id:04d}' to make the id output be '%04d' as the -// format string. -// -// For time.Time, use the same layout as time.Format to change the time format -// when output, eg "2006-01-02T15:04:05.999Z-07:00". -// -// For the 'color' verb, the output can be adjusted to either use bold colors, -// i.e., '%{color:bold}' or to reset the ANSI attributes, i.e., -// '%{color:reset}' Note that if you use the color verb explicitly, be sure to -// reset it or else the color state will persist past your log message. 
e.g., -// "%{color:bold}%{time:15:04:05} %{level:-8s}%{color:reset} %{message}" will -// just colorize the time and level, leaving the message uncolored. -// -// For the 'callpath' verb, the output can be adjusted to limit the printing -// the stack depth. i.e. '%{callpath:3}' will print '~.a.b.c' -// -// Colors on Windows is unfortunately not supported right now and is currently -// a no-op. -// -// There's also a couple of experimental 'verbs'. These are exposed to get -// feedback and needs a bit of tinkering. Hence, they might change in the -// future. -// -// Experimental: -// %{longpkg} Full package path, eg. github.com/go-logging -// %{shortpkg} Base package path, eg. go-logging -// %{longfunc} Full function name, eg. littleEndian.PutUint32 -// %{shortfunc} Base function name, eg. PutUint32 -// %{callpath} Call function path, eg. main.a.b.c -func NewStringFormatter(format string) (Formatter, error) { - var fmter = &stringFormatter{} - - // Find the boundaries of all %{vars} - matches := formatRe.FindAllStringSubmatchIndex(format, -1) - if matches == nil { - return nil, errors.New("logger: invalid log format: " + format) - } - - // Collect all variables and static text for the format - prev := 0 - for _, m := range matches { - start, end := m[0], m[1] - if start > prev { - fmter.add(fmtVerbStatic, format[prev:start]) - } - - name := format[m[2]:m[3]] - verb := getFmtVerbByName(name) - if verb == fmtVerbUnknown { - return nil, errors.New("logger: unknown variable: " + name) - } - - // Handle layout customizations or use the default. If this is not for the - // time, color formatting or callpath, we need to prefix with %. - layout := defaultVerbsLayout[verb] - if m[4] != -1 { - layout = format[m[4]:m[5]] - } - if verb != fmtVerbTime && verb != fmtVerbLevelColor && verb != fmtVerbCallpath { - layout = "%" + layout - } - - fmter.add(verb, layout) - prev = end - } - end := format[prev:] - if end != "" { - fmter.add(fmtVerbStatic, end) - } - - // Make a test run to make sure we can format it correctly. - t, err := time.Parse(time.RFC3339, "2010-02-04T21:00:57-08:00") - if err != nil { - panic(err) - } - testFmt := "hello %s" - r := &Record{ - ID: 12345, - Time: t, - Module: "logger", - Args: []interface{}{"go"}, - fmt: &testFmt, - } - if err := fmter.Format(0, r, &bytes.Buffer{}); err != nil { - return nil, err - } - - return fmter, nil -} - -// MustStringFormatter is equivalent to NewStringFormatter with a call to panic -// on error. 
-func MustStringFormatter(format string) Formatter { - f, err := NewStringFormatter(format) - if err != nil { - panic("Failed to initialized string formatter: " + err.Error()) - } - return f -} - -func (f *stringFormatter) add(verb fmtVerb, layout string) { - f.parts = append(f.parts, part{verb, layout}) -} - -func (f *stringFormatter) Format(calldepth int, r *Record, output io.Writer) error { - for _, part := range f.parts { - if part.verb == fmtVerbStatic { - output.Write([]byte(part.layout)) - } else if part.verb == fmtVerbTime { - output.Write([]byte(r.Time.Format(part.layout))) - } else if part.verb == fmtVerbLevelColor { - doFmtVerbLevelColor(part.layout, r.Level, output) - } else if part.verb == fmtVerbCallpath { - depth, err := strconv.Atoi(part.layout) - if err != nil { - depth = 0 - } - output.Write([]byte(formatCallpath(calldepth+1, depth))) - } else { - var v interface{} - switch part.verb { - case fmtVerbLevel: - v = r.Level - break - case fmtVerbID: - v = r.ID - break - case fmtVerbPid: - v = pid - break - case fmtVerbProgram: - v = program - break - case fmtVerbModule: - v = r.Module - break - case fmtVerbMessage: - v = r.Message() - break - case fmtVerbLongfile, fmtVerbShortfile: - _, file, line, ok := runtime.Caller(calldepth + 1) - if !ok { - file = "???" - line = 0 - } else if part.verb == fmtVerbShortfile { - file = filepath.Base(file) - } - v = fmt.Sprintf("%s:%d", file, line) - case fmtVerbLongfunc, fmtVerbShortfunc, - fmtVerbLongpkg, fmtVerbShortpkg: - // TODO cache pc - v = "???" - if pc, _, _, ok := runtime.Caller(calldepth + 1); ok { - if f := runtime.FuncForPC(pc); f != nil { - v = formatFuncName(part.verb, f.Name()) - } - } - default: - panic("unhandled format part") - } - fmt.Fprintf(output, part.layout, v) - } - } - return nil -} - -// formatFuncName tries to extract certain part of the runtime formatted -// function name to some pre-defined variation. -// -// This function is known to not work properly if the package path or name -// contains a dot. -func formatFuncName(v fmtVerb, f string) string { - i := strings.LastIndex(f, "/") - j := strings.Index(f[i+1:], ".") - if j < 1 { - return "???" - } - pkg, fun := f[:i+j+1], f[i+j+2:] - switch v { - case fmtVerbLongpkg: - return pkg - case fmtVerbShortpkg: - return path.Base(pkg) - case fmtVerbLongfunc: - return fun - case fmtVerbShortfunc: - i = strings.LastIndex(fun, ".") - return fun[i+1:] - } - panic("unexpected func formatter") -} - -func formatCallpath(calldepth int, depth int) string { - v := "" - callers := make([]uintptr, 64) - n := runtime.Callers(calldepth+2, callers) - oldPc := callers[n-1] - - start := n - 3 - if depth > 0 && start >= depth { - start = depth - 1 - v += "~." - } - recursiveCall := false - for i := start; i >= 0; i-- { - pc := callers[i] - if oldPc == pc { - recursiveCall = true - continue - } - oldPc = pc - if recursiveCall { - recursiveCall = false - v += ".." - } - if i < start { - v += "." - } - if f := runtime.FuncForPC(pc); f != nil { - v += formatFuncName(fmtVerbShortfunc, f.Name()) - } - } - return v -} - -// backendFormatter combines a backend with a specific formatter making it -// possible to have different log formats for different backends. -type backendFormatter struct { - b Backend - f Formatter -} - -// NewBackendFormatter creates a new backend which makes all records that -// passes through it beeing formatted by the specific formatter. 
-func NewBackendFormatter(b Backend, f Formatter) Backend { - return &backendFormatter{b, f} -} - -// Log implements the Log function required by the Backend interface. -func (bf *backendFormatter) Log(level Level, calldepth int, r *Record) error { - // Make a shallow copy of the record and replace any formatter - r2 := *r - r2.formatter = bf.f - return bf.b.Log(level, calldepth+1, &r2) -} diff --git a/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/level.go b/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/level.go deleted file mode 100644 index 98dd191..0000000 --- a/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/level.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2013, Örjan Persson. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package logging - -import ( - "errors" - "strings" - "sync" -) - -// ErrInvalidLogLevel is used when an invalid log level has been used. -var ErrInvalidLogLevel = errors.New("logger: invalid log level") - -// Level defines all available log levels for log messages. -type Level int - -// Log levels. -const ( - CRITICAL Level = iota - ERROR - WARNING - NOTICE - INFO - DEBUG -) - -var levelNames = []string{ - "CRITICAL", - "ERROR", - "WARNING", - "NOTICE", - "INFO", - "DEBUG", -} - -// String returns the string representation of a logging level. -func (p Level) String() string { - return levelNames[p] -} - -// LogLevel returns the log level from a string representation. -func LogLevel(level string) (Level, error) { - for i, name := range levelNames { - if strings.EqualFold(name, level) { - return Level(i), nil - } - } - return ERROR, ErrInvalidLogLevel -} - -// Leveled interface is the interface required to be able to add leveled -// logging. -type Leveled interface { - GetLevel(string) Level - SetLevel(Level, string) - IsEnabledFor(Level, string) bool -} - -// LeveledBackend is a log backend with additional knobs for setting levels on -// individual modules to different levels. -type LeveledBackend interface { - Backend - Leveled -} - -type moduleLeveled struct { - levels map[string]Level - backend Backend - formatter Formatter - once sync.Once -} - -// AddModuleLevel wraps a log backend with knobs to have different log levels -// for different modules. -func AddModuleLevel(backend Backend) LeveledBackend { - var leveled LeveledBackend - var ok bool - if leveled, ok = backend.(LeveledBackend); !ok { - leveled = &moduleLeveled{ - levels: make(map[string]Level), - backend: backend, - } - } - return leveled -} - -// GetLevel returns the log level for the given module. -func (l *moduleLeveled) GetLevel(module string) Level { - level, exists := l.levels[module] - if exists == false { - level, exists = l.levels[""] - // no configuration exists, default to debug - if exists == false { - level = DEBUG - } - } - return level -} - -// SetLevel sets the log level for the given module. -func (l *moduleLeveled) SetLevel(level Level, module string) { - l.levels[module] = level -} - -// IsEnabledFor will return true if logging is enabled for the given module. -func (l *moduleLeveled) IsEnabledFor(level Level, module string) bool { - return level <= l.GetLevel(module) -} - -func (l *moduleLeveled) Log(level Level, calldepth int, rec *Record) (err error) { - if l.IsEnabledFor(level, rec.Module) { - // TODO get rid of traces of formatter here. BackendFormatter should be used. 
- rec.formatter = l.getFormatterAndCacheCurrent() - err = l.backend.Log(level, calldepth+1, rec) - } - return -} - -func (l *moduleLeveled) getFormatterAndCacheCurrent() Formatter { - l.once.Do(func() { - if l.formatter == nil { - l.formatter = getFormatter() - } - }) - return l.formatter -} diff --git a/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/log_nix.go b/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/log_nix.go deleted file mode 100644 index 4ff2ab1..0000000 --- a/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/log_nix.go +++ /dev/null @@ -1,109 +0,0 @@ -// +build !windows - -// Copyright 2013, Örjan Persson. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package logging - -import ( - "bytes" - "fmt" - "io" - "log" -) - -type color int - -const ( - ColorBlack = iota + 30 - ColorRed - ColorGreen - ColorYellow - ColorBlue - ColorMagenta - ColorCyan - ColorWhite -) - -var ( - colors = []string{ - CRITICAL: ColorSeq(ColorMagenta), - ERROR: ColorSeq(ColorRed), - WARNING: ColorSeq(ColorYellow), - NOTICE: ColorSeq(ColorGreen), - DEBUG: ColorSeq(ColorCyan), - } - boldcolors = []string{ - CRITICAL: ColorSeqBold(ColorMagenta), - ERROR: ColorSeqBold(ColorRed), - WARNING: ColorSeqBold(ColorYellow), - NOTICE: ColorSeqBold(ColorGreen), - DEBUG: ColorSeqBold(ColorCyan), - } -) - -// LogBackend utilizes the standard log module. -type LogBackend struct { - Logger *log.Logger - Color bool - ColorConfig []string -} - -// NewLogBackend creates a new LogBackend. -func NewLogBackend(out io.Writer, prefix string, flag int) *LogBackend { - return &LogBackend{Logger: log.New(out, prefix, flag)} -} - -// Log implements the Backend interface. -func (b *LogBackend) Log(level Level, calldepth int, rec *Record) error { - if b.Color { - col := colors[level] - if len(b.ColorConfig) > int(level) && b.ColorConfig[level] != "" { - col = b.ColorConfig[level] - } - - buf := &bytes.Buffer{} - buf.Write([]byte(col)) - buf.Write([]byte(rec.Formatted(calldepth + 1))) - buf.Write([]byte("\033[0m")) - // For some reason, the Go logger arbitrarily decided "2" was the correct - // call depth... 
- return b.Logger.Output(calldepth+2, buf.String()) - } - - return b.Logger.Output(calldepth+2, rec.Formatted(calldepth+1)) -} - -// ConvertColors takes a list of ints representing colors for log levels and -// converts them into strings for ANSI color formatting -func ConvertColors(colors []int, bold bool) []string { - converted := []string{} - for _, i := range colors { - if bold { - converted = append(converted, ColorSeqBold(color(i))) - } else { - converted = append(converted, ColorSeq(color(i))) - } - } - - return converted -} - -func ColorSeq(color color) string { - return fmt.Sprintf("\033[%dm", int(color)) -} - -func ColorSeqBold(color color) string { - return fmt.Sprintf("\033[%d;1m", int(color)) -} - -func doFmtVerbLevelColor(layout string, level Level, output io.Writer) { - if layout == "bold" { - output.Write([]byte(boldcolors[level])) - } else if layout == "reset" { - output.Write([]byte("\033[0m")) - } else { - output.Write([]byte(colors[level])) - } -} diff --git a/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/log_windows.go b/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/log_windows.go deleted file mode 100644 index b8dc92c..0000000 --- a/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/log_windows.go +++ /dev/null @@ -1,107 +0,0 @@ -// +build windows -// Copyright 2013, Örjan Persson. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package logging - -import ( - "bytes" - "io" - "log" - "syscall" -) - -var ( - kernel32DLL = syscall.NewLazyDLL("kernel32.dll") - setConsoleTextAttributeProc = kernel32DLL.NewProc("SetConsoleTextAttribute") -) - -// Character attributes -// Note: -// -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan). -// Clearing all foreground or background colors results in black; setting all creates white. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes. -const ( - fgBlack = 0x0000 - fgBlue = 0x0001 - fgGreen = 0x0002 - fgCyan = 0x0003 - fgRed = 0x0004 - fgMagenta = 0x0005 - fgYellow = 0x0006 - fgWhite = 0x0007 - fgIntensity = 0x0008 - fgMask = 0x000F -) - -var ( - colors = []uint16{ - INFO: fgWhite, - CRITICAL: fgMagenta, - ERROR: fgRed, - WARNING: fgYellow, - NOTICE: fgGreen, - DEBUG: fgCyan, - } - boldcolors = []uint16{ - INFO: fgWhite | fgIntensity, - CRITICAL: fgMagenta | fgIntensity, - ERROR: fgRed | fgIntensity, - WARNING: fgYellow | fgIntensity, - NOTICE: fgGreen | fgIntensity, - DEBUG: fgCyan | fgIntensity, - } -) - -type file interface { - Fd() uintptr -} - -// LogBackend utilizes the standard log module. -type LogBackend struct { - Logger *log.Logger - Color bool - - // f is set to a non-nil value if the underlying writer which logs writes to - // implements the file interface. This makes us able to colorise the output. - f file -} - -// NewLogBackend creates a new LogBackend. -func NewLogBackend(out io.Writer, prefix string, flag int) *LogBackend { - b := &LogBackend{Logger: log.New(out, prefix, flag)} - - // Unfortunately, the API used only takes an io.Writer where the Windows API - // need the actual fd to change colors. 
- if f, ok := out.(file); ok { - b.f = f - } - - return b -} - -func (b *LogBackend) Log(level Level, calldepth int, rec *Record) error { - if b.Color && b.f != nil { - buf := &bytes.Buffer{} - setConsoleTextAttribute(b.f, colors[level]) - buf.Write([]byte(rec.Formatted(calldepth + 1))) - err := b.Logger.Output(calldepth+2, buf.String()) - setConsoleTextAttribute(b.f, fgWhite) - return err - } - return b.Logger.Output(calldepth+2, rec.Formatted(calldepth+1)) -} - -// setConsoleTextAttribute sets the attributes of characters written to the -// console screen buffer by the WriteFile or WriteConsole function. -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx. -func setConsoleTextAttribute(f file, attribute uint16) bool { - ok, _, _ := setConsoleTextAttributeProc.Call(f.Fd(), uintptr(attribute), 0) - return ok != 0 -} - -func doFmtVerbLevelColor(layout string, level Level, output io.Writer) { - // TODO not supported on Windows since the io.Writer here is actually a - // bytes.Buffer. -} diff --git a/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/logger.go b/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/logger.go deleted file mode 100644 index 535ed9b..0000000 --- a/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/logger.go +++ /dev/null @@ -1,259 +0,0 @@ -// Copyright 2013, Örjan Persson. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package logging implements a logging infrastructure for Go. It supports -// different logging backends like syslog, file and memory. Multiple backends -// can be utilized with different log levels per backend and logger. -package logging - -import ( - "bytes" - "fmt" - "log" - "os" - "strings" - "sync/atomic" - "time" -) - -// Redactor is an interface for types that may contain sensitive information -// (like passwords), which shouldn't be printed to the log. The idea was found -// in relog as part of the vitness project. -type Redactor interface { - Redacted() interface{} -} - -// Redact returns a string of * having the same length as s. -func Redact(s string) string { - return strings.Repeat("*", len(s)) -} - -var ( - // Sequence number is incremented and utilized for all log records created. - sequenceNo uint64 - - // timeNow is a customizable for testing purposes. - timeNow = time.Now -) - -// Record represents a log record and contains the timestamp when the record -// was created, an increasing id, filename and line and finally the actual -// formatted log line. -type Record struct { - ID uint64 - Time time.Time - Module string - Level Level - Args []interface{} - - // message is kept as a pointer to have shallow copies update this once - // needed. - message *string - fmt *string - formatter Formatter - formatted string -} - -// Formatted returns the formatted log record string. -func (r *Record) Formatted(calldepth int) string { - if r.formatted == "" { - var buf bytes.Buffer - r.formatter.Format(calldepth+1, r, &buf) - r.formatted = buf.String() - } - return r.formatted -} - -// Message returns the log record message. 
-func (r *Record) Message() string { - if r.message == nil { - // Redact the arguments that implements the Redactor interface - for i, arg := range r.Args { - if redactor, ok := arg.(Redactor); ok == true { - r.Args[i] = redactor.Redacted() - } - } - var buf bytes.Buffer - if r.fmt != nil { - fmt.Fprintf(&buf, *r.fmt, r.Args...) - } else { - // use Fprintln to make sure we always get space between arguments - fmt.Fprintln(&buf, r.Args...) - buf.Truncate(buf.Len() - 1) // strip newline - } - msg := buf.String() - r.message = &msg - } - return *r.message -} - -// Logger is the actual logger which creates log records based on the functions -// called and passes them to the underlying logging backend. -type Logger struct { - Module string - backend LeveledBackend - haveBackend bool - - // ExtraCallDepth can be used to add additional call depth when getting the - // calling function. This is normally used when wrapping a logger. - ExtraCalldepth int -} - -// SetBackend overrides any previously defined backend for this logger. -func (l *Logger) SetBackend(backend LeveledBackend) { - l.backend = backend - l.haveBackend = true -} - -// TODO call NewLogger and remove MustGetLogger? - -// GetLogger creates and returns a Logger object based on the module name. -func GetLogger(module string) (*Logger, error) { - return &Logger{Module: module}, nil -} - -// MustGetLogger is like GetLogger but panics if the logger can't be created. -// It simplifies safe initialization of a global logger for eg. a package. -func MustGetLogger(module string) *Logger { - logger, err := GetLogger(module) - if err != nil { - panic("logger: " + module + ": " + err.Error()) - } - return logger -} - -// Reset restores the internal state of the logging library. -func Reset() { - // TODO make a global Init() method to be less magic? or make it such that - // if there's no backends at all configured, we could use some tricks to - // automatically setup backends based if we have a TTY or not. - sequenceNo = 0 - b := SetBackend(NewLogBackend(os.Stderr, "", log.LstdFlags)) - b.SetLevel(DEBUG, "") - SetFormatter(DefaultFormatter) - timeNow = time.Now -} - -// IsEnabledFor returns true if the logger is enabled for the given level. -func (l *Logger) IsEnabledFor(level Level) bool { - return defaultBackend.IsEnabledFor(level, l.Module) -} - -func (l *Logger) log(lvl Level, format *string, args ...interface{}) { - if !l.IsEnabledFor(lvl) { - return - } - - // Create the logging record and pass it in to the backend - record := &Record{ - ID: atomic.AddUint64(&sequenceNo, 1), - Time: timeNow(), - Module: l.Module, - Level: lvl, - fmt: format, - Args: args, - } - - // TODO use channels to fan out the records to all backends? - // TODO in case of errors, do something (tricky) - - // calldepth=2 brings the stack up to the caller of the level - // methods, Info(), Fatal(), etc. - // ExtraCallDepth allows this to be extended further up the stack in case we - // are wrapping these methods, eg. to expose them package level - if l.haveBackend { - l.backend.Log(lvl, 2+l.ExtraCalldepth, record) - return - } - - defaultBackend.Log(lvl, 2+l.ExtraCalldepth, record) -} - -// Fatal is equivalent to l.Critical(fmt.Sprint()) followed by a call to os.Exit(1). -func (l *Logger) Fatal(args ...interface{}) { - l.log(CRITICAL, nil, args...) - os.Exit(1) -} - -// Fatalf is equivalent to l.Critical followed by a call to os.Exit(1). -func (l *Logger) Fatalf(format string, args ...interface{}) { - l.log(CRITICAL, &format, args...) 
- os.Exit(1) -} - -// Panic is equivalent to l.Critical(fmt.Sprint()) followed by a call to panic(). -func (l *Logger) Panic(args ...interface{}) { - l.log(CRITICAL, nil, args...) - panic(fmt.Sprint(args...)) -} - -// Panicf is equivalent to l.Critical followed by a call to panic(). -func (l *Logger) Panicf(format string, args ...interface{}) { - l.log(CRITICAL, &format, args...) - panic(fmt.Sprintf(format, args...)) -} - -// Critical logs a message using CRITICAL as log level. -func (l *Logger) Critical(args ...interface{}) { - l.log(CRITICAL, nil, args...) -} - -// Criticalf logs a message using CRITICAL as log level. -func (l *Logger) Criticalf(format string, args ...interface{}) { - l.log(CRITICAL, &format, args...) -} - -// Error logs a message using ERROR as log level. -func (l *Logger) Error(args ...interface{}) { - l.log(ERROR, nil, args...) -} - -// Errorf logs a message using ERROR as log level. -func (l *Logger) Errorf(format string, args ...interface{}) { - l.log(ERROR, &format, args...) -} - -// Warning logs a message using WARNING as log level. -func (l *Logger) Warning(args ...interface{}) { - l.log(WARNING, nil, args...) -} - -// Warningf logs a message using WARNING as log level. -func (l *Logger) Warningf(format string, args ...interface{}) { - l.log(WARNING, &format, args...) -} - -// Notice logs a message using NOTICE as log level. -func (l *Logger) Notice(args ...interface{}) { - l.log(NOTICE, nil, args...) -} - -// Noticef logs a message using NOTICE as log level. -func (l *Logger) Noticef(format string, args ...interface{}) { - l.log(NOTICE, &format, args...) -} - -// Info logs a message using INFO as log level. -func (l *Logger) Info(args ...interface{}) { - l.log(INFO, nil, args...) -} - -// Infof logs a message using INFO as log level. -func (l *Logger) Infof(format string, args ...interface{}) { - l.log(INFO, &format, args...) -} - -// Debug logs a message using DEBUG as log level. -func (l *Logger) Debug(args ...interface{}) { - l.log(DEBUG, nil, args...) -} - -// Debugf logs a message using DEBUG as log level. -func (l *Logger) Debugf(format string, args ...interface{}) { - l.log(DEBUG, &format, args...) -} - -func init() { - Reset() -} diff --git a/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/memory.go b/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/memory.go deleted file mode 100644 index 8d5152c..0000000 --- a/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/memory.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright 2013, Örjan Persson. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine - -package logging - -import ( - "sync" - "sync/atomic" - "time" - "unsafe" -) - -// TODO pick one of the memory backends and stick with it or share interface. - -// InitForTesting is a convenient method when using logging in a test. Once -// called, the time will be frozen to January 1, 1970 UTC. -func InitForTesting(level Level) *MemoryBackend { - Reset() - - memoryBackend := NewMemoryBackend(10240) - - leveledBackend := AddModuleLevel(memoryBackend) - leveledBackend.SetLevel(level, "") - SetBackend(leveledBackend) - - timeNow = func() time.Time { - return time.Unix(0, 0).UTC() - } - return memoryBackend -} - -// Node is a record node pointing to an optional next node. -type node struct { - next *node - Record *Record -} - -// Next returns the next record node. 
If there's no node available, it will -// return nil. -func (n *node) Next() *node { - return n.next -} - -// MemoryBackend is a simple memory based logging backend that will not produce -// any output but merly keep records, up to the given size, in memory. -type MemoryBackend struct { - size int32 - maxSize int32 - head, tail unsafe.Pointer -} - -// NewMemoryBackend creates a simple in-memory logging backend. -func NewMemoryBackend(size int) *MemoryBackend { - return &MemoryBackend{maxSize: int32(size)} -} - -// Log implements the Log method required by Backend. -func (b *MemoryBackend) Log(level Level, calldepth int, rec *Record) error { - var size int32 - - n := &node{Record: rec} - np := unsafe.Pointer(n) - - // Add the record to the tail. If there's no records available, tail and - // head will both be nil. When we successfully set the tail and the previous - // value was nil, it's safe to set the head to the current value too. - for { - tailp := b.tail - swapped := atomic.CompareAndSwapPointer( - &b.tail, - tailp, - np, - ) - if swapped == true { - if tailp == nil { - b.head = np - } else { - (*node)(tailp).next = n - } - size = atomic.AddInt32(&b.size, 1) - break - } - } - - // Since one record was added, we might have overflowed the list. Remove - // a record if that is the case. The size will fluctate a bit, but - // eventual consistent. - if b.maxSize > 0 && size > b.maxSize { - for { - headp := b.head - head := (*node)(b.head) - if head.next == nil { - break - } - swapped := atomic.CompareAndSwapPointer( - &b.head, - headp, - unsafe.Pointer(head.next), - ) - if swapped == true { - atomic.AddInt32(&b.size, -1) - break - } - } - } - return nil -} - -// Head returns the oldest record node kept in memory. It can be used to -// iterate over records, one by one, up to the last record. -// -// Note: new records can get added while iterating. Hence the number of records -// iterated over might be larger than the maximum size. -func (b *MemoryBackend) Head() *node { - return (*node)(b.head) -} - -type event int - -const ( - eventFlush event = iota - eventStop -) - -// ChannelMemoryBackend is very similar to the MemoryBackend, except that it -// internally utilizes a channel. -type ChannelMemoryBackend struct { - maxSize int - size int - incoming chan *Record - events chan event - mu sync.Mutex - running bool - flushWg sync.WaitGroup - stopWg sync.WaitGroup - head, tail *node -} - -// NewChannelMemoryBackend creates a simple in-memory logging backend which -// utilizes a go channel for communication. -// -// Start will automatically be called by this function. -func NewChannelMemoryBackend(size int) *ChannelMemoryBackend { - backend := &ChannelMemoryBackend{ - maxSize: size, - incoming: make(chan *Record, 1024), - events: make(chan event), - } - backend.Start() - return backend -} - -// Start launches the internal goroutine which starts processing data from the -// input channel. -func (b *ChannelMemoryBackend) Start() { - b.mu.Lock() - defer b.mu.Unlock() - - // Launch the goroutine unless it's already running. 
- if b.running != true { - b.running = true - b.stopWg.Add(1) - go b.process() - } -} - -func (b *ChannelMemoryBackend) process() { - defer b.stopWg.Done() - for { - select { - case rec := <-b.incoming: - b.insertRecord(rec) - case e := <-b.events: - switch e { - case eventStop: - return - case eventFlush: - for len(b.incoming) > 0 { - b.insertRecord(<-b.incoming) - } - b.flushWg.Done() - } - } - } -} - -func (b *ChannelMemoryBackend) insertRecord(rec *Record) { - prev := b.tail - b.tail = &node{Record: rec} - if prev == nil { - b.head = b.tail - } else { - prev.next = b.tail - } - - if b.maxSize > 0 && b.size >= b.maxSize { - b.head = b.head.next - } else { - b.size++ - } -} - -// Flush waits until all records in the buffered channel have been processed. -func (b *ChannelMemoryBackend) Flush() { - b.flushWg.Add(1) - b.events <- eventFlush - b.flushWg.Wait() -} - -// Stop signals the internal goroutine to exit and waits until it have. -func (b *ChannelMemoryBackend) Stop() { - b.mu.Lock() - if b.running == true { - b.running = false - b.events <- eventStop - } - b.mu.Unlock() - b.stopWg.Wait() -} - -// Log implements the Log method required by Backend. -func (b *ChannelMemoryBackend) Log(level Level, calldepth int, rec *Record) error { - b.incoming <- rec - return nil -} - -// Head returns the oldest record node kept in memory. It can be used to -// iterate over records, one by one, up to the last record. -// -// Note: new records can get added while iterating. Hence the number of records -// iterated over might be larger than the maximum size. -func (b *ChannelMemoryBackend) Head() *node { - return b.head -} diff --git a/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/multi.go b/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/multi.go deleted file mode 100644 index 3731653..0000000 --- a/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/multi.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2013, Örjan Persson. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package logging - -// TODO remove Level stuff from the multi logger. Do one thing. - -// multiLogger is a log multiplexer which can be used to utilize multiple log -// backends at once. -type multiLogger struct { - backends []LeveledBackend -} - -// MultiLogger creates a logger which contain multiple loggers. -func MultiLogger(backends ...Backend) LeveledBackend { - var leveledBackends []LeveledBackend - for _, backend := range backends { - leveledBackends = append(leveledBackends, AddModuleLevel(backend)) - } - return &multiLogger{leveledBackends} -} - -// Log passes the log record to all backends. -func (b *multiLogger) Log(level Level, calldepth int, rec *Record) (err error) { - for _, backend := range b.backends { - if backend.IsEnabledFor(level, rec.Module) { - // Shallow copy of the record for the formatted cache on Record and get the - // record formatter from the backend. - r2 := *rec - if e := backend.Log(level, calldepth+1, &r2); e != nil { - err = e - } - } - } - return -} - -// GetLevel returns the highest level enabled by all backends. -func (b *multiLogger) GetLevel(module string) Level { - var level Level - for _, backend := range b.backends { - if backendLevel := backend.GetLevel(module); backendLevel > level { - level = backendLevel - } - } - return level -} - -// SetLevel propagates the same level to all backends. 
-func (b *multiLogger) SetLevel(level Level, module string) { - for _, backend := range b.backends { - backend.SetLevel(level, module) - } -} - -// IsEnabledFor returns true if any of the backends are enabled for it. -func (b *multiLogger) IsEnabledFor(level Level, module string) bool { - for _, backend := range b.backends { - if backend.IsEnabledFor(level, module) { - return true - } - } - return false -} diff --git a/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/syslog.go b/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/syslog.go deleted file mode 100644 index 4faa531..0000000 --- a/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/syslog.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2013, Örjan Persson. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//+build !windows,!plan9 - -package logging - -import "log/syslog" - -// SyslogBackend is a simple logger to syslog backend. It automatically maps -// the internal log levels to appropriate syslog log levels. -type SyslogBackend struct { - Writer *syslog.Writer -} - -// NewSyslogBackend connects to the syslog daemon using UNIX sockets with the -// given prefix. If prefix is not given, the prefix will be derived from the -// launched command. -func NewSyslogBackend(prefix string) (b *SyslogBackend, err error) { - var w *syslog.Writer - w, err = syslog.New(syslog.LOG_CRIT, prefix) - return &SyslogBackend{w}, err -} - -// NewSyslogBackendPriority is the same as NewSyslogBackend, but with custom -// syslog priority, like syslog.LOG_LOCAL3|syslog.LOG_DEBUG etc. -func NewSyslogBackendPriority(prefix string, priority syslog.Priority) (b *SyslogBackend, err error) { - var w *syslog.Writer - w, err = syslog.New(priority, prefix) - return &SyslogBackend{w}, err -} - -// Log implements the Backend interface. -func (b *SyslogBackend) Log(level Level, calldepth int, rec *Record) error { - line := rec.Formatted(calldepth + 1) - switch level { - case CRITICAL: - return b.Writer.Crit(line) - case ERROR: - return b.Writer.Err(line) - case WARNING: - return b.Writer.Warning(line) - case NOTICE: - return b.Writer.Notice(line) - case INFO: - return b.Writer.Info(line) - case DEBUG: - return b.Writer.Debug(line) - default: - } - panic("unhandled log level") -} diff --git a/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/syslog_fallback.go b/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/syslog_fallback.go deleted file mode 100644 index 91bc18d..0000000 --- a/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/syslog_fallback.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2013, Örjan Persson. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//+build windows plan9 - -package logging - -import ( - "fmt" -) - -type Priority int - -type SyslogBackend struct { -} - -func NewSyslogBackend(prefix string) (b *SyslogBackend, err error) { - return nil, fmt.Errorf("Platform does not support syslog") -} - -func NewSyslogBackendPriority(prefix string, priority Priority) (b *SyslogBackend, err error) { - return nil, fmt.Errorf("Platform does not support syslog") -} - -func (b *SyslogBackend) Log(level Level, calldepth int, rec *Record) error { - return fmt.Errorf("Platform does not support syslog") -} diff --git a/vendor/github.com/gogo/protobuf/AUTHORS b/vendor/github.com/gogo/protobuf/AUTHORS deleted file mode 100644 index 3d97fc7..0000000 --- a/vendor/github.com/gogo/protobuf/AUTHORS +++ /dev/null @@ -1,15 +0,0 @@ -# This is the official list of GoGo authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS file, which -# lists people. For example, employees are listed in CONTRIBUTORS, -# but not in AUTHORS, because the employer holds the copyright. - -# Names should be added to this file as one of -# Organization's name -# Individual's name -# Individual's name - -# Please keep the list sorted. - -Sendgrid, Inc -Vastech SA (PTY) LTD -Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/CONTRIBUTORS b/vendor/github.com/gogo/protobuf/CONTRIBUTORS deleted file mode 100644 index 1b4f6c2..0000000 --- a/vendor/github.com/gogo/protobuf/CONTRIBUTORS +++ /dev/null @@ -1,23 +0,0 @@ -Anton Povarov -Brian Goff -Clayton Coleman -Denis Smirnov -DongYun Kang -Dwayne Schultz -Georg Apitz -Gustav Paul -Johan Brandhorst -John Shahid -John Tuley -Laurent -Patrick Lee -Peter Edge -Roger Johansson -Sam Nguyen -Sergio Arbeo -Stephen J Day -Tamir Duberstein -Todd Eisenberger -Tormod Erevik Lea -Vyacheslav Kim -Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/gogo/protobuf/LICENSE deleted file mode 100644 index 7be0cc7..0000000 --- a/vendor/github.com/gogo/protobuf/LICENSE +++ /dev/null @@ -1,36 +0,0 @@ -Protocol Buffers for Go with Gadgets - -Copyright (c) 2013, The GoGo Authors. All rights reserved. -http://github.com/gogo/protobuf - -Go support for Protocol Buffers - Google's data interchange format - -Copyright 2010 The Go Authors. All rights reserved. -https://github.com/golang/protobuf - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/gogo/protobuf/gogoproto/Makefile b/vendor/github.com/gogo/protobuf/gogoproto/Makefile deleted file mode 100644 index 0b4659b..0000000 --- a/vendor/github.com/gogo/protobuf/gogoproto/Makefile +++ /dev/null @@ -1,37 +0,0 @@ -# Protocol Buffers for Go with Gadgets -# -# Copyright (c) 2013, The GoGo Authors. All rights reserved. -# http://github.com/gogo/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -regenerate: - go install github.com/gogo/protobuf/protoc-gen-gogo - protoc --gogo_out=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:../../../../ --proto_path=../../../../:../protobuf/:. *.proto - -restore: - cp gogo.pb.golden gogo.pb.go - -preserve: - cp gogo.pb.go gogo.pb.golden diff --git a/vendor/github.com/gogo/protobuf/gogoproto/doc.go b/vendor/github.com/gogo/protobuf/gogoproto/doc.go deleted file mode 100644 index 147b5ec..0000000 --- a/vendor/github.com/gogo/protobuf/gogoproto/doc.go +++ /dev/null @@ -1,169 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package gogoproto provides extensions for protocol buffers to achieve: - - - fast marshalling and unmarshalling. - - peace of mind by optionally generating test and benchmark code. - - more canonical Go structures. - - less typing by optionally generating extra helper code. - - goprotobuf compatibility - -More Canonical Go Structures - -A lot of time working with a goprotobuf struct will lead you to a place where you create another struct that is easier to work with and then have a function to copy the values between the two structs. -You might also find that basic structs that started their life as part of an API need to be sent over the wire. With gob, you could just send it. With goprotobuf, you need to make a parallel struct. -Gogoprotobuf tries to fix these problems with the nullable, embed, customtype and customname field extensions. - - - nullable, if false, a field is generated without a pointer (see warning below). - - embed, if true, the field is generated as an embedded field. - - customtype, It works with the Marshal and Unmarshal methods, to allow you to have your own types in your struct, but marshal to bytes. For example, custom.Uuid or custom.Fixed128 - - customname (beta), Changes the generated fieldname. This is especially useful when generated methods conflict with fieldnames. - - casttype (beta), Changes the generated fieldtype. All generated code assumes that this type is castable to the protocol buffer field type. It does not work for structs or enums. - - castkey (beta), Changes the generated fieldtype for a map key. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. - - castvalue (beta), Changes the generated fieldtype for a map value. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. - -Warning about nullable: According to the Protocol Buffer specification, you should be able to tell whether a field is set or unset. With the option nullable=false this feature is lost, since your non-nullable fields will always be set. It can be seen as a layer on top of Protocol Buffers, where before and after marshalling all non-nullable fields are set and they cannot be unset. - -Let us look at: - - github.com/gogo/protobuf/test/example/example.proto - -for a quicker overview. 
-
-The following message:
-
- package test;
-
- import "github.com/gogo/protobuf/gogoproto/gogo.proto";
-
- message A {
- optional string Description = 1 [(gogoproto.nullable) = false];
- optional int64 Number = 2 [(gogoproto.nullable) = false];
- optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false];
- }
-
-Will generate a go struct which looks a lot like this:
-
- type A struct {
- Description string
- Number int64
- Id github_com_gogo_protobuf_test_custom.Uuid
- }
-
-You will see there are no pointers, since all fields are non-nullable.
-You will also see a custom type which marshals to a string.
-Be warned it is your responsibility to test your custom types thoroughly.
-You should think of every possible empty and nil case for your marshaling, unmarshaling and size methods.
-
-Next we will embed the message A in message B.
-
- message B {
- optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true];
- repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false];
- }
-
-See below that A is embedded in B.
-
- type B struct {
- A
- G []github_com_gogo_protobuf_test_custom.Uint128
- }
-
-Also see the repeated custom type.
-
- type Uint128 [2]uint64
-
-Next we will create a custom name for one of our fields.
-
- message C {
- optional int64 size = 1 [(gogoproto.customname) = "MySize"];
- }
-
-See below that the field's name is MySize and not Size.
-
- type C struct {
- MySize *int64
- }
-
-This is useful when having a protocol buffer message with a field name which conflicts with a generated method.
-As an example, having a field name size and using the sizer plugin to generate a Size method will cause a go compiler error.
-Using customname you can fix this error without changing the field name.
-This is typically useful when working with a protocol buffer that was designed before these methods and/or the go language were available.
-
-Gogoprotobuf also has some more subtle changes, these could be changed back:
-
- - the generated package name for imports do not have the extra /filename.pb,
- but are actually the imports specified in the .proto file.
-
-Gogoprotobuf also has lost some features which should be brought back with time:
-
- - Marshalling and unmarshalling with reflect and without the unsafe package,
- this requires work in pointer_reflect.go
-
-Why does nullable break protocol buffer specifications:
-
-The protocol buffer specification states, somewhere, that you should be able to tell whether a
-field is set or unset. With the option nullable=false this feature is lost,
-since your non-nullable fields will always be set. It can be seen as a layer on top of
-protocol buffers, where before and after marshalling all non-nullable fields are set
-and they cannot be unset.
-
-Goprotobuf Compatibility:
-
-Gogoprotobuf is compatible with Goprotobuf, because it is compatible with protocol buffers.
-Gogoprotobuf generates the same code as goprotobuf if no extensions are used.
-The enumprefix, getters and stringer extensions can be used to remove some of the unnecessary code generated by goprotobuf:
-
- - gogoproto_import, if false, the generated code imports github.com/golang/protobuf/proto instead of github.com/gogo/protobuf/proto.
- - goproto_enum_prefix, if false, generates the enum constant names without the messagetype prefix
- - goproto_enum_stringer (experimental), if false, the enum is generated without the default string method, this is useful if you would rather use enum_stringer, or write your own string method.
- - goproto_getters, if false, the message is generated without get methods, this is useful when you would rather use face
- - goproto_stringer, if false, the message is generated without the default string method, this is useful if you would rather use stringer, or write your own string method.
- - goproto_extensions_map (beta), if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension
- - goproto_unrecognized (beta), if false, XXX_unrecognized field is not generated. This is useful in conjunction with gogoproto.nullable=false, to generate structures completely devoid of pointers and reduce GC pressure at the cost of losing information about unrecognized fields.
- - goproto_registration (beta), if true, the generated files will register all messages and types against both gogo/protobuf and golang/protobuf. This is necessary when using third-party packages which read registrations from golang/protobuf (such as the grpc-gateway).
-
-Less Typing and Peace of Mind are explained in their specific plugin folders godoc:
-
- - github.com/gogo/protobuf/plugin/
-
-If you do not use any of these extensions the code that is generated
-will be the same as if goprotobuf had generated it.
-
-The most complete way to see examples is to look at
-
- github.com/gogo/protobuf/test/thetest.proto
-
-Gogoprototest is a separate project,
-because we want to keep gogoprotobuf independent of goprotobuf,
-but we still want to test it thoroughly.
-
-*/
-package gogoproto
diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go
deleted file mode 100644
index 523afd2..0000000
--- a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go
+++ /dev/null
@@ -1,825 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: gogo.proto
-
-/*
-Package gogoproto is a generated protocol buffer package.
-
-It is generated from these files:
- gogo.proto
-
-It has these top-level messages:
-*/
-package gogoproto
-
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -var E_GoprotoEnumPrefix = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.EnumOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 62001, - Name: "gogoproto.goproto_enum_prefix", - Tag: "varint,62001,opt,name=goproto_enum_prefix,json=goprotoEnumPrefix", - Filename: "gogo.proto", -} - -var E_GoprotoEnumStringer = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.EnumOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 62021, - Name: "gogoproto.goproto_enum_stringer", - Tag: "varint,62021,opt,name=goproto_enum_stringer,json=goprotoEnumStringer", - Filename: "gogo.proto", -} - -var E_EnumStringer = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.EnumOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 62022, - Name: "gogoproto.enum_stringer", - Tag: "varint,62022,opt,name=enum_stringer,json=enumStringer", - Filename: "gogo.proto", -} - -var E_EnumCustomname = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.EnumOptions)(nil), - ExtensionType: (*string)(nil), - Field: 62023, - Name: "gogoproto.enum_customname", - Tag: "bytes,62023,opt,name=enum_customname,json=enumCustomname", - Filename: "gogo.proto", -} - -var E_Enumdecl = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.EnumOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 62024, - Name: "gogoproto.enumdecl", - Tag: "varint,62024,opt,name=enumdecl", - Filename: "gogo.proto", -} - -var E_EnumvalueCustomname = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.EnumValueOptions)(nil), - ExtensionType: (*string)(nil), - Field: 66001, - Name: "gogoproto.enumvalue_customname", - Tag: "bytes,66001,opt,name=enumvalue_customname,json=enumvalueCustomname", - Filename: "gogo.proto", -} - -var E_GoprotoGettersAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63001, - Name: "gogoproto.goproto_getters_all", - Tag: "varint,63001,opt,name=goproto_getters_all,json=goprotoGettersAll", - Filename: "gogo.proto", -} - -var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63002, - Name: "gogoproto.goproto_enum_prefix_all", - Tag: "varint,63002,opt,name=goproto_enum_prefix_all,json=goprotoEnumPrefixAll", - Filename: "gogo.proto", -} - -var E_GoprotoStringerAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63003, - Name: "gogoproto.goproto_stringer_all", - Tag: "varint,63003,opt,name=goproto_stringer_all,json=goprotoStringerAll", - Filename: "gogo.proto", -} - -var E_VerboseEqualAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63004, - Name: "gogoproto.verbose_equal_all", - Tag: "varint,63004,opt,name=verbose_equal_all,json=verboseEqualAll", - Filename: "gogo.proto", -} - -var E_FaceAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63005, - Name: "gogoproto.face_all", - Tag: "varint,63005,opt,name=face_all,json=faceAll", - Filename: "gogo.proto", -} - -var E_GostringAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63006, - Name: "gogoproto.gostring_all", - Tag: "varint,63006,opt,name=gostring_all,json=gostringAll", - Filename: "gogo.proto", -} - -var E_PopulateAll = &proto.ExtensionDesc{ - 
ExtendedType: (*google_protobuf.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63007, - Name: "gogoproto.populate_all", - Tag: "varint,63007,opt,name=populate_all,json=populateAll", - Filename: "gogo.proto", -} - -var E_StringerAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63008, - Name: "gogoproto.stringer_all", - Tag: "varint,63008,opt,name=stringer_all,json=stringerAll", - Filename: "gogo.proto", -} - -var E_OnlyoneAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63009, - Name: "gogoproto.onlyone_all", - Tag: "varint,63009,opt,name=onlyone_all,json=onlyoneAll", - Filename: "gogo.proto", -} - -var E_EqualAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63013, - Name: "gogoproto.equal_all", - Tag: "varint,63013,opt,name=equal_all,json=equalAll", - Filename: "gogo.proto", -} - -var E_DescriptionAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63014, - Name: "gogoproto.description_all", - Tag: "varint,63014,opt,name=description_all,json=descriptionAll", - Filename: "gogo.proto", -} - -var E_TestgenAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63015, - Name: "gogoproto.testgen_all", - Tag: "varint,63015,opt,name=testgen_all,json=testgenAll", - Filename: "gogo.proto", -} - -var E_BenchgenAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63016, - Name: "gogoproto.benchgen_all", - Tag: "varint,63016,opt,name=benchgen_all,json=benchgenAll", - Filename: "gogo.proto", -} - -var E_MarshalerAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63017, - Name: "gogoproto.marshaler_all", - Tag: "varint,63017,opt,name=marshaler_all,json=marshalerAll", - Filename: "gogo.proto", -} - -var E_UnmarshalerAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63018, - Name: "gogoproto.unmarshaler_all", - Tag: "varint,63018,opt,name=unmarshaler_all,json=unmarshalerAll", - Filename: "gogo.proto", -} - -var E_StableMarshalerAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63019, - Name: "gogoproto.stable_marshaler_all", - Tag: "varint,63019,opt,name=stable_marshaler_all,json=stableMarshalerAll", - Filename: "gogo.proto", -} - -var E_SizerAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63020, - Name: "gogoproto.sizer_all", - Tag: "varint,63020,opt,name=sizer_all,json=sizerAll", - Filename: "gogo.proto", -} - -var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63021, - Name: "gogoproto.goproto_enum_stringer_all", - Tag: "varint,63021,opt,name=goproto_enum_stringer_all,json=goprotoEnumStringerAll", - Filename: "gogo.proto", -} - -var E_EnumStringerAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63022, - Name: "gogoproto.enum_stringer_all", - Tag: "varint,63022,opt,name=enum_stringer_all,json=enumStringerAll", - Filename: "gogo.proto", -} 
- -var E_UnsafeMarshalerAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63023, - Name: "gogoproto.unsafe_marshaler_all", - Tag: "varint,63023,opt,name=unsafe_marshaler_all,json=unsafeMarshalerAll", - Filename: "gogo.proto", -} - -var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63024, - Name: "gogoproto.unsafe_unmarshaler_all", - Tag: "varint,63024,opt,name=unsafe_unmarshaler_all,json=unsafeUnmarshalerAll", - Filename: "gogo.proto", -} - -var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63025, - Name: "gogoproto.goproto_extensions_map_all", - Tag: "varint,63025,opt,name=goproto_extensions_map_all,json=goprotoExtensionsMapAll", - Filename: "gogo.proto", -} - -var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63026, - Name: "gogoproto.goproto_unrecognized_all", - Tag: "varint,63026,opt,name=goproto_unrecognized_all,json=goprotoUnrecognizedAll", - Filename: "gogo.proto", -} - -var E_GogoprotoImport = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63027, - Name: "gogoproto.gogoproto_import", - Tag: "varint,63027,opt,name=gogoproto_import,json=gogoprotoImport", - Filename: "gogo.proto", -} - -var E_ProtosizerAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63028, - Name: "gogoproto.protosizer_all", - Tag: "varint,63028,opt,name=protosizer_all,json=protosizerAll", - Filename: "gogo.proto", -} - -var E_CompareAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63029, - Name: "gogoproto.compare_all", - Tag: "varint,63029,opt,name=compare_all,json=compareAll", - Filename: "gogo.proto", -} - -var E_TypedeclAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63030, - Name: "gogoproto.typedecl_all", - Tag: "varint,63030,opt,name=typedecl_all,json=typedeclAll", - Filename: "gogo.proto", -} - -var E_EnumdeclAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63031, - Name: "gogoproto.enumdecl_all", - Tag: "varint,63031,opt,name=enumdecl_all,json=enumdeclAll", - Filename: "gogo.proto", -} - -var E_GoprotoRegistration = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63032, - Name: "gogoproto.goproto_registration", - Tag: "varint,63032,opt,name=goproto_registration,json=goprotoRegistration", - Filename: "gogo.proto", -} - -var E_MessagenameAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63033, - Name: "gogoproto.messagename_all", - Tag: "varint,63033,opt,name=messagename_all,json=messagenameAll", - Filename: "gogo.proto", -} - -var E_GoprotoGetters = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64001, - Name: "gogoproto.goproto_getters", - Tag: "varint,64001,opt,name=goproto_getters,json=goprotoGetters", - Filename: "gogo.proto", -} - -var E_GoprotoStringer = &proto.ExtensionDesc{ - ExtendedType: 
(*google_protobuf.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64003, - Name: "gogoproto.goproto_stringer", - Tag: "varint,64003,opt,name=goproto_stringer,json=goprotoStringer", - Filename: "gogo.proto", -} - -var E_VerboseEqual = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64004, - Name: "gogoproto.verbose_equal", - Tag: "varint,64004,opt,name=verbose_equal,json=verboseEqual", - Filename: "gogo.proto", -} - -var E_Face = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64005, - Name: "gogoproto.face", - Tag: "varint,64005,opt,name=face", - Filename: "gogo.proto", -} - -var E_Gostring = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64006, - Name: "gogoproto.gostring", - Tag: "varint,64006,opt,name=gostring", - Filename: "gogo.proto", -} - -var E_Populate = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64007, - Name: "gogoproto.populate", - Tag: "varint,64007,opt,name=populate", - Filename: "gogo.proto", -} - -var E_Stringer = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 67008, - Name: "gogoproto.stringer", - Tag: "varint,67008,opt,name=stringer", - Filename: "gogo.proto", -} - -var E_Onlyone = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64009, - Name: "gogoproto.onlyone", - Tag: "varint,64009,opt,name=onlyone", - Filename: "gogo.proto", -} - -var E_Equal = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64013, - Name: "gogoproto.equal", - Tag: "varint,64013,opt,name=equal", - Filename: "gogo.proto", -} - -var E_Description = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64014, - Name: "gogoproto.description", - Tag: "varint,64014,opt,name=description", - Filename: "gogo.proto", -} - -var E_Testgen = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64015, - Name: "gogoproto.testgen", - Tag: "varint,64015,opt,name=testgen", - Filename: "gogo.proto", -} - -var E_Benchgen = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64016, - Name: "gogoproto.benchgen", - Tag: "varint,64016,opt,name=benchgen", - Filename: "gogo.proto", -} - -var E_Marshaler = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64017, - Name: "gogoproto.marshaler", - Tag: "varint,64017,opt,name=marshaler", - Filename: "gogo.proto", -} - -var E_Unmarshaler = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64018, - Name: "gogoproto.unmarshaler", - Tag: "varint,64018,opt,name=unmarshaler", - Filename: "gogo.proto", -} - -var E_StableMarshaler = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64019, - Name: "gogoproto.stable_marshaler", - Tag: "varint,64019,opt,name=stable_marshaler,json=stableMarshaler", - Filename: "gogo.proto", -} - -var E_Sizer = &proto.ExtensionDesc{ - ExtendedType: 
(*google_protobuf.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64020, - Name: "gogoproto.sizer", - Tag: "varint,64020,opt,name=sizer", - Filename: "gogo.proto", -} - -var E_UnsafeMarshaler = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64023, - Name: "gogoproto.unsafe_marshaler", - Tag: "varint,64023,opt,name=unsafe_marshaler,json=unsafeMarshaler", - Filename: "gogo.proto", -} - -var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64024, - Name: "gogoproto.unsafe_unmarshaler", - Tag: "varint,64024,opt,name=unsafe_unmarshaler,json=unsafeUnmarshaler", - Filename: "gogo.proto", -} - -var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64025, - Name: "gogoproto.goproto_extensions_map", - Tag: "varint,64025,opt,name=goproto_extensions_map,json=goprotoExtensionsMap", - Filename: "gogo.proto", -} - -var E_GoprotoUnrecognized = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64026, - Name: "gogoproto.goproto_unrecognized", - Tag: "varint,64026,opt,name=goproto_unrecognized,json=goprotoUnrecognized", - Filename: "gogo.proto", -} - -var E_Protosizer = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64028, - Name: "gogoproto.protosizer", - Tag: "varint,64028,opt,name=protosizer", - Filename: "gogo.proto", -} - -var E_Compare = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64029, - Name: "gogoproto.compare", - Tag: "varint,64029,opt,name=compare", - Filename: "gogo.proto", -} - -var E_Typedecl = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64030, - Name: "gogoproto.typedecl", - Tag: "varint,64030,opt,name=typedecl", - Filename: "gogo.proto", -} - -var E_Messagename = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64033, - Name: "gogoproto.messagename", - Tag: "varint,64033,opt,name=messagename", - Filename: "gogo.proto", -} - -var E_Nullable = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 65001, - Name: "gogoproto.nullable", - Tag: "varint,65001,opt,name=nullable", - Filename: "gogo.proto", -} - -var E_Embed = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 65002, - Name: "gogoproto.embed", - Tag: "varint,65002,opt,name=embed", - Filename: "gogo.proto", -} - -var E_Customtype = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 65003, - Name: "gogoproto.customtype", - Tag: "bytes,65003,opt,name=customtype", - Filename: "gogo.proto", -} - -var E_Customname = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 65004, - Name: "gogoproto.customname", - Tag: "bytes,65004,opt,name=customname", - Filename: "gogo.proto", -} - -var E_Jsontag = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 65005, - Name: "gogoproto.jsontag", - Tag: 
"bytes,65005,opt,name=jsontag", - Filename: "gogo.proto", -} - -var E_Moretags = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 65006, - Name: "gogoproto.moretags", - Tag: "bytes,65006,opt,name=moretags", - Filename: "gogo.proto", -} - -var E_Casttype = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 65007, - Name: "gogoproto.casttype", - Tag: "bytes,65007,opt,name=casttype", - Filename: "gogo.proto", -} - -var E_Castkey = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 65008, - Name: "gogoproto.castkey", - Tag: "bytes,65008,opt,name=castkey", - Filename: "gogo.proto", -} - -var E_Castvalue = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 65009, - Name: "gogoproto.castvalue", - Tag: "bytes,65009,opt,name=castvalue", - Filename: "gogo.proto", -} - -var E_Stdtime = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 65010, - Name: "gogoproto.stdtime", - Tag: "varint,65010,opt,name=stdtime", - Filename: "gogo.proto", -} - -var E_Stdduration = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 65011, - Name: "gogoproto.stdduration", - Tag: "varint,65011,opt,name=stdduration", - Filename: "gogo.proto", -} - -func init() { - proto.RegisterExtension(E_GoprotoEnumPrefix) - proto.RegisterExtension(E_GoprotoEnumStringer) - proto.RegisterExtension(E_EnumStringer) - proto.RegisterExtension(E_EnumCustomname) - proto.RegisterExtension(E_Enumdecl) - proto.RegisterExtension(E_EnumvalueCustomname) - proto.RegisterExtension(E_GoprotoGettersAll) - proto.RegisterExtension(E_GoprotoEnumPrefixAll) - proto.RegisterExtension(E_GoprotoStringerAll) - proto.RegisterExtension(E_VerboseEqualAll) - proto.RegisterExtension(E_FaceAll) - proto.RegisterExtension(E_GostringAll) - proto.RegisterExtension(E_PopulateAll) - proto.RegisterExtension(E_StringerAll) - proto.RegisterExtension(E_OnlyoneAll) - proto.RegisterExtension(E_EqualAll) - proto.RegisterExtension(E_DescriptionAll) - proto.RegisterExtension(E_TestgenAll) - proto.RegisterExtension(E_BenchgenAll) - proto.RegisterExtension(E_MarshalerAll) - proto.RegisterExtension(E_UnmarshalerAll) - proto.RegisterExtension(E_StableMarshalerAll) - proto.RegisterExtension(E_SizerAll) - proto.RegisterExtension(E_GoprotoEnumStringerAll) - proto.RegisterExtension(E_EnumStringerAll) - proto.RegisterExtension(E_UnsafeMarshalerAll) - proto.RegisterExtension(E_UnsafeUnmarshalerAll) - proto.RegisterExtension(E_GoprotoExtensionsMapAll) - proto.RegisterExtension(E_GoprotoUnrecognizedAll) - proto.RegisterExtension(E_GogoprotoImport) - proto.RegisterExtension(E_ProtosizerAll) - proto.RegisterExtension(E_CompareAll) - proto.RegisterExtension(E_TypedeclAll) - proto.RegisterExtension(E_EnumdeclAll) - proto.RegisterExtension(E_GoprotoRegistration) - proto.RegisterExtension(E_MessagenameAll) - proto.RegisterExtension(E_GoprotoGetters) - proto.RegisterExtension(E_GoprotoStringer) - proto.RegisterExtension(E_VerboseEqual) - proto.RegisterExtension(E_Face) - proto.RegisterExtension(E_Gostring) - proto.RegisterExtension(E_Populate) - proto.RegisterExtension(E_Stringer) - proto.RegisterExtension(E_Onlyone) - proto.RegisterExtension(E_Equal) - proto.RegisterExtension(E_Description) - 
proto.RegisterExtension(E_Testgen) - proto.RegisterExtension(E_Benchgen) - proto.RegisterExtension(E_Marshaler) - proto.RegisterExtension(E_Unmarshaler) - proto.RegisterExtension(E_StableMarshaler) - proto.RegisterExtension(E_Sizer) - proto.RegisterExtension(E_UnsafeMarshaler) - proto.RegisterExtension(E_UnsafeUnmarshaler) - proto.RegisterExtension(E_GoprotoExtensionsMap) - proto.RegisterExtension(E_GoprotoUnrecognized) - proto.RegisterExtension(E_Protosizer) - proto.RegisterExtension(E_Compare) - proto.RegisterExtension(E_Typedecl) - proto.RegisterExtension(E_Messagename) - proto.RegisterExtension(E_Nullable) - proto.RegisterExtension(E_Embed) - proto.RegisterExtension(E_Customtype) - proto.RegisterExtension(E_Customname) - proto.RegisterExtension(E_Jsontag) - proto.RegisterExtension(E_Moretags) - proto.RegisterExtension(E_Casttype) - proto.RegisterExtension(E_Castkey) - proto.RegisterExtension(E_Castvalue) - proto.RegisterExtension(E_Stdtime) - proto.RegisterExtension(E_Stdduration) -} - -func init() { proto.RegisterFile("gogo.proto", fileDescriptorGogo) } - -var fileDescriptorGogo = []byte{ - // 1246 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x49, 0x6f, 0x1c, 0x45, - 0x14, 0x80, 0x85, 0x70, 0x64, 0xcf, 0xf3, 0x86, 0xc7, 0xc6, 0x84, 0x08, 0x44, 0xe0, 0xc4, 0xc9, - 0x3e, 0x45, 0x28, 0x65, 0x45, 0x96, 0x63, 0x39, 0x56, 0x10, 0x06, 0x63, 0xe2, 0xb0, 0x1d, 0x46, - 0x3d, 0x33, 0xe5, 0x76, 0x43, 0x77, 0xd7, 0xd0, 0x5d, 0x1d, 0xc5, 0xb9, 0xa1, 0xb0, 0x08, 0x21, - 0x76, 0x24, 0x48, 0x48, 0x02, 0x39, 0xb0, 0xaf, 0x61, 0xe7, 0xc6, 0x85, 0xe5, 0xca, 0x7f, 0xe0, - 0x02, 0x98, 0xdd, 0x37, 0x5f, 0xa2, 0xd7, 0xfd, 0x5e, 0x4f, 0xcd, 0x78, 0xa4, 0xaa, 0xb9, 0xb5, - 0xed, 0xfa, 0x3e, 0x57, 0xbf, 0x57, 0xf5, 0xde, 0x9b, 0x01, 0xf0, 0x95, 0xaf, 0x66, 0x5a, 0x89, - 0xd2, 0xaa, 0x5a, 0xc1, 0xe7, 0xfc, 0xf1, 0xc0, 0x41, 0x5f, 0x29, 0x3f, 0x94, 0xb3, 0xf9, 0x4f, - 0xf5, 0x6c, 0x63, 0xb6, 0x29, 0xd3, 0x46, 0x12, 0xb4, 0xb4, 0x4a, 0x8a, 0xc5, 0xe2, 0x6e, 0x98, - 0xa4, 0xc5, 0x35, 0x19, 0x67, 0x51, 0xad, 0x95, 0xc8, 0x8d, 0xe0, 0x74, 0xf5, 0xa6, 0x99, 0x82, - 0x9c, 0x61, 0x72, 0x66, 0x29, 0xce, 0xa2, 0x7b, 0x5a, 0x3a, 0x50, 0x71, 0xba, 0xff, 0xca, 0xaf, - 0xd7, 0x1e, 0xbc, 0xe6, 0xf6, 0xa1, 0xb5, 0x09, 0x42, 0xf1, 0x6f, 0xab, 0x39, 0x28, 0xd6, 0xe0, - 0xfa, 0x0e, 0x5f, 0xaa, 0x93, 0x20, 0xf6, 0x65, 0x62, 0x31, 0xfe, 0x40, 0xc6, 0x49, 0xc3, 0x78, - 0x1f, 0xa1, 0x62, 0x11, 0x46, 0xfb, 0x71, 0xfd, 0x48, 0xae, 0x11, 0x69, 0x4a, 0x96, 0x61, 0x3c, - 0x97, 0x34, 0xb2, 0x54, 0xab, 0x28, 0xf6, 0x22, 0x69, 0xd1, 0xfc, 0x94, 0x6b, 0x2a, 0x6b, 0x63, - 0x88, 0x2d, 0x96, 0x94, 0x10, 0x30, 0x84, 0xbf, 0x69, 0xca, 0x46, 0x68, 0x31, 0xfc, 0x4c, 0x1b, - 0x29, 0xd7, 0x8b, 0x93, 0x30, 0x85, 0xcf, 0xa7, 0xbc, 0x30, 0x93, 0xe6, 0x4e, 0x6e, 0xed, 0xe9, - 0x39, 0x89, 0xcb, 0x58, 0xf6, 0xcb, 0xd9, 0x81, 0x7c, 0x3b, 0x93, 0xa5, 0xc0, 0xd8, 0x93, 0x91, - 0x45, 0x5f, 0x6a, 0x2d, 0x93, 0xb4, 0xe6, 0x85, 0xbd, 0xb6, 0x77, 0x2c, 0x08, 0x4b, 0xe3, 0xb9, - 0xed, 0xce, 0x2c, 0x2e, 0x17, 0xe4, 0x42, 0x18, 0x8a, 0x75, 0xb8, 0xa1, 0xc7, 0xa9, 0x70, 0x70, - 0x9e, 0x27, 0xe7, 0xd4, 0x9e, 0x93, 0x81, 0xda, 0x55, 0xe0, 0xdf, 0x97, 0xb9, 0x74, 0x70, 0xbe, - 0x41, 0xce, 0x2a, 0xb1, 0x9c, 0x52, 0x34, 0xde, 0x09, 0x13, 0xa7, 0x64, 0x52, 0x57, 0xa9, 0xac, - 0xc9, 0xc7, 0x32, 0x2f, 0x74, 0xd0, 0x5d, 0x20, 0xdd, 0x38, 0x81, 0x4b, 0xc8, 0xa1, 0xeb, 0x30, - 0x0c, 0x6d, 0x78, 0x0d, 0xe9, 0xa0, 0xb8, 0x48, 0x8a, 0x41, 0x5c, 0x8f, 0xe8, 0x02, 0x8c, 0xf8, - 0xaa, 0x78, 0x25, 0x07, 0xfc, 0x12, 
0xe1, 0xc3, 0xcc, 0x90, 0xa2, 0xa5, 0x5a, 0x59, 0xe8, 0x69, - 0x97, 0x1d, 0xbc, 0xc9, 0x0a, 0x66, 0x48, 0xd1, 0x47, 0x58, 0xdf, 0x62, 0x45, 0x6a, 0xc4, 0x73, - 0x1e, 0x86, 0x55, 0x1c, 0x6e, 0xa9, 0xd8, 0x65, 0x13, 0x97, 0xc9, 0x00, 0x84, 0xa0, 0x60, 0x0e, - 0x2a, 0xae, 0x89, 0x78, 0x7b, 0x9b, 0xaf, 0x07, 0x67, 0x60, 0x19, 0xc6, 0xb9, 0x40, 0x05, 0x2a, - 0x76, 0x50, 0xbc, 0x43, 0x8a, 0x31, 0x03, 0xa3, 0xd7, 0xd0, 0x32, 0xd5, 0xbe, 0x74, 0x91, 0xbc, - 0xcb, 0xaf, 0x41, 0x08, 0x85, 0xb2, 0x2e, 0xe3, 0xc6, 0xa6, 0x9b, 0xe1, 0x3d, 0x0e, 0x25, 0x33, - 0xa8, 0x58, 0x84, 0xd1, 0xc8, 0x4b, 0xd2, 0x4d, 0x2f, 0x74, 0x4a, 0xc7, 0xfb, 0xe4, 0x18, 0x29, - 0x21, 0x8a, 0x48, 0x16, 0xf7, 0xa3, 0xf9, 0x80, 0x23, 0x62, 0x60, 0x74, 0xf5, 0x52, 0xed, 0xd5, - 0x43, 0x59, 0xeb, 0xc7, 0xf6, 0x21, 0x5f, 0xbd, 0x82, 0x5d, 0x31, 0x8d, 0x73, 0x50, 0x49, 0x83, - 0x33, 0x4e, 0x9a, 0x8f, 0x38, 0xd3, 0x39, 0x80, 0xf0, 0x83, 0x70, 0x63, 0xcf, 0x36, 0xe1, 0x20, - 0xfb, 0x98, 0x64, 0xd3, 0x3d, 0x5a, 0x05, 0x95, 0x84, 0x7e, 0x95, 0x9f, 0x70, 0x49, 0x90, 0x5d, - 0xae, 0x55, 0x98, 0xca, 0xe2, 0xd4, 0xdb, 0xe8, 0x2f, 0x6a, 0x9f, 0x72, 0xd4, 0x0a, 0xb6, 0x23, - 0x6a, 0x27, 0x60, 0x9a, 0x8c, 0xfd, 0xe5, 0xf5, 0x33, 0x2e, 0xac, 0x05, 0xbd, 0xde, 0x99, 0xdd, - 0x87, 0xe1, 0x40, 0x19, 0xce, 0xd3, 0x5a, 0xc6, 0x29, 0x32, 0xb5, 0xc8, 0x6b, 0x39, 0x98, 0xaf, - 0x90, 0x99, 0x2b, 0xfe, 0x52, 0x29, 0x58, 0xf1, 0x5a, 0x28, 0x7f, 0x00, 0xf6, 0xb3, 0x3c, 0x8b, - 0x13, 0xd9, 0x50, 0x7e, 0x1c, 0x9c, 0x91, 0x4d, 0x07, 0xf5, 0xe7, 0x5d, 0xa9, 0x5a, 0x37, 0x70, - 0x34, 0x1f, 0x87, 0xeb, 0xca, 0x59, 0xa5, 0x16, 0x44, 0x2d, 0x95, 0x68, 0x8b, 0xf1, 0x0b, 0xce, - 0x54, 0xc9, 0x1d, 0xcf, 0x31, 0xb1, 0x04, 0x63, 0xf9, 0x8f, 0xae, 0x47, 0xf2, 0x4b, 0x12, 0x8d, - 0xb6, 0x29, 0x2a, 0x1c, 0x0d, 0x15, 0xb5, 0xbc, 0xc4, 0xa5, 0xfe, 0x7d, 0xc5, 0x85, 0x83, 0x10, - 0x2a, 0x1c, 0x7a, 0xab, 0x25, 0xb1, 0xdb, 0x3b, 0x18, 0xbe, 0xe6, 0xc2, 0xc1, 0x0c, 0x29, 0x78, - 0x60, 0x70, 0x50, 0x7c, 0xc3, 0x0a, 0x66, 0x50, 0x71, 0x6f, 0xbb, 0xd1, 0x26, 0xd2, 0x0f, 0x52, - 0x9d, 0x78, 0xb8, 0xda, 0xa2, 0xfa, 0x76, 0xbb, 0x73, 0x08, 0x5b, 0x33, 0x50, 0xac, 0x44, 0x91, - 0x4c, 0x53, 0xcf, 0x97, 0x38, 0x71, 0x38, 0x6c, 0xec, 0x3b, 0xae, 0x44, 0x06, 0x56, 0xdc, 0xcf, - 0xf1, 0xae, 0x59, 0xa5, 0x7a, 0xcb, 0x1e, 0xd1, 0x4a, 0xc1, 0xb0, 0xeb, 0xf1, 0x1d, 0x72, 0x75, - 0x8e, 0x2a, 0xe2, 0x2e, 0x3c, 0x40, 0x9d, 0x03, 0x85, 0x5d, 0x76, 0x76, 0xa7, 0x3c, 0x43, 0x1d, - 0xf3, 0x84, 0x38, 0x06, 0xa3, 0x1d, 0xc3, 0x84, 0x5d, 0xf5, 0x04, 0xa9, 0x46, 0xcc, 0x59, 0x42, - 0x1c, 0x82, 0x01, 0x1c, 0x0c, 0xec, 0xf8, 0x93, 0x84, 0xe7, 0xcb, 0xc5, 0x11, 0x18, 0xe2, 0x81, - 0xc0, 0x8e, 0x3e, 0x45, 0x68, 0x89, 0x20, 0xce, 0xc3, 0x80, 0x1d, 0x7f, 0x9a, 0x71, 0x46, 0x10, - 0x77, 0x0f, 0xe1, 0xf7, 0xcf, 0x0e, 0x50, 0x41, 0xe7, 0xd8, 0xcd, 0xc1, 0x20, 0x4d, 0x01, 0x76, - 0xfa, 0x19, 0xfa, 0xe7, 0x4c, 0x88, 0x3b, 0x60, 0x9f, 0x63, 0xc0, 0x9f, 0x23, 0xb4, 0x58, 0x2f, - 0x16, 0x61, 0xd8, 0xe8, 0xfc, 0x76, 0xfc, 0x79, 0xc2, 0x4d, 0x0a, 0xb7, 0x4e, 0x9d, 0xdf, 0x2e, - 0x78, 0x81, 0xb7, 0x4e, 0x04, 0x86, 0x8d, 0x9b, 0xbe, 0x9d, 0x7e, 0x91, 0xa3, 0xce, 0x88, 0x98, - 0x87, 0x4a, 0x59, 0xc8, 0xed, 0xfc, 0x4b, 0xc4, 0xb7, 0x19, 0x8c, 0x80, 0xd1, 0x48, 0xec, 0x8a, - 0x97, 0x39, 0x02, 0x06, 0x85, 0xd7, 0xa8, 0x7b, 0x38, 0xb0, 0x9b, 0x5e, 0xe1, 0x6b, 0xd4, 0x35, - 0x1b, 0x60, 0x36, 0xf3, 0x7a, 0x6a, 0x57, 0xbc, 0xca, 0xd9, 0xcc, 0xd7, 0xe3, 0x36, 0xba, 0xbb, - 0xad, 0xdd, 0xf1, 0x1a, 0x6f, 0xa3, 0xab, 0xd9, 0x8a, 0x55, 0xa8, 0xee, 0xed, 0xb4, 0x76, 0xdf, - 0xeb, 0xe4, 0x9b, 0xd8, 0xd3, 0x68, 0xc5, 0xfd, 0x30, 0xdd, 
0xbb, 0xcb, 0xda, 0xad, 0xe7, 0x76, - 0xba, 0x3e, 0x17, 0x99, 0x4d, 0x56, 0x9c, 0x68, 0x97, 0x6b, 0xb3, 0xc3, 0xda, 0xb5, 0xe7, 0x77, - 0x3a, 0x2b, 0xb6, 0xd9, 0x60, 0xc5, 0x02, 0x40, 0xbb, 0xb9, 0xd9, 0x5d, 0x17, 0xc8, 0x65, 0x40, - 0x78, 0x35, 0xa8, 0xb7, 0xd9, 0xf9, 0x8b, 0x7c, 0x35, 0x88, 0xc0, 0xab, 0xc1, 0x6d, 0xcd, 0x4e, - 0x5f, 0xe2, 0xab, 0xc1, 0x08, 0x9e, 0x6c, 0xa3, 0x73, 0xd8, 0x0d, 0x97, 0xf9, 0x64, 0x1b, 0x94, - 0x98, 0x83, 0xa1, 0x38, 0x0b, 0x43, 0x3c, 0xa0, 0xd5, 0x9b, 0x7b, 0xb4, 0x2b, 0x19, 0x36, 0x99, - 0xff, 0x6d, 0x97, 0x76, 0xc0, 0x80, 0x38, 0x04, 0xfb, 0x64, 0x54, 0x97, 0x4d, 0x1b, 0xf9, 0xfb, - 0x2e, 0x17, 0x25, 0x5c, 0x2d, 0xe6, 0x01, 0x8a, 0x8f, 0xf6, 0xf8, 0x2a, 0x36, 0xf6, 0x8f, 0xdd, - 0xe2, 0x5b, 0x06, 0x03, 0x69, 0x0b, 0xf2, 0x17, 0xb7, 0x08, 0xb6, 0x3b, 0x05, 0xf9, 0x5b, 0x1f, - 0x86, 0xc1, 0x47, 0x52, 0x15, 0x6b, 0xcf, 0xb7, 0xd1, 0x7f, 0x12, 0xcd, 0xeb, 0x31, 0x60, 0x91, - 0x4a, 0xa4, 0xf6, 0xfc, 0xd4, 0xc6, 0xfe, 0x45, 0x6c, 0x09, 0x20, 0xdc, 0xf0, 0x52, 0xed, 0xf2, - 0xde, 0x7f, 0x33, 0xcc, 0x00, 0x6e, 0x1a, 0x9f, 0x1f, 0x95, 0x5b, 0x36, 0xf6, 0x1f, 0xde, 0x34, - 0xad, 0x17, 0x47, 0xa0, 0x82, 0x8f, 0xf9, 0xb7, 0x22, 0x36, 0xf8, 0x5f, 0x82, 0xdb, 0x04, 0xfe, - 0xe7, 0x54, 0x37, 0x75, 0x60, 0x0f, 0xf6, 0x7f, 0x94, 0x69, 0x5e, 0x2f, 0x16, 0x60, 0x38, 0xd5, - 0xcd, 0x66, 0x46, 0xf3, 0x95, 0x05, 0xff, 0x7f, 0xb7, 0xfc, 0xc8, 0x5d, 0x32, 0x47, 0x97, 0x60, - 0xb2, 0xa1, 0xa2, 0x6e, 0xf0, 0x28, 0x2c, 0xab, 0x65, 0xb5, 0x9a, 0x5f, 0xc5, 0x87, 0x6e, 0xf3, - 0x03, 0xbd, 0x99, 0xd5, 0x67, 0x1a, 0x2a, 0x9a, 0xc5, 0xc1, 0xb7, 0xfd, 0x7d, 0x5e, 0x39, 0x06, - 0x5f, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x51, 0xf0, 0xa5, 0x95, 0x02, 0x14, 0x00, 0x00, -} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden deleted file mode 100644 index f6502e4..0000000 --- a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden +++ /dev/null @@ -1,45 +0,0 @@ -// Code generated by protoc-gen-go. -// source: gogo.proto -// DO NOT EDIT! - -package gogoproto - -import proto "github.com/gogo/protobuf/proto" -import json "encoding/json" -import math "math" -import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" - -// Reference proto, json, and math imports to suppress error if they are not otherwise used. -var _ = proto.Marshal -var _ = &json.SyntaxError{} -var _ = math.Inf - -var E_Nullable = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 51235, - Name: "gogoproto.nullable", - Tag: "varint,51235,opt,name=nullable", -} - -var E_Embed = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 51236, - Name: "gogoproto.embed", - Tag: "varint,51236,opt,name=embed", -} - -var E_Customtype = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 51237, - Name: "gogoproto.customtype", - Tag: "bytes,51237,opt,name=customtype", -} - -func init() { - proto.RegisterExtension(E_Nullable) - proto.RegisterExtension(E_Embed) - proto.RegisterExtension(E_Customtype) -} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto deleted file mode 100644 index bc8d889..0000000 --- a/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto +++ /dev/null @@ -1,136 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
-// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto2"; -package gogoproto; - -import "google/protobuf/descriptor.proto"; - -option java_package = "com.google.protobuf"; -option java_outer_classname = "GoGoProtos"; -option go_package = "github.com/gogo/protobuf/gogoproto"; - -extend google.protobuf.EnumOptions { - optional bool goproto_enum_prefix = 62001; - optional bool goproto_enum_stringer = 62021; - optional bool enum_stringer = 62022; - optional string enum_customname = 62023; - optional bool enumdecl = 62024; -} - -extend google.protobuf.EnumValueOptions { - optional string enumvalue_customname = 66001; -} - -extend google.protobuf.FileOptions { - optional bool goproto_getters_all = 63001; - optional bool goproto_enum_prefix_all = 63002; - optional bool goproto_stringer_all = 63003; - optional bool verbose_equal_all = 63004; - optional bool face_all = 63005; - optional bool gostring_all = 63006; - optional bool populate_all = 63007; - optional bool stringer_all = 63008; - optional bool onlyone_all = 63009; - - optional bool equal_all = 63013; - optional bool description_all = 63014; - optional bool testgen_all = 63015; - optional bool benchgen_all = 63016; - optional bool marshaler_all = 63017; - optional bool unmarshaler_all = 63018; - optional bool stable_marshaler_all = 63019; - - optional bool sizer_all = 63020; - - optional bool goproto_enum_stringer_all = 63021; - optional bool enum_stringer_all = 63022; - - optional bool unsafe_marshaler_all = 63023; - optional bool unsafe_unmarshaler_all = 63024; - - optional bool goproto_extensions_map_all = 63025; - optional bool goproto_unrecognized_all = 63026; - optional bool gogoproto_import = 63027; - optional bool protosizer_all = 63028; - optional bool compare_all = 63029; - optional bool typedecl_all = 63030; - optional bool enumdecl_all = 63031; - - optional bool goproto_registration = 63032; - optional bool messagename_all = 63033; -} - -extend google.protobuf.MessageOptions { - optional bool goproto_getters = 64001; - optional bool goproto_stringer = 64003; - optional bool verbose_equal = 64004; - optional bool face = 64005; - optional bool gostring = 64006; - optional bool populate = 64007; - optional bool stringer = 67008; - 
optional bool onlyone = 64009; - - optional bool equal = 64013; - optional bool description = 64014; - optional bool testgen = 64015; - optional bool benchgen = 64016; - optional bool marshaler = 64017; - optional bool unmarshaler = 64018; - optional bool stable_marshaler = 64019; - - optional bool sizer = 64020; - - optional bool unsafe_marshaler = 64023; - optional bool unsafe_unmarshaler = 64024; - - optional bool goproto_extensions_map = 64025; - optional bool goproto_unrecognized = 64026; - - optional bool protosizer = 64028; - optional bool compare = 64029; - - optional bool typedecl = 64030; - - optional bool messagename = 64033; -} - -extend google.protobuf.FieldOptions { - optional bool nullable = 65001; - optional bool embed = 65002; - optional string customtype = 65003; - optional string customname = 65004; - optional string jsontag = 65005; - optional string moretags = 65006; - optional string casttype = 65007; - optional string castkey = 65008; - optional string castvalue = 65009; - - optional bool stdtime = 65010; - optional bool stdduration = 65011; -} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/helper.go b/vendor/github.com/gogo/protobuf/gogoproto/helper.go deleted file mode 100644 index a96760d..0000000 --- a/vendor/github.com/gogo/protobuf/gogoproto/helper.go +++ /dev/null @@ -1,361 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package gogoproto - -import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" -import proto "github.com/gogo/protobuf/proto" - -func IsEmbed(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Embed, false) -} - -func IsNullable(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Nullable, true) -} - -func IsStdTime(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Stdtime, false) -} - -func IsStdDuration(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Stdduration, false) -} - -func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool { - nullable := IsNullable(field) - if field.IsMessage() || IsCustomType(field) { - return nullable - } - if proto3 { - return false - } - return nullable || *field.Type == google_protobuf.FieldDescriptorProto_TYPE_BYTES -} - -func IsCustomType(field *google_protobuf.FieldDescriptorProto) bool { - typ := GetCustomType(field) - if len(typ) > 0 { - return true - } - return false -} - -func IsCastType(field *google_protobuf.FieldDescriptorProto) bool { - typ := GetCastType(field) - if len(typ) > 0 { - return true - } - return false -} - -func IsCastKey(field *google_protobuf.FieldDescriptorProto) bool { - typ := GetCastKey(field) - if len(typ) > 0 { - return true - } - return false -} - -func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool { - typ := GetCastValue(field) - if len(typ) > 0 { - return true - } - return false -} - -func HasEnumDecl(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { - return proto.GetBoolExtension(enum.Options, E_Enumdecl, proto.GetBoolExtension(file.Options, E_EnumdeclAll, true)) -} - -func HasTypeDecl(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Typedecl, proto.GetBoolExtension(file.Options, E_TypedeclAll, true)) -} - -func GetCustomType(field *google_protobuf.FieldDescriptorProto) string { - if field == nil { - return "" - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, E_Customtype) - if err == nil && v.(*string) != nil { - return *(v.(*string)) - } - } - return "" -} - -func GetCastType(field *google_protobuf.FieldDescriptorProto) string { - if field == nil { - return "" - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, E_Casttype) - if err == nil && v.(*string) != nil { - return *(v.(*string)) - } - } - return "" -} - -func GetCastKey(field *google_protobuf.FieldDescriptorProto) string { - if field == nil { - return "" - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, E_Castkey) - if err == nil && v.(*string) != nil { - return *(v.(*string)) - } - } - return "" -} - -func GetCastValue(field *google_protobuf.FieldDescriptorProto) string { - if field == nil { - return "" - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, E_Castvalue) - if err == nil && v.(*string) != nil { - return *(v.(*string)) - } - } - return "" -} - -func IsCustomName(field *google_protobuf.FieldDescriptorProto) bool { - name := GetCustomName(field) - if len(name) > 0 { - return true - } - return false -} - -func IsEnumCustomName(field *google_protobuf.EnumDescriptorProto) bool { - name := GetEnumCustomName(field) - if len(name) > 0 { - return true - } - 
return false -} - -func IsEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) bool { - name := GetEnumValueCustomName(field) - if len(name) > 0 { - return true - } - return false -} - -func GetCustomName(field *google_protobuf.FieldDescriptorProto) string { - if field == nil { - return "" - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, E_Customname) - if err == nil && v.(*string) != nil { - return *(v.(*string)) - } - } - return "" -} - -func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string { - if field == nil { - return "" - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, E_EnumCustomname) - if err == nil && v.(*string) != nil { - return *(v.(*string)) - } - } - return "" -} - -func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string { - if field == nil { - return "" - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname) - if err == nil && v.(*string) != nil { - return *(v.(*string)) - } - } - return "" -} - -func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string { - if field == nil { - return nil - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, E_Jsontag) - if err == nil && v.(*string) != nil { - return (v.(*string)) - } - } - return nil -} - -func GetMoreTags(field *google_protobuf.FieldDescriptorProto) *string { - if field == nil { - return nil - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, E_Moretags) - if err == nil && v.(*string) != nil { - return (v.(*string)) - } - } - return nil -} - -type EnableFunc func(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool - -func EnabledGoEnumPrefix(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { - return proto.GetBoolExtension(enum.Options, E_GoprotoEnumPrefix, proto.GetBoolExtension(file.Options, E_GoprotoEnumPrefixAll, true)) -} - -func EnabledGoStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_GoprotoStringer, proto.GetBoolExtension(file.Options, E_GoprotoStringerAll, true)) -} - -func HasGoGetters(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_GoprotoGetters, proto.GetBoolExtension(file.Options, E_GoprotoGettersAll, true)) -} - -func IsUnion(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Onlyone, proto.GetBoolExtension(file.Options, E_OnlyoneAll, false)) -} - -func HasGoString(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Gostring, proto.GetBoolExtension(file.Options, E_GostringAll, false)) -} - -func HasEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Equal, proto.GetBoolExtension(file.Options, E_EqualAll, false)) -} - -func HasVerboseEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_VerboseEqual, proto.GetBoolExtension(file.Options, E_VerboseEqualAll, false)) -} - -func IsStringer(file *google_protobuf.FileDescriptorProto, message 
*google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Stringer, proto.GetBoolExtension(file.Options, E_StringerAll, false)) -} - -func IsFace(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Face, proto.GetBoolExtension(file.Options, E_FaceAll, false)) -} - -func HasDescription(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Description, proto.GetBoolExtension(file.Options, E_DescriptionAll, false)) -} - -func HasPopulate(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Populate, proto.GetBoolExtension(file.Options, E_PopulateAll, false)) -} - -func HasTestGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Testgen, proto.GetBoolExtension(file.Options, E_TestgenAll, false)) -} - -func HasBenchGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Benchgen, proto.GetBoolExtension(file.Options, E_BenchgenAll, false)) -} - -func IsMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Marshaler, proto.GetBoolExtension(file.Options, E_MarshalerAll, false)) -} - -func IsUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Unmarshaler, proto.GetBoolExtension(file.Options, E_UnmarshalerAll, false)) -} - -func IsStableMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_StableMarshaler, proto.GetBoolExtension(file.Options, E_StableMarshalerAll, false)) -} - -func IsSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Sizer, proto.GetBoolExtension(file.Options, E_SizerAll, false)) -} - -func IsProtoSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Protosizer, proto.GetBoolExtension(file.Options, E_ProtosizerAll, false)) -} - -func IsGoEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { - return proto.GetBoolExtension(enum.Options, E_GoprotoEnumStringer, proto.GetBoolExtension(file.Options, E_GoprotoEnumStringerAll, true)) -} - -func IsEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { - return proto.GetBoolExtension(enum.Options, E_EnumStringer, proto.GetBoolExtension(file.Options, E_EnumStringerAll, false)) -} - -func IsUnsafeMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_UnsafeMarshaler, proto.GetBoolExtension(file.Options, E_UnsafeMarshalerAll, false)) -} - -func IsUnsafeUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_UnsafeUnmarshaler, proto.GetBoolExtension(file.Options, E_UnsafeUnmarshalerAll, 
false)) -} - -func HasExtensionsMap(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_GoprotoExtensionsMap, proto.GetBoolExtension(file.Options, E_GoprotoExtensionsMapAll, true)) -} - -func HasUnrecognized(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - if IsProto3(file) { - return false - } - return proto.GetBoolExtension(message.Options, E_GoprotoUnrecognized, proto.GetBoolExtension(file.Options, E_GoprotoUnrecognizedAll, true)) -} - -func IsProto3(file *google_protobuf.FileDescriptorProto) bool { - return file.GetSyntax() == "proto3" -} - -func ImportsGoGoProto(file *google_protobuf.FileDescriptorProto) bool { - return proto.GetBoolExtension(file.Options, E_GogoprotoImport, true) -} - -func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Compare, proto.GetBoolExtension(file.Options, E_CompareAll, false)) -} - -func RegistersGolangProto(file *google_protobuf.FileDescriptorProto) bool { - return proto.GetBoolExtension(file.Options, E_GoprotoRegistration, false) -} - -func HasMessageName(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Messagename, proto.GetBoolExtension(file.Options, E_MessagenameAll, false)) -} diff --git a/vendor/github.com/gogo/protobuf/proto/Makefile b/vendor/github.com/gogo/protobuf/proto/Makefile deleted file mode 100644 index 41c7175..0000000 --- a/vendor/github.com/gogo/protobuf/proto/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
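A quick illustration of the gogoproto helper accessors removed above, before the vendor deletions continue below. They all funnel through proto.GetBoolExtension, which consults the field- or message-level option first, then the file-level *_all option, then a hard-coded default. This is only a sketch of how a generator plugin consumes them; the plugin wiring is omitted and describeField is an illustrative name.

    package main

    import (
    	"fmt"

    	"github.com/gogo/protobuf/gogoproto"
    	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
    )

    // describeField sketches the per-field option checks a generator makes;
    // in a real plugin fd comes out of a CodeGeneratorRequest.
    func describeField(fd *descriptor.FieldDescriptorProto) {
    	if ct := gogoproto.GetCustomType(fd); ct != "" {
    		fmt.Println("custom type:", ct) // (gogoproto.customtype) is set
    	}
    	if !gogoproto.IsNullable(fd) {
    		fmt.Println("value field") // (gogoproto.nullable) = false
    	}
    }

    func main() {
    	describeField(&descriptor.FieldDescriptorProto{}) // no options: prints nothing
    }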
-
-install:
-	go install
-
-test: install generate-test-pbs
-	go test
-
-
-generate-test-pbs:
-	make install
-	make -C testdata
-	protoc-min-version --version="3.0.0" --proto_path=.:../../../../:../protobuf --gogo_out=Mtestdata/test.proto=github.com/gogo/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/gogo/protobuf/types:. proto3_proto/proto3.proto
-	make
diff --git a/vendor/github.com/gogo/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go
deleted file mode 100644
index 5d4cba4..0000000
--- a/vendor/github.com/gogo/protobuf/proto/clone.go
+++ /dev/null
@@ -1,234 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2011 The Go Authors.  All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Protocol buffer deep copy and merge.
-// TODO: RawMessage.
-
-package proto
-
-import (
-	"log"
-	"reflect"
-	"strings"
-)
-
-// Clone returns a deep copy of a protocol buffer.
-func Clone(pb Message) Message {
-	in := reflect.ValueOf(pb)
-	if in.IsNil() {
-		return pb
-	}
-
-	out := reflect.New(in.Type().Elem())
-	// out is empty so a merge is a deep copy.
-	mergeStruct(out.Elem(), in.Elem())
-	return out.Interface().(Message)
-}
-
-// Merge merges src into dst.
-// Required and optional fields that are set in src will be set to that value in dst.
-// Elements of repeated fields will be appended.
-// Merge panics if src and dst are not the same type, or if dst is nil.
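Before the Merge implementation continues below, a quick illustration of the Clone/Merge contract documented above. pb.Item is a hypothetical generated message with a repeated string field Tags; the import path is illustrative only.

    package main

    import (
    	pb "example.com/gen/items" // hypothetical generated package

    	"github.com/gogo/protobuf/proto"
    )

    func main() {
    	orig := &pb.Item{Tags: []string{"a"}}

    	dup := proto.Clone(orig).(*pb.Item) // deep copy: a merge into a fresh value
    	dup.Tags[0] = "z"                   // leaves orig untouched

    	proto.Merge(orig, &pb.Item{Tags: []string{"b"}})
    	// Repeated fields append on merge: orig.Tags is now ["a", "b"].
    }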
-func Merge(dst, src Message) { - in := reflect.ValueOf(src) - out := reflect.ValueOf(dst) - if out.IsNil() { - panic("proto: nil destination") - } - if in.Type() != out.Type() { - // Explicit test prior to mergeStruct so that mistyped nils will fail - panic("proto: type mismatch") - } - if in.IsNil() { - // Merging nil into non-nil is a quiet no-op - return - } - mergeStruct(out.Elem(), in.Elem()) -} - -func mergeStruct(out, in reflect.Value) { - sprop := GetProperties(in.Type()) - for i := 0; i < in.NumField(); i++ { - f := in.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) - } - - if emIn, ok := in.Addr().Interface().(extensionsBytes); ok { - emOut := out.Addr().Interface().(extensionsBytes) - bIn := emIn.GetExtensions() - bOut := emOut.GetExtensions() - *bOut = append(*bOut, *bIn...) - } else if emIn, ok := extendable(in.Addr().Interface()); ok { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - uf := in.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return - } - uin := uf.Bytes() - if len(uin) > 0 { - out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) - } -} - -// mergeAny performs a merge between two values of the same type. -// viaPtr indicates whether the values were indirected through a pointer (implying proto2). -// prop is set if this is a struct field (it may be nil). -func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { - if in.Type() == protoMessageType { - if !in.IsNil() { - if out.IsNil() { - out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) - } else { - Merge(out.Interface().(Message), in.Interface().(Message)) - } - } - return - } - switch in.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - if !viaPtr && isProto3Zero(in) { - return - } - out.Set(in) - case reflect.Interface: - // Probably a oneof field; copy non-nil values. - if in.IsNil() { - return - } - // Allocate destination if it is not set, or set to a different type. - // Otherwise we will merge as normal. - if out.IsNil() || out.Elem().Type() != in.Elem().Type() { - out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) - } - mergeAny(out.Elem(), in.Elem(), false, nil) - case reflect.Map: - if in.Len() == 0 { - return - } - if out.IsNil() { - out.Set(reflect.MakeMap(in.Type())) - } - // For maps with value types of *T or []byte we need to deep copy each value. - elemKind := in.Type().Elem().Kind() - for _, key := range in.MapKeys() { - var val reflect.Value - switch elemKind { - case reflect.Ptr: - val = reflect.New(in.Type().Elem().Elem()) - mergeAny(val, in.MapIndex(key), false, nil) - case reflect.Slice: - val = in.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - default: - val = in.MapIndex(key) - } - out.SetMapIndex(key, val) - } - case reflect.Ptr: - if in.IsNil() { - return - } - if out.IsNil() { - out.Set(reflect.New(in.Elem().Type())) - } - mergeAny(out.Elem(), in.Elem(), true, nil) - case reflect.Slice: - if in.IsNil() { - return - } - if in.Type().Elem().Kind() == reflect.Uint8 { - // []byte is a scalar bytes field, not a repeated field. 
- - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value, and should not - // be merged. - if prop != nil && prop.proto3 && in.Len() == 0 { - return - } - - // Make a deep copy. - // Append to []byte{} instead of []byte(nil) so that we never end up - // with a nil result. - out.SetBytes(append([]byte{}, in.Bytes()...)) - return - } - n := in.Len() - if out.IsNil() { - out.Set(reflect.MakeSlice(in.Type(), 0, n)) - } - switch in.Type().Elem().Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - out.Set(reflect.AppendSlice(out, in)) - default: - for i := 0; i < n; i++ { - x := reflect.Indirect(reflect.New(in.Type().Elem())) - mergeAny(x, in.Index(i), false, nil) - out.Set(reflect.Append(out, x)) - } - } - case reflect.Struct: - mergeStruct(out, in) - default: - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to copy %v", in) - } -} - -func mergeExtension(out, in map[int32]Extension) { - for extNum, eIn := range in { - eOut := Extension{desc: eIn.desc} - if eIn.value != nil { - v := reflect.New(reflect.TypeOf(eIn.value)).Elem() - mergeAny(v, reflect.ValueOf(eIn.value), false, nil) - eOut.value = v.Interface() - } - if eIn.enc != nil { - eOut.enc = make([]byte, len(eIn.enc)) - copy(eOut.enc, eIn.enc) - } - - out[extNum] = eOut - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go deleted file mode 100644 index 737f273..0000000 --- a/vendor/github.com/gogo/protobuf/proto/decode.go +++ /dev/null @@ -1,978 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for decoding protocol buffer data to construct in-memory representations. 
- */ - -import ( - "errors" - "fmt" - "io" - "os" - "reflect" -) - -// errOverflow is returned when an integer is too large to be represented. -var errOverflow = errors.New("proto: integer overflow") - -// ErrInternalBadWireType is returned by generated code when an incorrect -// wire type is encountered. It does not get returned to user code. -var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") - -// The fundamental decoders that interpret bytes on the wire. -// Those that take integer types all return uint64 and are -// therefore of type valueDecoder. - -// DecodeVarint reads a varint-encoded integer from the slice. -// It returns the integer and the number of bytes consumed, or -// zero if there is not enough. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func DecodeVarint(buf []byte) (x uint64, n int) { - for shift := uint(0); shift < 64; shift += 7 { - if n >= len(buf) { - return 0, 0 - } - b := uint64(buf[n]) - n++ - x |= (b & 0x7F) << shift - if (b & 0x80) == 0 { - return x, n - } - } - - // The number is too large to represent in a 64-bit value. - return 0, 0 -} - -func (p *Buffer) decodeVarintSlow() (x uint64, err error) { - i := p.index - l := len(p.buf) - - for shift := uint(0); shift < 64; shift += 7 { - if i >= l { - err = io.ErrUnexpectedEOF - return - } - b := p.buf[i] - i++ - x |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - p.index = i - return - } - } - - // The number is too large to represent in a 64-bit value. - err = errOverflow - return -} - -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) DecodeVarint() (x uint64, err error) { - i := p.index - buf := p.buf - - if i >= len(buf) { - return 0, io.ErrUnexpectedEOF - } else if buf[i] < 0x80 { - p.index++ - return uint64(buf[i]), nil - } else if len(buf)-i < 10 { - return p.decodeVarintSlow() - } - - var b uint64 - // we already checked the first byte - x = uint64(buf[i]) - 0x80 - i++ - - b = uint64(buf[i]) - i++ - x += b << 7 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 7 - - b = uint64(buf[i]) - i++ - x += b << 14 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 14 - - b = uint64(buf[i]) - i++ - x += b << 21 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 21 - - b = uint64(buf[i]) - i++ - x += b << 28 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 28 - - b = uint64(buf[i]) - i++ - x += b << 35 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 35 - - b = uint64(buf[i]) - i++ - x += b << 42 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 42 - - b = uint64(buf[i]) - i++ - x += b << 49 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 49 - - b = uint64(buf[i]) - i++ - x += b << 56 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 56 - - b = uint64(buf[i]) - i++ - x += b << 63 - if b&0x80 == 0 { - goto done - } - // x -= 0x80 << 63 // Always zero. - - return 0, errOverflow - -done: - p.index = i - return x, nil -} - -// DecodeFixed64 reads a 64-bit integer from the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. 
-func (p *Buffer) DecodeFixed64() (x uint64, err error) { - // x, err already 0 - i := p.index + 8 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-8]) - x |= uint64(p.buf[i-7]) << 8 - x |= uint64(p.buf[i-6]) << 16 - x |= uint64(p.buf[i-5]) << 24 - x |= uint64(p.buf[i-4]) << 32 - x |= uint64(p.buf[i-3]) << 40 - x |= uint64(p.buf[i-2]) << 48 - x |= uint64(p.buf[i-1]) << 56 - return -} - -// DecodeFixed32 reads a 32-bit integer from the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) DecodeFixed32() (x uint64, err error) { - // x, err already 0 - i := p.index + 4 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-4]) - x |= uint64(p.buf[i-3]) << 8 - x |= uint64(p.buf[i-2]) << 16 - x |= uint64(p.buf[i-1]) << 24 - return -} - -// DecodeZigzag64 reads a zigzag-encoded 64-bit integer -// from the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) DecodeZigzag64() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) - return -} - -// DecodeZigzag32 reads a zigzag-encoded 32-bit integer -// from the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) DecodeZigzag32() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) - return -} - -// These are not ValueDecoders: they produce an array of bytes or a string. -// bytes, embedded messages - -// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { - n, err := p.DecodeVarint() - if err != nil { - return nil, err - } - - nb := int(n) - if nb < 0 { - return nil, fmt.Errorf("proto: bad byte length %d", nb) - } - end := p.index + nb - if end < p.index || end > len(p.buf) { - return nil, io.ErrUnexpectedEOF - } - - if !alloc { - // todo: check if can get more uses of alloc=false - buf = p.buf[p.index:end] - p.index += nb - return - } - - buf = make([]byte, nb) - copy(buf, p.buf[p.index:]) - p.index += nb - return -} - -// DecodeStringBytes reads an encoded string from the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) DecodeStringBytes() (s string, err error) { - buf, err := p.DecodeRawBytes(false) - if err != nil { - return - } - return string(buf), nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -// If the protocol buffer has extensions, and the field matches, add it as an extension. -// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. -func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { - oi := o.index - - err := o.skip(t, tag, wire) - if err != nil { - return err - } - - if !unrecField.IsValid() { - return nil - } - - ptr := structPointer_Bytes(base, unrecField) - - // Add the skipped field to struct field - obuf := o.buf - - o.buf = *ptr - o.EncodeVarint(uint64(tag<<3 | wire)) - *ptr = append(o.buf, obuf[oi:o.index]...) - - o.buf = obuf - - return nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. 
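A worked example of the primitive decoders above, using byte values from the protobuf encoding guide; this is a sketch against the exported Buffer API only.

    package main

    import (
    	"fmt"

    	"github.com/gogo/protobuf/proto"
    )

    func main() {
    	// 150 is encoded as the two varint bytes 0x96 0x01:
    	// 0x96&0x7F = 22, plus 0x01<<7 = 128.
    	b := proto.NewBuffer([]byte{0x96, 0x01})
    	x, _ := b.DecodeVarint()
    	fmt.Println(x) // 150

    	// sint32 -2 zigzag-encodes to the raw varint 3; DecodeZigzag32
    	// undoes (x >> 1) ^ -(x & 1), so the low 32 bits read back -2.
    	z := proto.NewBuffer([]byte{0x03})
    	v, _ := z.DecodeZigzag32()
    	fmt.Println(int32(v)) // -2
    }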
-func (o *Buffer) skip(t reflect.Type, tag, wire int) error { - - var u uint64 - var err error - - switch wire { - case WireVarint: - _, err = o.DecodeVarint() - case WireFixed64: - _, err = o.DecodeFixed64() - case WireBytes: - _, err = o.DecodeRawBytes(false) - case WireFixed32: - _, err = o.DecodeFixed32() - case WireStartGroup: - for { - u, err = o.DecodeVarint() - if err != nil { - break - } - fwire := int(u & 0x7) - if fwire == WireEndGroup { - break - } - ftag := int(u >> 3) - err = o.skip(t, ftag, fwire) - if err != nil { - break - } - } - default: - err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) - } - return err -} - -// Unmarshaler is the interface representing objects that can -// unmarshal themselves. The method should reset the receiver before -// decoding starts. The argument points to data that may be -// overwritten, so implementations should not keep references to the -// buffer. -type Unmarshaler interface { - Unmarshal([]byte) error -} - -// Unmarshal parses the protocol buffer representation in buf and places the -// decoded result in pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// Unmarshal resets pb before starting to unmarshal, so any -// existing data in pb is always removed. Use UnmarshalMerge -// to preserve and append to existing data. -func Unmarshal(buf []byte, pb Message) error { - pb.Reset() - return UnmarshalMerge(buf, pb) -} - -// UnmarshalMerge parses the protocol buffer representation in buf and -// writes the decoded result to pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// UnmarshalMerge merges into existing data in pb. -// Most code should use Unmarshal instead. -func UnmarshalMerge(buf []byte, pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(Unmarshaler); ok { - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// DecodeMessage reads a count-delimited message from the Buffer. -func (p *Buffer) DecodeMessage(pb Message) error { - enc, err := p.DecodeRawBytes(false) - if err != nil { - return err - } - return NewBuffer(enc).Unmarshal(pb) -} - -// DecodeGroup reads a tag-delimited group from the Buffer. -func (p *Buffer) DecodeGroup(pb Message) error { - typ, base, err := getbase(pb) - if err != nil { - return err - } - return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) -} - -// Unmarshal parses the protocol buffer representation in the -// Buffer and places the decoded result in pb. If the struct -// underlying pb does not match the data in the buffer, the results can be -// unpredictable. -// -// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. -func (p *Buffer) Unmarshal(pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(Unmarshaler); ok { - err := u.Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - - typ, base, err := getbase(pb) - if err != nil { - return err - } - - err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) - - if collectStats { - stats.Decode++ - } - - return err -} - -// unmarshalType does the work of unmarshaling a structure. 
-func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { - var state errorState - required, reqFields := prop.reqCount, uint64(0) - - var err error - for err == nil && o.index < len(o.buf) { - oi := o.index - var u uint64 - u, err = o.DecodeVarint() - if err != nil { - break - } - wire := int(u & 0x7) - if wire == WireEndGroup { - if is_group { - if required > 0 { - // Not enough information to determine the exact field. - // (See below.) - return &RequiredNotSetError{"{Unknown}"} - } - return nil // input is satisfied - } - return fmt.Errorf("proto: %s: wiretype end group for non-group", st) - } - tag := int(u >> 3) - if tag <= 0 { - return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) - } - fieldnum, ok := prop.decoderTags.get(tag) - if !ok { - // Maybe it's an extension? - if prop.extendable { - if e, eok := structPointer_Interface(base, st).(extensionsBytes); eok { - if isExtensionField(e, int32(tag)) { - if err = o.skip(st, tag, wire); err == nil { - ext := e.GetExtensions() - *ext = append(*ext, o.buf[oi:o.index]...) - } - continue - } - } else if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) { - if err = o.skip(st, tag, wire); err == nil { - extmap := e.extensionsWrite() - ext := extmap[int32(tag)] // may be missing - ext.enc = append(ext.enc, o.buf[oi:o.index]...) - extmap[int32(tag)] = ext - } - continue - } - } - // Maybe it's a oneof? - if prop.oneofUnmarshaler != nil { - m := structPointer_Interface(base, st).(Message) - // First return value indicates whether tag is a oneof field. - ok, err = prop.oneofUnmarshaler(m, tag, wire, o) - if err == ErrInternalBadWireType { - // Map the error to something more descriptive. - // Do the formatting here to save generated code space. - err = fmt.Errorf("bad wiretype for oneof field in %T", m) - } - if ok { - continue - } - } - err = o.skipAndSave(st, tag, wire, base, prop.unrecField) - continue - } - p := prop.Prop[fieldnum] - - if p.dec == nil { - fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) - continue - } - dec := p.dec - if wire != WireStartGroup && wire != p.WireType { - if wire == WireBytes && p.packedDec != nil { - // a packable field - dec = p.packedDec - } else { - err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) - continue - } - } - decErr := dec(o, p, base) - if decErr != nil && !state.shouldContinue(decErr, p) { - err = decErr - } - if err == nil && p.Required { - // Successfully decoded a required field. - if tag <= 64 { - // use bitmap for fields 1-64 to catch field reuse. - var mask uint64 = 1 << uint64(tag-1) - if reqFields&mask == 0 { - // new required field - reqFields |= mask - required-- - } - } else { - // This is imprecise. It can be fooled by a required field - // with a tag > 64 that is encoded twice; that's very rare. - // A fully correct implementation would require allocating - // a data structure, which we would like to avoid. - required-- - } - } - } - if err == nil { - if is_group { - return io.ErrUnexpectedEOF - } - if state.err != nil { - return state.err - } - if required > 0 { - // Not enough information to determine the exact field. If we use extra - // CPU, we could determine the field only if the missing required field - // has a tag <= 64 and we check reqFields. 
- return &RequiredNotSetError{"{Unknown}"} - } - } - return err -} - -// Individual type decoders -// For each, -// u is the decoded value, -// v is a pointer to the field (pointer) in the struct - -// Sizes of the pools to allocate inside the Buffer. -// The goal is modest amortization and allocation -// on at least 16-byte boundaries. -const ( - boolPoolSize = 16 - uint32PoolSize = 8 - uint64PoolSize = 4 -) - -// Decode a bool. -func (o *Buffer) dec_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - if len(o.bools) == 0 { - o.bools = make([]bool, boolPoolSize) - } - o.bools[0] = u != 0 - *structPointer_Bool(base, p.field) = &o.bools[0] - o.bools = o.bools[1:] - return nil -} - -func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - *structPointer_BoolVal(base, p.field) = u != 0 - return nil -} - -// Decode an int32. -func (o *Buffer) dec_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) - return nil -} - -func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) - return nil -} - -// Decode an int64. -func (o *Buffer) dec_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64_Set(structPointer_Word64(base, p.field), o, u) - return nil -} - -func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64Val_Set(structPointer_Word64Val(base, p.field), o, u) - return nil -} - -// Decode a string. -func (o *Buffer) dec_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_String(base, p.field) = &s - return nil -} - -func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_StringVal(base, p.field) = s - return nil -} - -// Decode a slice of bytes ([]byte). -func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - *structPointer_Bytes(base, p.field) = b - return nil -} - -// Decode a slice of bools ([]bool). -func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - v := structPointer_BoolSlice(base, p.field) - *v = append(*v, u != 0) - return nil -} - -// Decode a slice of bools ([]bool) in packed format. -func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { - v := structPointer_BoolSlice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded bools - fin := o.index + nb - if fin < o.index { - return errOverflow - } - - y := *v - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - y = append(y, u != 0) - } - - *v = y - return nil -} - -// Decode a slice of int32s ([]int32). 
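The packed decoders above and below consume a single length-delimited record instead of one key per element. Using the classic example from the encoding docs — a packed repeated int32 field, number 4, holding {3, 270, 86942} — the wire bytes and the equivalent of the element loop look like this (a sketch):

    package main

    import (
    	"fmt"

    	"github.com/gogo/protobuf/proto"
    )

    func main() {
    	// 0x22 = key (field 4, wire type 2), 0x06 = payload length,
    	// then varints 3, 270 (0x8E 0x02) and 86942 (0x9E 0xA7 0x05).
    	raw := []byte{0x22, 0x06, 0x03, 0x8E, 0x02, 0x9E, 0xA7, 0x05}

    	b := proto.NewBuffer(raw)
    	b.DecodeVarint()                      // the key
    	payload, _ := b.DecodeRawBytes(false) // the six packed bytes

    	elems := proto.NewBuffer(payload)
    	for i := 0; i < 3; i++ {
    		v, _ := elems.DecodeVarint()
    		fmt.Println(int32(v)) // 3, 270, 86942
    	}
    }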
-func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - structPointer_Word32Slice(base, p.field).Append(uint32(u)) - return nil -} - -// Decode a slice of int32s ([]int32) in packed format. -func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int32s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(uint32(u)) - } - return nil -} - -// Decode a slice of int64s ([]int64). -func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - - structPointer_Word64Slice(base, p.field).Append(u) - return nil -} - -// Decode a slice of int64s ([]int64) in packed format. -func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int64s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(u) - } - return nil -} - -// Decode a slice of strings ([]string). -func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - v := structPointer_StringSlice(base, p.field) - *v = append(*v, s) - return nil -} - -// Decode a slice of slice of bytes ([][]byte). -func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - v := structPointer_BytesSlice(base, p.field) - *v = append(*v, b) - return nil -} - -// Decode a map field. -func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - oi := o.index // index at the end of this map entry - o.index -= len(raw) // move buffer back to start of map entry - - mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V - if mptr.Elem().IsNil() { - mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) - } - v := mptr.Elem() // map[K]V - - // Prepare addressable doubly-indirect placeholders for the key and value types. - // See enc_new_map for why. - keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K - keybase := toStructPointer(keyptr.Addr()) // **K - - var valbase structPointer - var valptr reflect.Value - switch p.mtype.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valptr = reflect.ValueOf(&dummy) // *[]byte - valbase = toStructPointer(valptr) // *[]byte - case reflect.Ptr: - // message; valptr is **Msg; need to allocate the intermediate pointer - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valptr.Set(reflect.New(valptr.Type().Elem())) - valbase = toStructPointer(valptr) - default: - // everything else - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valbase = toStructPointer(valptr.Addr()) // **V - } - - // Decode. - // This parses a restricted wire format, namely the encoding of a message - // with two fields. See enc_new_map for the format. 
- for o.index < oi { - // tagcode for key and value properties are always a single byte - // because they have tags 1 and 2. - tagcode := o.buf[o.index] - o.index++ - switch tagcode { - case p.mkeyprop.tagcode[0]: - if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { - return err - } - case p.mvalprop.tagcode[0]: - if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { - return err - } - default: - // TODO: Should we silently skip this instead? - return fmt.Errorf("proto: bad map data tag %d", raw[0]) - } - } - keyelem, valelem := keyptr.Elem(), valptr.Elem() - if !keyelem.IsValid() { - keyelem = reflect.Zero(p.mtype.Key()) - } - if !valelem.IsValid() { - valelem = reflect.Zero(p.mtype.Elem()) - } - - v.SetMapIndex(keyelem, valelem) - return nil -} - -// Decode a group. -func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - return o.unmarshalType(p.stype, p.sprop, true, bas) -} - -// Decode an embedded message. -func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { - raw, e := o.DecodeRawBytes(false) - if e != nil { - return e - } - - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := structPointer_Interface(bas, p.stype) - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, false, bas) - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of embedded messages. -func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, false, base) -} - -// Decode a slice of embedded groups. -func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, true, base) -} - -// Decode a slice of structs ([]*struct). -func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { - v := reflect.New(p.stype) - bas := toStructPointer(v) - structPointer_StructPointerSlice(base, p.field).Append(bas) - - if is_group { - err := o.unmarshalType(p.stype, p.sprop, is_group, bas) - return err - } - - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := v.Interface() - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, is_group, bas) - - o.buf = obuf - o.index = oi - - return err -} diff --git a/vendor/github.com/gogo/protobuf/proto/decode_gogo.go b/vendor/github.com/gogo/protobuf/proto/decode_gogo.go deleted file mode 100644 index 6fb74de..0000000 --- a/vendor/github.com/gogo/protobuf/proto/decode_gogo.go +++ /dev/null @@ -1,172 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
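As the dec_new_map loop above shows, every map entry is re-parsed as a tiny two-field message — key is always field 1 and value field 2 — so both tagcodes fit in a single byte. For a hypothetical map<string, int32> entry {"a": 1}, the entry payload is:

    package main

    import "fmt"

    func main() {
    	entry := []byte{
    		0x0A, 0x01, 'a', // field 1 (key): wire type 2, length 1, "a"
    		0x10, 0x01, // field 2 (value): wire type 0, varint 1
    	}
    	// dec_new_map dispatches on the first byte of each record:
    	// 0x0A is (1<<3)|2 for the key, 0x10 is (2<<3)|0 for the value.
    	fmt.Printf("% x\n", entry)
    }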
-// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" -) - -// Decode a reference to a struct pointer. -func (o *Buffer) dec_ref_struct_message(p *Properties, base structPointer) (err error) { - raw, e := o.DecodeRawBytes(false) - if e != nil { - return e - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - panic("not supported, since this is a pointer receiver") - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - bas := structPointer_FieldPointer(base, p.field) - - err = o.unmarshalType(p.stype, p.sprop, false, bas) - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of references to struct pointers ([]struct). -func (o *Buffer) dec_slice_ref_struct(p *Properties, is_group bool, base structPointer) error { - newBas := appendStructPointer(base, p.field, p.sstype) - - if is_group { - panic("not supported, maybe in future, if requested.") - } - - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - panic("not supported, since this is not a pointer receiver.") - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, is_group, newBas) - - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of references to struct pointers. 
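The custom-type decoders just below (dec_custom_bytes and friends) only require that the (gogoproto.customtype) target implement the Unmarshaler interface from decode.go. A minimal sketch of a conforming type follows; UUID is illustrative, and a full gogo custom type would also provide Marshal, MarshalTo and Size for the encode path.

    package main

    import "errors"

    // UUID is an illustrative (gogoproto.customtype) target.
    type UUID [16]byte

    // Unmarshal is all dec_custom_bytes needs to populate the field.
    func (u *UUID) Unmarshal(data []byte) error {
    	if len(data) != 16 {
    		return errors.New("uuid: expected 16 bytes")
    	}
    	copy(u[:], data)
    	return nil
    }

    func main() {
    	var u UUID
    	_ = u.Unmarshal(make([]byte, 16))
    }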
-func (o *Buffer) dec_slice_ref_struct_message(p *Properties, base structPointer) error { - return o.dec_slice_ref_struct(p, false, base) -} - -func setPtrCustomType(base structPointer, f field, v interface{}) { - if v == nil { - return - } - structPointer_SetStructPointer(base, f, toStructPointer(reflect.ValueOf(v))) -} - -func setCustomType(base structPointer, f field, value interface{}) { - if value == nil { - return - } - v := reflect.ValueOf(value).Elem() - t := reflect.TypeOf(value).Elem() - kind := t.Kind() - switch kind { - case reflect.Slice: - slice := reflect.MakeSlice(t, v.Len(), v.Cap()) - reflect.Copy(slice, v) - oldHeader := structPointer_GetSliceHeader(base, f) - oldHeader.Data = slice.Pointer() - oldHeader.Len = v.Len() - oldHeader.Cap = v.Cap() - default: - size := reflect.TypeOf(value).Elem().Size() - structPointer_Copy(toStructPointer(reflect.ValueOf(value)), structPointer_Add(base, f), int(size)) - } -} - -func (o *Buffer) dec_custom_bytes(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - i := reflect.New(p.ctype.Elem()).Interface() - custom := (i).(Unmarshaler) - if err := custom.Unmarshal(b); err != nil { - return err - } - setPtrCustomType(base, p.field, custom) - return nil -} - -func (o *Buffer) dec_custom_ref_bytes(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - i := reflect.New(p.ctype).Interface() - custom := (i).(Unmarshaler) - if err := custom.Unmarshal(b); err != nil { - return err - } - if custom != nil { - setCustomType(base, p.field, custom) - } - return nil -} - -// Decode a slice of bytes ([]byte) into a slice of custom types. -func (o *Buffer) dec_custom_slice_bytes(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - i := reflect.New(p.ctype.Elem()).Interface() - custom := (i).(Unmarshaler) - if err := custom.Unmarshal(b); err != nil { - return err - } - newBas := appendStructPointer(base, p.field, p.ctype) - - var zero field - setCustomType(newBas, zero, custom) - - return nil -} diff --git a/vendor/github.com/gogo/protobuf/proto/discard.go b/vendor/github.com/gogo/protobuf/proto/discard.go deleted file mode 100644 index bd0e3bb..0000000 --- a/vendor/github.com/gogo/protobuf/proto/discard.go +++ /dev/null @@ -1,151 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2017 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "reflect" - "strings" -) - -// DiscardUnknown recursively discards all unknown fields from this message -// and all embedded messages. -// -// When unmarshaling a message with unrecognized fields, the tags and values -// of such fields are preserved in the Message. This allows a later call to -// marshal to be able to produce a message that continues to have those -// unrecognized fields. To avoid this, DiscardUnknown is used to -// explicitly clear the unknown fields after unmarshaling. -// -// For proto2 messages, the unknown fields of message extensions are only -// discarded from messages that have been accessed via GetExtension. -func DiscardUnknown(m Message) { - discardLegacy(m) -} - -func discardLegacy(m Message) { - v := reflect.ValueOf(m) - if v.Kind() != reflect.Ptr || v.IsNil() { - return - } - v = v.Elem() - if v.Kind() != reflect.Struct { - return - } - t := v.Type() - - for i := 0; i < v.NumField(); i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - vf := v.Field(i) - tf := f.Type - - // Unwrap tf to get its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) - case isSlice: // E.g., []*pb.T - for j := 0; j < vf.Len(); j++ { - discardLegacy(vf.Index(j).Interface().(Message)) - } - default: // E.g., *pb.T - discardLegacy(vf.Interface().(Message)) - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) - default: // E.g., map[K]V - tv := vf.Type().Elem() - if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) - for _, key := range vf.MapKeys() { - val := vf.MapIndex(key) - discardLegacy(val.Interface().(Message)) - } - } - } - case reflect.Interface: - // Must be oneof field. 
- switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) - default: // E.g., test_proto.isCommunique_Union interface - if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { - vf = vf.Elem() // E.g., *test_proto.Communique_Msg - if !vf.IsNil() { - vf = vf.Elem() // E.g., test_proto.Communique_Msg - vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value - if vf.Kind() == reflect.Ptr { - discardLegacy(vf.Interface().(Message)) - } - } - } - } - } - } - - if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { - if vf.Type() != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - vf.Set(reflect.ValueOf([]byte(nil))) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, ok := extendable(m); ok { - // Ignore lock since discardLegacy is not concurrency safe. - emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - discardLegacy(m) - } - } - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/duration.go b/vendor/github.com/gogo/protobuf/proto/duration.go deleted file mode 100644 index 93464c9..0000000 --- a/vendor/github.com/gogo/protobuf/proto/duration.go +++ /dev/null @@ -1,100 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// This file implements conversions between google.protobuf.Duration -// and time.Duration. - -import ( - "errors" - "fmt" - "time" -) - -const ( - // Range of a Duration in seconds, as specified in - // google/protobuf/duration.proto. This is about 10,000 years in seconds. 
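For context on the discard.go walker above: user code reaches it through proto.DiscardUnknown, typically after unmarshaling bytes written by a newer schema revision. A sketch, where pb.Event is a hypothetical generated type:

    package main

    import (
    	pb "example.com/gen/events" // hypothetical generated package

    	"github.com/gogo/protobuf/proto"
    )

    func main() {
    	var ev pb.Event
    	data := []byte{} // imagine bytes carrying fields ev's schema lacks
    	if err := proto.Unmarshal(data, &ev); err != nil {
    		panic(err)
    	}
    	proto.DiscardUnknown(&ev) // drop preserved unknown tags before re-marshaling
    }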
-	maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
-	minSeconds = -maxSeconds
-)
-
-// validateDuration determines whether the Duration is valid according to the
-// definition in google/protobuf/duration.proto. A valid Duration
-// may still be too large to fit into a time.Duration (the range of Duration
-// is about 10,000 years, and the range of time.Duration is about 290 years).
-func validateDuration(d *duration) error {
-	if d == nil {
-		return errors.New("duration: nil Duration")
-	}
-	if d.Seconds < minSeconds || d.Seconds > maxSeconds {
-		return fmt.Errorf("duration: %#v: seconds out of range", d)
-	}
-	if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
-		return fmt.Errorf("duration: %#v: nanos out of range", d)
-	}
-	// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
-	if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
-		return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d)
-	}
-	return nil
-}
-
-// durationFromProto converts a Duration to a time.Duration. durationFromProto
-// returns an error if the Duration is invalid or is too large to be
-// represented in a time.Duration.
-func durationFromProto(p *duration) (time.Duration, error) {
-	if err := validateDuration(p); err != nil {
-		return 0, err
-	}
-	d := time.Duration(p.Seconds) * time.Second
-	if int64(d/time.Second) != p.Seconds {
-		return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
-	}
-	if p.Nanos != 0 {
-		d += time.Duration(p.Nanos)
-		if (d < 0) != (p.Nanos < 0) {
-			return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
-		}
-	}
-	return d, nil
-}
-
-// durationProto converts a time.Duration to a Duration.
-func durationProto(d time.Duration) *duration {
-	nanos := d.Nanoseconds()
-	secs := nanos / 1e9
-	nanos -= secs * 1e9
-	return &duration{
-		Seconds: secs,
-		Nanos:   int32(nanos),
-	}
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go
deleted file mode 100644
index 18e2a5f..0000000
--- a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go
+++ /dev/null
@@ -1,203 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2016, The GoGo Authors.  All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" - "time" -) - -var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem() - -type duration struct { - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` -} - -func (m *duration) Reset() { *m = duration{} } -func (*duration) ProtoMessage() {} -func (*duration) String() string { return "duration" } - -func init() { - RegisterType((*duration)(nil), "gogo.protobuf.proto.duration") -} - -func (o *Buffer) decDuration() (time.Duration, error) { - b, err := o.DecodeRawBytes(true) - if err != nil { - return 0, err - } - dproto := &duration{} - if err := Unmarshal(b, dproto); err != nil { - return 0, err - } - return durationFromProto(dproto) -} - -func (o *Buffer) dec_duration(p *Properties, base structPointer) error { - d, err := o.decDuration() - if err != nil { - return err - } - word64_Set(structPointer_Word64(base, p.field), o, uint64(d)) - return nil -} - -func (o *Buffer) dec_ref_duration(p *Properties, base structPointer) error { - d, err := o.decDuration() - if err != nil { - return err - } - word64Val_Set(structPointer_Word64Val(base, p.field), o, uint64(d)) - return nil -} - -func (o *Buffer) dec_slice_duration(p *Properties, base structPointer) error { - d, err := o.decDuration() - if err != nil { - return err - } - newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))) - var zero field - setPtrCustomType(newBas, zero, &d) - return nil -} - -func (o *Buffer) dec_slice_ref_duration(p *Properties, base structPointer) error { - d, err := o.decDuration() - if err != nil { - return err - } - structPointer_Word64Slice(base, p.field).Append(uint64(d)) - return nil -} - -func size_duration(p *Properties, base structPointer) (n int) { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - dur := structPointer_Interface(structp, durationType).(*time.Duration) - d := durationProto(*dur) - size := Size(d) - return size + sizeVarint(uint64(size)) + len(p.tagcode) -} - -func (o *Buffer) enc_duration(p *Properties, base structPointer) error { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - dur := structPointer_Interface(structp, durationType).(*time.Duration) - d := durationProto(*dur) - data, err := Marshal(d) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeRawBytes(data) - return nil -} - -func size_ref_duration(p *Properties, base structPointer) (n int) { - dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration) - d := durationProto(*dur) - size := Size(d) - return size + sizeVarint(uint64(size)) + len(p.tagcode) -} - -func (o *Buffer) enc_ref_duration(p *Properties, base structPointer) error { - dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration) - d := durationProto(*dur) - data, err := Marshal(d) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return nil -} - -func size_slice_duration(p *Properties, base structPointer) (n int) { - pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration) - durs := *pdurs - for i := 0; i < len(durs); i++ { - if durs[i] == nil { - return 0 - } - dproto := durationProto(*durs[i]) - size := Size(dproto) - n += len(p.tagcode) + size + sizeVarint(uint64(size)) - } - return n -} - -func (o *Buffer) enc_slice_duration(p *Properties, base structPointer) error { - pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration) - durs := *pdurs - for i := 0; i < len(durs); i++ { - if durs[i] == nil { - return errRepeatedHasNil - } - dproto := durationProto(*durs[i]) - data, err := Marshal(dproto) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - } - return nil -} - -func size_slice_ref_duration(p *Properties, base structPointer) (n int) { - pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration) - durs := *pdurs - for i := 0; i < len(durs); i++ { - dproto := durationProto(durs[i]) - size := Size(dproto) - n += len(p.tagcode) + size + sizeVarint(uint64(size)) - } - return n -} - -func (o *Buffer) enc_slice_ref_duration(p *Properties, base structPointer) error { - pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration) - durs := *pdurs - for i := 0; i < len(durs); i++ { - dproto := durationProto(durs[i]) - data, err := Marshal(dproto) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - } - return nil -} diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go deleted file mode 100644 index 8b84d1b..0000000 --- a/vendor/github.com/gogo/protobuf/proto/encode.go +++ /dev/null @@ -1,1362 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "errors" - "fmt" - "reflect" - "sort" -) - -// RequiredNotSetError is the error returned if Marshal is called with -// a protocol buffer struct whose required fields have not -// all been initialized. It is also the error returned if Unmarshal is -// called with an encoded protocol buffer that does not include all the -// required fields. -// -// When printed, RequiredNotSetError reports the first unset required field in a -// message. If the field cannot be precisely determined, it is reported as -// "{Unknown}". -type RequiredNotSetError struct { - field string -} - -func (e *RequiredNotSetError) Error() string { - return fmt.Sprintf("proto: required field %q not set", e.field) -} - -var ( - // errRepeatedHasNil is the error returned if Marshal is called with - // a struct with a repeated field containing a nil element. - errRepeatedHasNil = errors.New("proto: repeated field has nil element") - - // errOneofHasNil is the error returned if Marshal is called with - // a struct with a oneof field containing a nil element. - errOneofHasNil = errors.New("proto: oneof field has nil value") - - // ErrNil is the error returned if Marshal is called with nil. - ErrNil = errors.New("proto: Marshal called with nil") - - // ErrTooLarge is the error returned if Marshal is called with a - // message that encodes to >2GB. - ErrTooLarge = errors.New("proto: message encodes to over 2 GB") -) - -// The fundamental encoders that put bytes on the wire. -// Those that take integer types all accept uint64 and are -// therefore of type valueEncoder. - -const maxVarintBytes = 10 // maximum length of a varint - -// maxMarshalSize is the largest allowed size of an encoded protobuf, -// since C++ and Java use signed int32s for the size. -const maxMarshalSize = 1<<31 - 1 - -// EncodeVarint returns the varint encoding of x. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -// Not used by the package itself, but helpful to clients -// wishing to use the same encoding. -func EncodeVarint(x uint64) []byte { - var buf [maxVarintBytes]byte - var n int - for n = 0; x > 127; n++ { - buf[n] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - buf[n] = uint8(x) - n++ - return buf[0:n] -} - -// EncodeVarint writes a varint-encoded integer to the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. 
-func (p *Buffer) EncodeVarint(x uint64) error { - for x >= 1<<7 { - p.buf = append(p.buf, uint8(x&0x7f|0x80)) - x >>= 7 - } - p.buf = append(p.buf, uint8(x)) - return nil -} - -// SizeVarint returns the varint encoding size of an integer. -func SizeVarint(x uint64) int { - return sizeVarint(x) -} - -func sizeVarint(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} - -// EncodeFixed64 writes a 64-bit integer to the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) EncodeFixed64(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24), - uint8(x>>32), - uint8(x>>40), - uint8(x>>48), - uint8(x>>56)) - return nil -} - -func sizeFixed64(x uint64) int { - return 8 -} - -// EncodeFixed32 writes a 32-bit integer to the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) EncodeFixed32(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24)) - return nil -} - -func sizeFixed32(x uint64) int { - return 4 -} - -// EncodeZigzag64 writes a zigzag-encoded 64-bit integer -// to the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) EncodeZigzag64(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63))) -} - -func sizeZigzag64(x uint64) int { - return sizeVarint((x << 1) ^ uint64((int64(x) >> 63))) -} - -// EncodeZigzag32 writes a zigzag-encoded 32-bit integer -// to the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) EncodeZigzag32(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -func sizeZigzag32(x uint64) int { - return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) EncodeRawBytes(b []byte) error { - p.EncodeVarint(uint64(len(b))) - p.buf = append(p.buf, b...) - return nil -} - -func sizeRawBytes(b []byte) int { - return sizeVarint(uint64(len(b))) + - len(b) -} - -// EncodeStringBytes writes an encoded string to the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) EncodeStringBytes(s string) error { - p.EncodeVarint(uint64(len(s))) - p.buf = append(p.buf, s...) - return nil -} - -func sizeStringBytes(s string) int { - return sizeVarint(uint64(len(s))) + - len(s) -} - -// Marshaler is the interface representing objects that can marshal themselves. -type Marshaler interface { - Marshal() ([]byte, error) -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, returning the data. -func Marshal(pb Message) ([]byte, error) { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - return m.Marshal() - } - p := NewBuffer(nil) - err := p.Marshal(pb) - if p.buf == nil && err == nil { - // Return a non-nil slice on success. - return []byte{}, nil - } - return p.buf, err -} - -// EncodeMessage writes the protocol buffer to the Buffer, -// prefixed by a varint-encoded length. 
-func (p *Buffer) EncodeMessage(pb Message) error { - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - var state errorState - err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) - } - return err -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, writing the result to the -// Buffer. -func (p *Buffer) Marshal(pb Message) error { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - data, err := m.Marshal() - p.buf = append(p.buf, data...) - return err - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - err = p.enc_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - (stats).Encode++ // Parens are to work around a goimports bug. - } - - if len(p.buf) > maxMarshalSize { - return ErrTooLarge - } - return err -} - -// Size returns the encoded size of a protocol buffer. -func Size(pb Message) (n int) { - // Can the object marshal itself? If so, Size is slow. - // TODO: add Size to Marshaler, or add a Sizer interface. - if m, ok := pb.(Marshaler); ok { - b, _ := m.Marshal() - return len(b) - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return 0 - } - if err == nil { - n = size_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - (stats).Size++ // Parens are to work around a goimports bug. - } - - return -} - -// Individual type encoders. - -// Encode a bool. -func (o *Buffer) enc_bool(p *Properties, base structPointer) error { - v := *structPointer_Bool(base, p.field) - if v == nil { - return ErrNil - } - x := 0 - if *v { - x = 1 - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { - v := *structPointer_BoolVal(base, p.field) - if !v { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, 1) - return nil -} - -func size_bool(p *Properties, base structPointer) int { - v := *structPointer_Bool(base, p.field) - if v == nil { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -func size_proto3_bool(p *Properties, base structPointer) int { - v := *structPointer_BoolVal(base, p.field) - if !v && !p.oneof { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -// Encode an int32. -func (o *Buffer) enc_int32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- p.valEnc(o, uint64(x)) - return nil -} - -func size_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode a uint32. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := word32_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := word32_Get(v) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode an int64. -func (o *Buffer) enc_int64(p *Properties, base structPointer) error { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return ErrNil - } - x := word64_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func size_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return 0 - } - x := word64_Get(v) - n += len(p.tagcode) - n += p.valSize(x) - return -} - -func size_proto3_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(x) - return -} - -// Encode a string. -func (o *Buffer) enc_string(p *Properties, base structPointer) error { - v := *structPointer_String(base, p.field) - if v == nil { - return ErrNil - } - x := *v - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(x) - return nil -} - -func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { - v := *structPointer_StringVal(base, p.field) - if v == "" { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeStringBytes(v) - return nil -} - -func size_string(p *Properties, base structPointer) (n int) { - v := *structPointer_String(base, p.field) - if v == nil { - return 0 - } - x := *v - n += len(p.tagcode) - n += sizeStringBytes(x) - return -} - -func size_proto3_string(p *Properties, base structPointer) (n int) { - v := *structPointer_StringVal(base, p.field) - if v == "" && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeStringBytes(v) - return -} - -// All protocol buffer fields are nillable, but be careful. -func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - return false -} - -// Encode a message struct. -func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { - var state errorState - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return state.err - } - - o.buf = append(o.buf, p.tagcode...) - return o.enc_len_struct(p.sprop, structp, &state) -} - -func size_struct_message(p *Properties, base structPointer) int { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n0 := len(p.tagcode) - n1 := sizeRawBytes(data) - return n0 + n1 - } - - n0 := len(p.tagcode) - n1 := size_struct(p.sprop, structp) - n2 := sizeVarint(uint64(n1)) // size of encoded length - return n0 + n1 + n2 -} - -// Encode a group struct. -func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { - var state errorState - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return ErrNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - err := o.enc_struct(p.sprop, b) - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return state.err -} - -func size_struct_group(p *Properties, base structPointer) (n int) { - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return 0 - } - - n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) - n += size_struct(p.sprop, b) - n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return -} - -// Encode a slice of bools ([]bool). -func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - for _, x := range s { - o.buf = append(o.buf, p.tagcode...) - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_bool(p *Properties, base structPointer) int { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - return l * (len(p.tagcode) + 1) // each bool takes exactly one byte -} - -// Encode a slice of bools ([]bool) in packed format. -func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeVarint(uint64(l)) // each bool takes exactly one byte - for _, x := range s { - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_packed_bool(p *Properties, base structPointer) (n int) { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - n += len(p.tagcode) - n += sizeVarint(uint64(l)) - n += l // each bool takes exactly one byte - return -} - -// Encode a slice of bytes ([]byte). -func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func size_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if s == nil && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -// Encode a slice of int32s ([]int32). -func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of int32s ([]int32) in packed format. -func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(buf, uint64(x)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - bufSize += p.valSize(uint64(x)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of uint32s ([]uint32). -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) 
- x := s.Index(i) - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := s.Index(i) - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of uint32s ([]uint32) in packed format. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, uint64(s.Index(i))) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(uint64(s.Index(i))) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of int64s ([]int64). -func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, s.Index(i)) - } - return nil -} - -func size_slice_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - n += p.valSize(s.Index(i)) - } - return -} - -// Encode a slice of int64s ([]int64) in packed format. -func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, s.Index(i)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(s.Index(i)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of slice of bytes ([][]byte). -func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(ss[i]) - } - return nil -} - -func size_slice_slice_byte(p *Properties, base structPointer) (n int) { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return 0 - } - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeRawBytes(ss[i]) - } - return -} - -// Encode a slice of strings ([]string). -func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeStringBytes(ss[i]) - } - return nil -} - -func size_slice_string(p *Properties, base structPointer) (n int) { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeStringBytes(ss[i]) - } - return -} - -// Encode a slice of message structs ([]*struct). -func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return errRepeatedHasNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - continue - } - - o.buf = append(o.buf, p.tagcode...) - err := o.enc_len_struct(p.sprop, structp, &state) - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - } - return state.err -} - -func size_slice_struct_message(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return // return the size up to this point - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n += sizeRawBytes(data) - continue - } - - n0 := size_struct(p.sprop, structp) - n1 := sizeVarint(uint64(n0)) // size of encoded length - n += n0 + n1 - } - return -} - -// Encode a slice of group structs ([]*struct). -func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return errRepeatedHasNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - - err := o.enc_struct(p.sprop, b) - - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - } - return state.err -} - -func size_slice_struct_group(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) - n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return // return size up to this point - } - - n += size_struct(p.sprop, b) - } - return -} - -// Encode an extension map. 
-func (o *Buffer) enc_map(p *Properties, base structPointer) error {
-	exts := structPointer_ExtMap(base, p.field)
-	if err := encodeExtensionsMap(*exts); err != nil {
-		return err
-	}
-
-	return o.enc_map_body(*exts)
-}
-
-func (o *Buffer) enc_exts(p *Properties, base structPointer) error {
-	exts := structPointer_Extensions(base, p.field)
-
-	v, mu := exts.extensionsRead()
-	if v == nil {
-		return nil
-	}
-
-	mu.Lock()
-	defer mu.Unlock()
-	if err := encodeExtensionsMap(v); err != nil {
-		return err
-	}
-
-	return o.enc_map_body(v)
-}
-
-func (o *Buffer) enc_map_body(v map[int32]Extension) error {
-	// Fast-path for common cases: zero or one extensions.
-	if len(v) <= 1 {
-		for _, e := range v {
-			o.buf = append(o.buf, e.enc...)
-		}
-		return nil
-	}
-
-	// Sort keys to provide a deterministic encoding.
-	keys := make([]int, 0, len(v))
-	for k := range v {
-		keys = append(keys, int(k))
-	}
-	sort.Ints(keys)
-
-	for _, k := range keys {
-		o.buf = append(o.buf, v[int32(k)].enc...)
-	}
-	return nil
-}
-
-func size_map(p *Properties, base structPointer) int {
-	v := structPointer_ExtMap(base, p.field)
-	return extensionsMapSize(*v)
-}
-
-func size_exts(p *Properties, base structPointer) int {
-	v := structPointer_Extensions(base, p.field)
-	return extensionsSize(v)
-}
-
-// Encode a map field.
-func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
-	var state errorState // XXX: or do we need to plumb this through?
-
-	/*
-		A map defined as
-			map<key_type, value_type> map_field = N;
-		is encoded in the same way as
-			message MapFieldEntry {
-				key_type key = 1;
-				value_type value = 2;
-			}
-			repeated MapFieldEntry map_field = N;
-	*/
-
-	v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
-	if v.Len() == 0 {
-		return nil
-	}
-
-	keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
-
-	enc := func() error {
-		if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
-			return err
-		}
-		if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil {
-			return err
-		}
-		return nil
-	}
-
-	// Don't sort map keys. It is not required by the spec, and C++ doesn't do it.
-	for _, key := range v.MapKeys() {
-		val := v.MapIndex(key)
-
-		keycopy.Set(key)
-		valcopy.Set(val)
-
-		o.buf = append(o.buf, p.tagcode...)
-		if err := o.enc_len_thing(enc, &state); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func size_new_map(p *Properties, base structPointer) int {
-	v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
-
-	keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
-
-	n := 0
-	for _, key := range v.MapKeys() {
-		val := v.MapIndex(key)
-		keycopy.Set(key)
-		valcopy.Set(val)
-
-		// Tag codes for key and val are the responsibility of the sub-sizer.
-		keysize := p.mkeyprop.size(p.mkeyprop, keybase)
-		valsize := p.mvalprop.size(p.mvalprop, valbase)
-		entry := keysize + valsize
-		// Add on tag code and length of map entry itself.
-		n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry
-	}
-	return n
-}
-
-// mapEncodeScratch returns a new reflect.Value matching the map's value type,
-// and a structPointer suitable for passing to an encoder or sizer.
-func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) {
-	// Prepare addressable doubly-indirect placeholders for the key and value types.
-	// This is needed because the element-type encoders expect **T, but the map iteration produces T.
- - keycopy = reflect.New(mapType.Key()).Elem() // addressable K - keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K - keyptr.Set(keycopy.Addr()) // - keybase = toStructPointer(keyptr.Addr()) // **K - - // Value types are more varied and require special handling. - switch mapType.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte - valbase = toStructPointer(valcopy.Addr()) - case reflect.Ptr: - // message; the generated field type is map[K]*Msg (so V is *Msg), - // so we only need one level of indirection. - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valbase = toStructPointer(valcopy.Addr()) - default: - // everything else - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V - valptr.Set(valcopy.Addr()) // - valbase = toStructPointer(valptr.Addr()) // **V - } - return -} - -// Encode a struct. -func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { - var state errorState - // Encode fields in tag order so that decoders may use optimizations - // that depend on the ordering. - // https://developers.google.com/protocol-buffers/docs/encoding#order - for _, i := range prop.order { - p := prop.Prop[i] - if p.enc != nil { - err := p.enc(o, p, base) - if err != nil { - if err == ErrNil { - if p.Required && state.err == nil { - state.err = &RequiredNotSetError{p.Name} - } - } else if err == errRepeatedHasNil { - // Give more context to nil values in repeated fields. - return errors.New("repeated field " + p.OrigName + " has nil element") - } else if !state.shouldContinue(err, p) { - return err - } - } - if len(o.buf) > maxMarshalSize { - return ErrTooLarge - } - } - } - - // Do oneof fields. - if prop.oneofMarshaler != nil { - m := structPointer_Interface(base, prop.stype).(Message) - if err := prop.oneofMarshaler(m, o); err == ErrNil { - return errOneofHasNil - } else if err != nil { - return err - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - if len(o.buf)+len(v) > maxMarshalSize { - return ErrTooLarge - } - if len(v) > 0 { - o.buf = append(o.buf, v...) - } - } - - return state.err -} - -func size_struct(prop *StructProperties, base structPointer) (n int) { - for _, i := range prop.order { - p := prop.Prop[i] - if p.size != nil { - n += p.size(p, base) - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - n += len(v) - } - - // Factor in any oneof fields. - if prop.oneofSizer != nil { - m := structPointer_Interface(base, prop.stype).(Message) - n += prop.oneofSizer(m) - } - - return -} - -var zeroes [20]byte // longer than any conceivable sizeVarint - -// Encode a struct, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { - return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) -} - -// Encode something, preceded by its encoded length (as a varint). 
-func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { - iLen := len(o.buf) - o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length - iMsg := len(o.buf) - err := enc() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - lMsg := len(o.buf) - iMsg - lLen := sizeVarint(uint64(lMsg)) - switch x := lLen - (iMsg - iLen); { - case x > 0: // actual length is x bytes larger than the space we reserved - // Move msg x bytes right. - o.buf = append(o.buf, zeroes[:x]...) - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - case x < 0: // actual length is x bytes smaller than the space we reserved - // Move msg x bytes left. - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - o.buf = o.buf[:len(o.buf)+x] // x is negative - } - // Encode the length in the reserved space. - o.buf = o.buf[:iLen] - o.EncodeVarint(uint64(lMsg)) - o.buf = o.buf[:len(o.buf)+lMsg] - return state.err -} - -// errorState maintains the first error that occurs and updates that error -// with additional context. -type errorState struct { - err error -} - -// shouldContinue reports whether encoding should continue upon encountering the -// given error. If the error is RequiredNotSetError, shouldContinue returns true -// and, if this is the first appearance of that error, remembers it for future -// reporting. -// -// If prop is not nil, it may update any error with additional context about the -// field with the error. -func (s *errorState) shouldContinue(err error, prop *Properties) bool { - // Ignore unset required fields. - reqNotSet, ok := err.(*RequiredNotSetError) - if !ok { - return false - } - if s.err == nil { - if prop != nil { - err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} - } - s.err = err - } - return true -} diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go deleted file mode 100644 index 32111b7..0000000 --- a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go +++ /dev/null @@ -1,350 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// http://github.com/golang/protobuf/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" -) - -func NewRequiredNotSetError(field string) *RequiredNotSetError { - return &RequiredNotSetError{field} -} - -type Sizer interface { - Size() int -} - -func (o *Buffer) enc_ext_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return ErrNil - } - o.buf = append(o.buf, s...) - return nil -} - -func size_ext_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return 0 - } - n += len(s) - return -} - -// Encode a reference to bool pointer. -func (o *Buffer) enc_ref_bool(p *Properties, base structPointer) error { - v := *structPointer_BoolVal(base, p.field) - x := 0 - if v { - x = 1 - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_ref_bool(p *Properties, base structPointer) int { - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -// Encode a reference to int32 pointer. -func (o *Buffer) enc_ref_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_ref_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func (o *Buffer) enc_ref_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_ref_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode a reference to an int64 pointer. -func (o *Buffer) enc_ref_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func size_ref_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - n += len(p.tagcode) - n += p.valSize(x) - return -} - -// Encode a reference to a string pointer. -func (o *Buffer) enc_ref_string(p *Properties, base structPointer) error { - v := *structPointer_StringVal(base, p.field) - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(v) - return nil -} - -func size_ref_string(p *Properties, base structPointer) (n int) { - v := *structPointer_StringVal(base, p.field) - n += len(p.tagcode) - n += sizeStringBytes(v) - return -} - -// Encode a reference to a message struct. 
-func (o *Buffer) enc_ref_struct_message(p *Properties, base structPointer) error { - var state errorState - structp := structPointer_GetRefStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return nil - } - - o.buf = append(o.buf, p.tagcode...) - return o.enc_len_struct(p.sprop, structp, &state) -} - -//TODO this is only copied, please fix this -func size_ref_struct_message(p *Properties, base structPointer) int { - structp := structPointer_GetRefStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n0 := len(p.tagcode) - n1 := sizeRawBytes(data) - return n0 + n1 - } - - n0 := len(p.tagcode) - n1 := size_struct(p.sprop, structp) - n2 := sizeVarint(uint64(n1)) // size of encoded length - return n0 + n1 + n2 -} - -// Encode a slice of references to message struct pointers ([]struct). -func (o *Buffer) enc_slice_ref_struct_message(p *Properties, base structPointer) error { - var state errorState - ss := structPointer_StructRefSlice(base, p.field, p.stype.Size()) - l := ss.Len() - for i := 0; i < l; i++ { - structp := ss.Index(i) - if structPointer_IsNil(structp) { - return errRepeatedHasNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - continue - } - - o.buf = append(o.buf, p.tagcode...) - err := o.enc_len_struct(p.sprop, structp, &state) - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - - } - return state.err -} - -//TODO this is only copied, please fix this -func size_slice_ref_struct_message(p *Properties, base structPointer) (n int) { - ss := structPointer_StructRefSlice(base, p.field, p.stype.Size()) - l := ss.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - structp := ss.Index(i) - if structPointer_IsNil(structp) { - return // return the size up to this point - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n += len(p.tagcode) - n += sizeRawBytes(data) - continue - } - - n0 := size_struct(p.sprop, structp) - n1 := sizeVarint(uint64(n0)) // size of encoded length - n += n0 + n1 - } - return -} - -func (o *Buffer) enc_custom_bytes(p *Properties, base structPointer) error { - i := structPointer_InterfaceRef(base, p.field, p.ctype) - if i == nil { - return ErrNil - } - custom := i.(Marshaler) - data, err := custom.Marshal() - if err != nil { - return err - } - if data == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeRawBytes(data) - return nil -} - -func size_custom_bytes(p *Properties, base structPointer) (n int) { - n += len(p.tagcode) - i := structPointer_InterfaceRef(base, p.field, p.ctype) - if i == nil { - return 0 - } - custom := i.(Marshaler) - data, _ := custom.Marshal() - n += sizeRawBytes(data) - return -} - -func (o *Buffer) enc_custom_ref_bytes(p *Properties, base structPointer) error { - custom := structPointer_InterfaceAt(base, p.field, p.ctype).(Marshaler) - data, err := custom.Marshal() - if err != nil { - return err - } - if data == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return nil -} - -func size_custom_ref_bytes(p *Properties, base structPointer) (n int) { - n += len(p.tagcode) - i := structPointer_InterfaceAt(base, p.field, p.ctype) - if i == nil { - return 0 - } - custom := i.(Marshaler) - data, _ := custom.Marshal() - n += sizeRawBytes(data) - return -} - -func (o *Buffer) enc_custom_slice_bytes(p *Properties, base structPointer) error { - inter := structPointer_InterfaceRef(base, p.field, p.ctype) - if inter == nil { - return ErrNil - } - slice := reflect.ValueOf(inter) - l := slice.Len() - for i := 0; i < l; i++ { - v := slice.Index(i) - custom := v.Interface().(Marshaler) - data, err := custom.Marshal() - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - } - return nil -} - -func size_custom_slice_bytes(p *Properties, base structPointer) (n int) { - inter := structPointer_InterfaceRef(base, p.field, p.ctype) - if inter == nil { - return 0 - } - slice := reflect.ValueOf(inter) - l := slice.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - v := slice.Index(i) - custom := v.Interface().(Marshaler) - data, _ := custom.Marshal() - n += sizeRawBytes(data) - } - return -} diff --git a/vendor/github.com/gogo/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go deleted file mode 100644 index 2ed1cf5..0000000 --- a/vendor/github.com/gogo/protobuf/proto/equal.go +++ /dev/null @@ -1,300 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer comparison. - -package proto - -import ( - "bytes" - "log" - "reflect" - "strings" -) - -/* -Equal returns true iff protocol buffers a and b are equal. -The arguments must both be pointers to protocol buffer structs. - -Equality is defined in this way: - - Two messages are equal iff they are the same type, - corresponding fields are equal, unknown field sets - are equal, and extensions sets are equal. - - Two set scalar fields are equal iff their values are equal. - If the fields are of a floating-point type, remember that - NaN != x for all x, including NaN. If the message is defined - in a proto3 .proto file, fields are not "set"; specifically, - zero length proto3 "bytes" fields are equal (nil == {}). - - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal. Note a "bytes" field, - although represented by []byte, is not a repeated field and the - rule for the scalar fields described above applies. - - Two unset fields are equal. - - Two unknown field sets are equal if their current - encoded state is equal. - - Two extension sets are equal iff they have corresponding - elements that are pairwise equal. - - Two map fields are equal iff their lengths are the same, - and they contain the same set of elements. Zero-length map - fields are equal. - - Every other combination of things are not equal. - -The return value is undefined if a and b are not protocol buffers. -*/ -func Equal(a, b Message) bool { - if a == nil || b == nil { - return a == b - } - v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) - if v1.Type() != v2.Type() { - return false - } - if v1.Kind() == reflect.Ptr { - if v1.IsNil() { - return v2.IsNil() - } - if v2.IsNil() { - return false - } - v1, v2 = v1.Elem(), v2.Elem() - } - if v1.Kind() != reflect.Struct { - return false - } - return equalStruct(v1, v2) -} - -// v1 and v2 are known to have the same type. 
-func equalStruct(v1, v2 reflect.Value) bool { - sprop := GetProperties(v1.Type()) - for i := 0; i < v1.NumField(); i++ { - f := v1.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - f1, f2 := v1.Field(i), v2.Field(i) - if f.Type.Kind() == reflect.Ptr { - if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { - // both unset - continue - } else if n1 != n2 { - // set/unset mismatch - return false - } - b1, ok := f1.Interface().(raw) - if ok { - b2 := f2.Interface().(raw) - // RawMessage - if !bytes.Equal(b1.Bytes(), b2.Bytes()) { - return false - } - continue - } - f1, f2 = f1.Elem(), f2.Elem() - } - if !equalAny(f1, f2, sprop.Prop[i]) { - return false - } - } - - if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_InternalExtensions") - if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { - return false - } - } - - if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_extensions") - if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { - return false - } - } - - uf := v1.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return true - } - - u1 := uf.Bytes() - u2 := v2.FieldByName("XXX_unrecognized").Bytes() - if !bytes.Equal(u1, u2) { - return false - } - - return true -} - -// v1 and v2 are known to have the same type. -// prop may be nil. -func equalAny(v1, v2 reflect.Value, prop *Properties) bool { - if v1.Type() == protoMessageType { - m1, _ := v1.Interface().(Message) - m2, _ := v2.Interface().(Message) - return Equal(m1, m2) - } - switch v1.Kind() { - case reflect.Bool: - return v1.Bool() == v2.Bool() - case reflect.Float32, reflect.Float64: - return v1.Float() == v2.Float() - case reflect.Int32, reflect.Int64: - return v1.Int() == v2.Int() - case reflect.Interface: - // Probably a oneof field; compare the inner values. - n1, n2 := v1.IsNil(), v2.IsNil() - if n1 || n2 { - return n1 == n2 - } - e1, e2 := v1.Elem(), v2.Elem() - if e1.Type() != e2.Type() { - return false - } - return equalAny(e1, e2, nil) - case reflect.Map: - if v1.Len() != v2.Len() { - return false - } - for _, key := range v1.MapKeys() { - val2 := v2.MapIndex(key) - if !val2.IsValid() { - // This key was not found in the second map. - return false - } - if !equalAny(v1.MapIndex(key), val2, nil) { - return false - } - } - return true - case reflect.Ptr: - // Maps may have nil values in them, so check for nil. - if v1.IsNil() && v2.IsNil() { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return equalAny(v1.Elem(), v2.Elem(), prop) - case reflect.Slice: - if v1.Type().Elem().Kind() == reflect.Uint8 { - // short circuit: []byte - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value. 
- if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) - } - - if v1.Len() != v2.Len() { - return false - } - for i := 0; i < v1.Len(); i++ { - if !equalAny(v1.Index(i), v2.Index(i), prop) { - return false - } - } - return true - case reflect.String: - return v1.Interface().(string) == v2.Interface().(string) - case reflect.Struct: - return equalStruct(v1, v2) - case reflect.Uint32, reflect.Uint64: - return v1.Uint() == v2.Uint() - } - - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to compare %v", v1) - return false -} - -// base is the struct type that the extensions are based on. -// x1 and x2 are InternalExtensions. -func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { - em1, _ := x1.extensionsRead() - em2, _ := x2.extensionsRead() - return equalExtMap(base, em1, em2) -} - -func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { - if len(em1) != len(em2) { - return false - } - - for extNum, e1 := range em1 { - e2, ok := em2[extNum] - if !ok { - return false - } - - m1, m2 := e1.value, e2.value - - if m1 != nil && m2 != nil { - // Both are unencoded. - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - continue - } - - // At least one is encoded. To do a semantically correct comparison - // we need to unmarshal them first. - var desc *ExtensionDesc - if m := extensionMaps[base]; m != nil { - desc = m[extNum] - } - if desc == nil { - log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - continue - } - var err error - if m1 == nil { - m1, err = decodeExtension(e1.enc, desc) - } - if m2 == nil && err == nil { - m2, err = decodeExtension(e2.enc, desc) - } - if err != nil { - // The encoded form is invalid. - log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) - return false - } - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - } - - return true -} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go deleted file mode 100644 index 0dfcb53..0000000 --- a/vendor/github.com/gogo/protobuf/proto/extensions.go +++ /dev/null @@ -1,693 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Types and routines for supporting protocol buffer extensions. - */ - -import ( - "errors" - "fmt" - "reflect" - "strconv" - "sync" -) - -// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. -var ErrMissingExtension = errors.New("proto: missing extension") - -// ExtensionRange represents a range of message extensions for a protocol buffer. -// Used in code generated by the protocol compiler. -type ExtensionRange struct { - Start, End int32 // both inclusive -} - -// extendableProto is an interface implemented by any protocol buffer generated by the current -// proto compiler that may be extended. -type extendableProto interface { - Message - ExtensionRangeArray() []ExtensionRange - extensionsWrite() map[int32]Extension - extensionsRead() (map[int32]Extension, sync.Locker) -} - -// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous -// version of the proto compiler that may be extended. -type extendableProtoV1 interface { - Message - ExtensionRangeArray() []ExtensionRange - ExtensionMap() map[int32]Extension -} - -type extensionsBytes interface { - Message - ExtensionRangeArray() []ExtensionRange - GetExtensions() *[]byte -} - -// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. -type extensionAdapter struct { - extendableProtoV1 -} - -func (e extensionAdapter) extensionsWrite() map[int32]Extension { - return e.ExtensionMap() -} - -func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - return e.ExtensionMap(), notLocker{} -} - -// notLocker is a sync.Locker whose Lock and Unlock methods are nops. -type notLocker struct{} - -func (n notLocker) Lock() {} -func (n notLocker) Unlock() {} - -// extendable returns the extendableProto interface for the given generated proto message. -// If the proto message has the old extension format, it returns a wrapper that implements -// the extendableProto interface. -func extendable(p interface{}) (extendableProto, bool) { - if ep, ok := p.(extendableProto); ok { - return ep, ok - } - if ep, ok := p.(extendableProtoV1); ok { - return extensionAdapter{ep}, ok - } - return nil, false -} - -// XXX_InternalExtensions is an internal representation of proto extensions. -// -// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, -// thus gaining the unexported 'extensions' method, which can be called only from the proto package. -// -// The methods of XXX_InternalExtensions are not concurrency safe in general, -// but calls to logically read-only methods such as has and get may be executed concurrently. -type XXX_InternalExtensions struct { - // The struct must be indirect so that if a user inadvertently copies a - // generated message and its embedded XXX_InternalExtensions, they - // avoid the mayhem of a copied mutex. - // - // The mutex serializes all logically read-only operations to p.extensionMap. 
- // It is up to the client to ensure that write operations to p.extensionMap are - // mutually exclusive with other accesses. - p *struct { - mu sync.Mutex - extensionMap map[int32]Extension - } -} - -// extensionsWrite returns the extension map, creating it on first use. -func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { - if e.p == nil { - e.p = new(struct { - mu sync.Mutex - extensionMap map[int32]Extension - }) - e.p.extensionMap = make(map[int32]Extension) - } - return e.p.extensionMap -} - -// extensionsRead returns the extensions map for read-only use. It may be nil. -// The caller must hold the returned mutex's lock when accessing Elements within the map. -func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { - if e.p == nil { - return nil, nil - } - return e.p.extensionMap, &e.p.mu -} - -type extensionRange interface { - Message - ExtensionRangeArray() []ExtensionRange -} - -var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() -var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem() -var extendableBytesType = reflect.TypeOf((*extensionsBytes)(nil)).Elem() -var extensionRangeType = reflect.TypeOf((*extensionRange)(nil)).Elem() - -// ExtensionDesc represents an extension specification. -// Used in generated code from the protocol compiler. -type ExtensionDesc struct { - ExtendedType Message // nil pointer to the type that is being extended - ExtensionType interface{} // nil pointer to the extension type - Field int32 // field number - Name string // fully-qualified name of extension, for text formatting - Tag string // protobuf tag style - Filename string // name of the file in which the extension is defined -} - -func (ed *ExtensionDesc) repeated() bool { - t := reflect.TypeOf(ed.ExtensionType) - return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 -} - -// Extension represents an extension in a message. -type Extension struct { - // When an extension is stored in a message using SetExtension - // only desc and value are set. When the message is marshaled - // enc will be set to the encoded form of the message. - // - // When a message is unmarshaled and contains extensions, each - // extension will have only enc set. When such an extension is - // accessed using GetExtension (or GetExtensions) desc and value - // will be set. - desc *ExtensionDesc - value interface{} - enc []byte -} - -// SetRawExtension is for testing only. -func SetRawExtension(base Message, id int32, b []byte) { - if ebase, ok := base.(extensionsBytes); ok { - clearExtension(base, id) - ext := ebase.GetExtensions() - *ext = append(*ext, b...) - return - } - epb, ok := extendable(base) - if !ok { - return - } - extmap := epb.extensionsWrite() - extmap[id] = Extension{enc: b} -} - -// isExtensionField returns true iff the given field number is in an extension range. -func isExtensionField(pb extensionRange, field int32) bool { - for _, er := range pb.ExtensionRangeArray() { - if er.Start <= field && field <= er.End { - return true - } - } - return false -} - -// checkExtensionTypes checks that the given extension is valid for pb. -func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - var pbi interface{} = pb - // Check the extended type. 
- if ea, ok := pbi.(extensionAdapter); ok { - pbi = ea.extendableProtoV1 - } - if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) - } - // Check the range. - if !isExtensionField(pb, extension.Field) { - return errors.New("proto: bad extension number; not in declared ranges") - } - return nil -} - -// extPropKey is sufficient to uniquely identify an extension. -type extPropKey struct { - base reflect.Type - field int32 -} - -var extProp = struct { - sync.RWMutex - m map[extPropKey]*Properties -}{ - m: make(map[extPropKey]*Properties), -} - -func extensionProperties(ed *ExtensionDesc) *Properties { - key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} - - extProp.RLock() - if prop, ok := extProp.m[key]; ok { - extProp.RUnlock() - return prop - } - extProp.RUnlock() - - extProp.Lock() - defer extProp.Unlock() - // Check again. - if prop, ok := extProp.m[key]; ok { - return prop - } - - prop := new(Properties) - prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) - extProp.m[key] = prop - return prop -} - -// encode encodes any unmarshaled (unencoded) extensions in e. -func encodeExtensions(e *XXX_InternalExtensions) error { - m, mu := e.extensionsRead() - if m == nil { - return nil // fast path - } - mu.Lock() - defer mu.Unlock() - return encodeExtensionsMap(m) -} - -// encode encodes any unmarshaled (unencoded) extensions in e. -func encodeExtensionsMap(m map[int32]Extension) error { - for k, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - p := NewBuffer(nil) - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - if err := props.enc(p, props, toStructPointer(x)); err != nil { - return err - } - e.enc = p.buf - m[k] = e - } - return nil -} - -func extensionsSize(e *XXX_InternalExtensions) (n int) { - m, mu := e.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - defer mu.Unlock() - return extensionsMapSize(m) -} - -func extensionsMapSize(m map[int32]Extension) (n int) { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - n += props.size(props, toStructPointer(x)) - } - return -} - -// HasExtension returns whether the given extension is present in pb. 
-func HasExtension(pb Message, extension *ExtensionDesc) bool { - if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - buf := *ext - o := 0 - for o < len(buf) { - tag, n := DecodeVarint(buf[o:]) - fieldNum := int32(tag >> 3) - if int32(fieldNum) == extension.Field { - return true - } - wireType := int(tag & 0x7) - o += n - l, err := size(buf[o:], wireType) - if err != nil { - return false - } - o += l - } - return false - } - // TODO: Check types, field numbers, etc.? - epb, ok := extendable(pb) - if !ok { - return false - } - extmap, mu := epb.extensionsRead() - if extmap == nil { - return false - } - mu.Lock() - _, ok = extmap[extension.Field] - mu.Unlock() - return ok -} - -func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { - ext := pb.GetExtensions() - for offset < len(*ext) { - tag, n1 := DecodeVarint((*ext)[offset:]) - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - n2, err := size((*ext)[offset+n1:], wireType) - if err != nil { - panic(err) - } - newOffset := offset + n1 + n2 - if fieldNum == theFieldNum { - *ext = append((*ext)[:offset], (*ext)[newOffset:]...) - return offset - } - offset = newOffset - } - return -1 -} - -// ClearExtension removes the given extension from pb. -func ClearExtension(pb Message, extension *ExtensionDesc) { - clearExtension(pb, extension.Field) -} - -func clearExtension(pb Message, fieldNum int32) { - if epb, doki := pb.(extensionsBytes); doki { - offset := 0 - for offset != -1 { - offset = deleteExtension(epb, fieldNum, offset) - } - return - } - epb, ok := extendable(pb) - if !ok { - return - } - // TODO: Check types, field numbers, etc.? - extmap := epb.extensionsWrite() - delete(extmap, fieldNum) -} - -// GetExtension parses and returns the given extension of pb. -// If the extension is not present and has no default value it returns ErrMissingExtension. -func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - o := 0 - for o < len(*ext) { - tag, n := DecodeVarint((*ext)[o:]) - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - l, err := size((*ext)[o+n:], wireType) - if err != nil { - return nil, err - } - if int32(fieldNum) == extension.Field { - v, err := decodeExtension((*ext)[o:o+n+l], extension) - if err != nil { - return nil, err - } - return v, nil - } - o += n + l - } - return defaultExtensionValue(extension) - } - epb, ok := extendable(pb) - if !ok { - return nil, errors.New("proto: not an extendable proto") - } - if err := checkExtensionTypes(epb, extension); err != nil { - return nil, err - } - - emap, mu := epb.extensionsRead() - if emap == nil { - return defaultExtensionValue(extension) - } - mu.Lock() - defer mu.Unlock() - e, ok := emap[extension.Field] - if !ok { - // defaultExtensionValue returns the default value or - // ErrMissingExtension if there is no default. - return defaultExtensionValue(extension) - } - - if e.value != nil { - // Already decoded. Check the descriptor, though. - if e.desc != extension { - // This shouldn't happen. If it does, it means that - // GetExtension was called twice with two different - // descriptors with the same field number. - return nil, errors.New("proto: descriptor conflict") - } - return e.value, nil - } - - v, err := decodeExtension(e.enc, extension) - if err != nil { - return nil, err - } - - // Remember the decoded version and drop the encoded version. - // That way it is safe to mutate what we return. 
- e.value = v - e.desc = extension - e.enc = nil - emap[extension.Field] = e - return e.value, nil -} - -// defaultExtensionValue returns the default value for extension. -// If no default for an extension is defined ErrMissingExtension is returned. -func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { - t := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - - sf, _, err := fieldDefault(t, props) - if err != nil { - return nil, err - } - - if sf == nil || sf.value == nil { - // There is no default value. - return nil, ErrMissingExtension - } - - if t.Kind() != reflect.Ptr { - // We do not need to return a Ptr, we can directly return sf.value. - return sf.value, nil - } - - // We need to return an interface{} that is a pointer to sf.value. - value := reflect.New(t).Elem() - value.Set(reflect.New(value.Type().Elem())) - if sf.kind == reflect.Int32 { - // We may have an int32 or an enum, but the underlying data is int32. - // Since we can't set an int32 into a non int32 reflect.value directly - // set it as a int32. - value.Elem().SetInt(int64(sf.value.(int32))) - } else { - value.Elem().Set(reflect.ValueOf(sf.value)) - } - return value.Interface(), nil -} - -// decodeExtension decodes an extension encoded in b. -func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - o := NewBuffer(b) - - t := reflect.TypeOf(extension.ExtensionType) - - props := extensionProperties(extension) - - // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate a "field" to store the pointer/slice itself; the - // pointer/slice will be stored here. We pass - // the address of this field to props.dec. - // This passes a zero field and a *t and lets props.dec - // interpret it as a *struct{ x t }. - value := reflect.New(t).Elem() - - for { - // Discard wire type and field number varint. It isn't needed. - if _, err := o.DecodeVarint(); err != nil { - return nil, err - } - - if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { - return nil, err - } - - if o.index >= len(o.buf) { - break - } - } - return value.Interface(), nil -} - -// GetExtensions returns a slice of the extensions present in pb that are also listed in es. -// The returned slice has the same length as es; missing extensions will appear as nil elements. -func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - extensions = make([]interface{}, len(es)) - for i, e := range es { - extensions[i], err = GetExtension(pb, e) - if err == ErrMissingExtension { - err = nil - } - if err != nil { - return - } - } - return -} - -// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. -// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing -// just the Field field, which defines the extension's field number. 
-func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, ok := extendable(pb) - if !ok { - return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb) - } - registeredExtensions := RegisteredExtensions(pb) - - emap, mu := epb.extensionsRead() - if emap == nil { - return nil, nil - } - mu.Lock() - defer mu.Unlock() - extensions := make([]*ExtensionDesc, 0, len(emap)) - for extid, e := range emap { - desc := e.desc - if desc == nil { - desc = registeredExtensions[extid] - if desc == nil { - desc = &ExtensionDesc{Field: extid} - } - } - - extensions = append(extensions, desc) - } - return extensions, nil -} - -// SetExtension sets the specified extension of pb to the specified value. -func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - if epb, doki := pb.(extensionsBytes); doki { - ClearExtension(pb, extension) - ext := epb.GetExtensions() - et := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - p := NewBuffer(nil) - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(value)) - if err := props.enc(p, props, toStructPointer(x)); err != nil { - return err - } - *ext = append(*ext, p.buf...) - return nil - } - epb, ok := extendable(pb) - if !ok { - return errors.New("proto: not an extendable proto") - } - if err := checkExtensionTypes(epb, extension); err != nil { - return err - } - typ := reflect.TypeOf(extension.ExtensionType) - if typ != reflect.TypeOf(value) { - return errors.New("proto: bad extension value type") - } - // nil extension values need to be caught early, because the - // encoder can't distinguish an ErrNil due to a nil extension - // from an ErrNil due to a missing field. Extensions are - // always optional, so the encoder would just swallow the error - // and drop all the extensions from the encoded message. - if reflect.ValueOf(value).IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) - } - - extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: value} - return nil -} - -// ClearAllExtensions clears all extensions from pb. -func ClearAllExtensions(pb Message) { - if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - *ext = []byte{} - return - } - epb, ok := extendable(pb) - if !ok { - return - } - m := epb.extensionsWrite() - for k := range m { - delete(m, k) - } -} - -// A global registry of extensions. -// The generated code will register the generated descriptors by calling RegisterExtension. - -var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) - -// RegisterExtension is called from the generated code. -func RegisterExtension(desc *ExtensionDesc) { - st := reflect.TypeOf(desc.ExtendedType).Elem() - m := extensionMaps[st] - if m == nil { - m = make(map[int32]*ExtensionDesc) - extensionMaps[st] = m - } - if _, ok := m[desc.Field]; ok { - panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) - } - m[desc.Field] = desc -} - -// RegisteredExtensions returns a map of the registered extensions of a -// protocol buffer struct, indexed by the extension number. -// The argument pb should be a nil pointer to the struct type. 
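
[Editor's aside: taken together, HasExtension, SetExtension, GetExtension, ClearExtension and RegisterExtension cover the whole proto2 extension lifecycle. A minimal sketch under stated assumptions — Base and E_Count are hypothetical stand-ins for generated code, embedding XXX_InternalExtensions and declaring an extension range the way the generator would:]

    package main

    import (
        "fmt"

        "github.com/gogo/protobuf/proto"
    )

    // Base is a hypothetical hand-written stand-in for a generated extendable
    // message: it embeds proto.XXX_InternalExtensions (which supplies the
    // unexported extension accessors) and declares an extension range.
    type Base struct {
        proto.XXX_InternalExtensions `json:"-"`
        XXX_unrecognized             []byte `json:"-"`
    }

    func (m *Base) Reset()         { *m = Base{} }
    func (m *Base) String() string { return proto.CompactTextString(m) }
    func (*Base) ProtoMessage()    {}
    func (*Base) ExtensionRangeArray() []proto.ExtensionRange {
        return []proto.ExtensionRange{{Start: 100, End: 200}}
    }

    // E_Count is a hypothetical descriptor of the kind the generator emits.
    var E_Count = &proto.ExtensionDesc{
        ExtendedType:  (*Base)(nil),
        ExtensionType: (*int32)(nil),
        Field:         123,
        Name:          "example.count",
        Tag:           "varint,123,opt,name=count",
    }

    func main() {
        proto.RegisterExtension(E_Count) // generated code does this in init()

        msg := &Base{}
        fmt.Println(proto.HasExtension(msg, E_Count)) // false

        if err := proto.SetExtension(msg, E_Count, proto.Int32(7)); err != nil {
            panic(err)
        }
        fmt.Println(proto.HasExtension(msg, E_Count)) // true

        v, err := proto.GetExtension(msg, E_Count) // interface{} holding *int32
        if err != nil {
            panic(err)
        }
        fmt.Println(*v.(*int32)) // 7

        proto.ClearExtension(msg, E_Count)
        _, err = proto.GetExtension(msg, E_Count)
        fmt.Println(err == proto.ErrMissingExtension) // true: no value, no declared default
    }
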
-func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { - return extensionMaps[reflect.TypeOf(pb).Elem()] -} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go deleted file mode 100644 index ea6478f..0000000 --- a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go +++ /dev/null @@ -1,294 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "bytes" - "errors" - "fmt" - "reflect" - "sort" - "strings" - "sync" -) - -func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool { - if reflect.ValueOf(pb).IsNil() { - return ifnotset - } - value, err := GetExtension(pb, extension) - if err != nil { - return ifnotset - } - if value == nil { - return ifnotset - } - if value.(*bool) == nil { - return ifnotset - } - return *(value.(*bool)) -} - -func (this *Extension) Equal(that *Extension) bool { - return bytes.Equal(this.enc, that.enc) -} - -func (this *Extension) Compare(that *Extension) int { - return bytes.Compare(this.enc, that.enc) -} - -func SizeOfInternalExtension(m extendableProto) (n int) { - return SizeOfExtensionMap(m.extensionsWrite()) -} - -func SizeOfExtensionMap(m map[int32]Extension) (n int) { - return extensionsMapSize(m) -} - -type sortableMapElem struct { - field int32 - ext Extension -} - -func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions { - s := make(sortableExtensions, 0, len(m)) - for k, v := range m { - s = append(s, &sortableMapElem{field: k, ext: v}) - } - return s -} - -type sortableExtensions []*sortableMapElem - -func (this sortableExtensions) Len() int { return len(this) } - -func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] } - -func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field } - -func (this sortableExtensions) String() string { - sort.Sort(this) - ss := make([]string, len(this)) - for i := range this { - ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext) - } - return "map[" + strings.Join(ss, ",") + "]" -} - -func StringFromInternalExtension(m extendableProto) string { - return StringFromExtensionsMap(m.extensionsWrite()) -} - -func StringFromExtensionsMap(m map[int32]Extension) string { - return newSortableExtensionsFromMap(m).String() -} - -func StringFromExtensionsBytes(ext []byte) string { - m, err := BytesToExtensionsMap(ext) - if err != nil { - panic(err) - } - return StringFromExtensionsMap(m) -} - -func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) { - return EncodeExtensionMap(m.extensionsWrite(), data) -} - -func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { - if err := encodeExtensionsMap(m); err != nil { - return 0, err - } - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - for _, k := range keys { - n += copy(data[n:], m[int32(k)].enc) - } - return n, nil -} - -func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) { - if m[id].value == nil || m[id].desc == nil { - return m[id].enc, nil - } - if err := encodeExtensionsMap(m); err != nil { - return nil, err - } - return m[id].enc, nil -} - -func size(buf []byte, wire int) (int, error) { - switch wire { - case WireVarint: - _, n := DecodeVarint(buf) - return n, nil - case WireFixed64: - return 8, nil - case WireBytes: - v, n := DecodeVarint(buf) - return int(v) + n, nil - case WireFixed32: - return 4, nil - case WireStartGroup: - offset := 0 - for { - u, n := DecodeVarint(buf[offset:]) - fwire := int(u & 0x7) - offset += n - if fwire == WireEndGroup { - return offset, nil - } - s, err := size(buf[offset:], wire) - if err != nil { - return 0, err - } - offset += s - } - } - return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire) -} - -func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) { - m := make(map[int32]Extension) - 
i := 0 - for i < len(buf) { - tag, n := DecodeVarint(buf[i:]) - if n <= 0 { - return nil, fmt.Errorf("unable to decode varint") - } - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - l, err := size(buf[i+n:], wireType) - if err != nil { - return nil, err - } - end := i + int(l) + n - m[int32(fieldNum)] = Extension{enc: buf[i:end]} - i = end - } - return m, nil -} - -func NewExtension(e []byte) Extension { - ee := Extension{enc: make([]byte, len(e))} - copy(ee.enc, e) - return ee -} - -func AppendExtension(e Message, tag int32, buf []byte) { - if ee, eok := e.(extensionsBytes); eok { - ext := ee.GetExtensions() - *ext = append(*ext, buf...) - return - } - if ee, eok := e.(extendableProto); eok { - m := ee.extensionsWrite() - ext := m[int32(tag)] // may be missing - ext.enc = append(ext.enc, buf...) - m[int32(tag)] = ext - } -} - -func encodeExtension(e *Extension) error { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - return nil - } - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - p := NewBuffer(nil) - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - if err := props.enc(p, props, toStructPointer(x)); err != nil { - return err - } - e.enc = p.buf - return nil -} - -func (this Extension) GoString() string { - if this.enc == nil { - if err := encodeExtension(&this); err != nil { - panic(err) - } - } - return fmt.Sprintf("proto.NewExtension(%#v)", this.enc) -} - -func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error { - typ := reflect.TypeOf(pb).Elem() - ext, ok := extensionMaps[typ] - if !ok { - return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) - } - desc, ok := ext[fieldNum] - if !ok { - return errors.New("proto: bad extension number; not in declared ranges") - } - return SetExtension(pb, desc, value) -} - -func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) { - typ := reflect.TypeOf(pb).Elem() - ext, ok := extensionMaps[typ] - if !ok { - return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) - } - desc, ok := ext[fieldNum] - if !ok { - return nil, fmt.Errorf("unregistered field number %d", fieldNum) - } - return GetExtension(pb, desc) -} - -func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions { - x := &XXX_InternalExtensions{ - p: new(struct { - mu sync.Mutex - extensionMap map[int32]Extension - }), - } - x.p.extensionMap = m - return *x -} - -func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension { - pb := extendable.(extendableProto) - return pb.extensionsWrite() -} diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go deleted file mode 100644 index c98d73d..0000000 --- a/vendor/github.com/gogo/protobuf/proto/lib.go +++ /dev/null @@ -1,897 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package proto converts data structures to and from the wire format of -protocol buffers. It works in concert with the Go source code generated -for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed by the enclosing message's name, or by the - enum's type name if it is a top-level enum. Enum types have a String - method, and a Enum method to assist in message construction. - - Nested messages, groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. 
HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Oneof field sets are given a single field in their message, - with distinguished wrapper types for each possible field value. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -When the .proto file specifies `syntax="proto3"`, there are some differences: - - - Non-repeated fields of non-message type are values instead of pointers. - - Enum types do not get an Enum method. - -The simplest way to describe this is to see an example. -Given file test.proto, containing - - package example; - - enum FOO { X = 17; } - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - oneof union { - int32 number = 6; - string name = 7; - } - } - -The resulting file, test.pb.go, is: - - package example - - import proto "github.com/gogo/protobuf/proto" - import math "math" - - type FOO int32 - const ( - FOO_X FOO = 17 - ) - var FOO_name = map[int32]string{ - 17: "X", - } - var FOO_value = map[string]int32{ - "X": 17, - } - - func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p - } - func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) - } - func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data) - if err != nil { - return err - } - *x = FOO(value) - return nil - } - - type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - // Types that are valid to be assigned to Union: - // *Test_Number - // *Test_Name - Union isTest_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` - } - func (m *Test) Reset() { *m = Test{} } - func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} - - type isTest_Union interface { - isTest_Union() - } - - type Test_Number struct { - Number int32 `protobuf:"varint,6,opt,name=number"` - } - type Test_Name struct { - Name string `protobuf:"bytes,7,opt,name=name"` - } - - func (*Test_Number) isTest_Union() {} - func (*Test_Name) isTest_Union() {} - - func (m *Test) GetUnion() isTest_Union { - if m != nil { - return m.Union - } - return nil - } - const Default_Test_Type int32 = 77 - - func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" - } - - func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type - } - - func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil - } - - type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - } - func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } - func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } - - func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" - } - - func (m *Test) GetNumber() int32 { - if x, ok := m.GetUnion().(*Test_Number); ok { - return x.Number - } - return 0 - } - - func (m 
*Test) GetName() string { - if x, ok := m.GetUnion().(*Test_Name); ok { - return x.Name - } - return "" - } - - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) - } - -To create and play with a Test object: - - package main - - import ( - "log" - - "github.com/gogo/protobuf/proto" - pb "./example.pb" - ) - - func main() { - test := &pb.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Reps: []int64{1, 2, 3}, - Optionalgroup: &pb.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - Union: &pb.Test_Name{"fred"}, - } - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &pb.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // Use a type switch to determine which oneof was set. - switch u := test.Union.(type) { - case *pb.Test_Number: // u.Number contains the number. - case *pb.Test_Name: // u.Name contains the string. - } - // etc. - } -*/ -package proto - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() -} - -// Stats records allocation details about the protocol buffer encoders -// and decoders. Useful for tuning the library itself. -type Stats struct { - Emalloc uint64 // mallocs in encode - Dmalloc uint64 // mallocs in decode - Encode uint64 // number of encodes - Decode uint64 // number of decodes - Chit uint64 // number of cache hits - Cmiss uint64 // number of cache misses - Size uint64 // number of sizes -} - -// Set to true to enable stats collection. -const collectStats = false - -var stats Stats - -// GetStats returns a copy of the global Stats structure. -func GetStats() Stats { return stats } - -// A Buffer is a buffer manager for marshaling and unmarshaling -// protocol buffers. It may be reused between invocations to -// reduce memory usage. It is not necessary to use a Buffer; -// the global functions Marshal and Unmarshal create a -// temporary Buffer and are fine for most applications. -type Buffer struct { - buf []byte // encode/decode byte stream - index int // read point - - // pools of basic types to amortize allocation. - bools []bool - uint32s []uint32 - uint64s []uint64 - - // extra pools, only used with pointer_reflect.go - int32s []int32 - int64s []int64 - float32s []float32 - float64s []float64 -} - -// NewBuffer allocates a new Buffer and initializes its internal data to -// the contents of the argument slice. -func NewBuffer(e []byte) *Buffer { - return &Buffer{buf: e} -} - -// Reset resets the Buffer, ready for marshaling a new protocol buffer. -func (p *Buffer) Reset() { - p.buf = p.buf[0:0] // for reading/writing - p.index = 0 // for reading -} - -// SetBuf replaces the internal buffer with the slice, -// ready for unmarshaling the contents of the slice. -func (p *Buffer) SetBuf(s []byte) { - p.buf = s - p.index = 0 -} - -// Bytes returns the contents of the Buffer. -func (p *Buffer) Bytes() []byte { return p.buf } - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. 
-func Bool(v bool) *bool { - return &v -} - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { - return &v -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. -func Int(v int) *int32 { - p := new(int32) - *p = int32(v) - return p -} - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. -func Int64(v int64) *int64 { - return &v -} - -// Float32 is a helper routine that allocates a new float32 value -// to store v and returns a pointer to it. -func Float32(v float32) *float32 { - return &v -} - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { - return &v -} - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. -func Uint64(v uint64) *uint64 { - return &v -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { - return &v -} - -// EnumName is a helper function to simplify printing protocol buffer enums -// by name. Given an enum map and a value, it returns a useful string. -func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// UnmarshalJSONEnum is a helper function to simplify recovering enum int values -// from their JSON-encoded representation. Given a map from the enum's symbolic -// names to its int values, and a byte buffer containing the JSON-encoded -// value, it returns an int32 that can be cast to the enum type by the caller. -// -// The function can deal with both JSON representations, numeric and symbolic. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. - var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// DebugPrint dumps the encoded data in b in a debugging format with a header -// including the string s. Used in testing but made available for general debugging. 
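
[Editor's aside: the pointer helpers above exist so optional proto2 fields can be set inline, and a Buffer can be reused across marshals as its documentation suggests. A short sketch, assuming a hypothetical hand-tagged Msg and a FOO_value map mirroring the package-documentation example:]

    package main

    import (
        "fmt"

        "github.com/gogo/protobuf/proto"
    )

    // Msg is a hypothetical hand-written stand-in for a generated message.
    type Msg struct {
        Label *string `protobuf:"bytes,1,opt,name=label"`
        Type  *int32  `protobuf:"varint,2,opt,name=type"`
    }

    func (m *Msg) Reset()         { *m = Msg{} }
    func (m *Msg) String() string { return proto.CompactTextString(m) }
    func (*Msg) ProtoMessage()    {}

    var FOO_value = map[string]int32{"X": 17} // as in the package example above

    func main() {
        // The pointer helpers set optional proto2 fields in one expression.
        msg := &Msg{
            Label: proto.String("hello"),
            Type:  proto.Int32(17),
        }

        // A single Buffer can be reused across marshals to amortize allocations.
        var buf proto.Buffer
        for i := 0; i < 3; i++ {
            buf.Reset()
            if err := buf.Marshal(msg); err != nil {
                panic(err)
            }
            fmt.Println(len(buf.Bytes()))
        }

        // UnmarshalJSONEnum accepts either JSON representation: "X" or 17.
        v, err := proto.UnmarshalJSONEnum(FOO_value, []byte(`"X"`), "FOO")
        fmt.Println(v, err) // 17 <nil>
    }
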
-func (p *Buffer) DebugPrint(s string, b []byte) { - var u uint64 - - obuf := p.buf - sindex := p.index - p.buf = b - p.index = 0 - depth := 0 - - fmt.Printf("\n--- %s ---\n", s) - -out: - for { - for i := 0; i < depth; i++ { - fmt.Print(" ") - } - - index := p.index - if index == len(p.buf) { - break - } - - op, err := p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: fetching op err %v\n", index, err) - break out - } - tag := op >> 3 - wire := op & 7 - - switch wire { - default: - fmt.Printf("%3d: t=%3d unknown wire=%d\n", - index, tag, wire) - break out - - case WireBytes: - var r []byte - - r, err = p.DecodeRawBytes(false) - if err != nil { - break out - } - fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) - if len(r) <= 6 { - for i := 0; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } else { - for i := 0; i < 3; i++ { - fmt.Printf(" %.2x", r[i]) - } - fmt.Printf(" ..") - for i := len(r) - 3; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } - fmt.Printf("\n") - - case WireFixed32: - u, err = p.DecodeFixed32() - if err != nil { - fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) - - case WireFixed64: - u, err = p.DecodeFixed64() - if err != nil { - fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - - case WireVarint: - u, err = p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) - - case WireStartGroup: - fmt.Printf("%3d: t=%3d start\n", index, tag) - depth++ - - case WireEndGroup: - depth-- - fmt.Printf("%3d: t=%3d end\n", index, tag) - } - } - - if depth != 0 { - fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) - } - fmt.Printf("\n") - - p.buf = obuf - p.index = sindex -} - -// SetDefaults sets unset protocol buffer fields to their default values. -// It only modifies fields that are both unset and have defined defaults. -// It recursively sets default values in any non-nil sub-messages. -func SetDefaults(pb Message) { - setDefaults(reflect.ValueOf(pb), true, false) -} - -// v is a pointer to a struct. -func setDefaults(v reflect.Value, recur, zeros bool) { - v = v.Elem() - - defaultMu.RLock() - dm, ok := defaults[v.Type()] - defaultMu.RUnlock() - if !ok { - dm = buildDefaultMessage(v.Type()) - defaultMu.Lock() - defaults[v.Type()] = dm - defaultMu.Unlock() - } - - for _, sf := range dm.scalars { - f := v.Field(sf.index) - if !f.IsNil() { - // field already set - continue - } - dv := sf.value - if dv == nil && !zeros { - // no explicit default, and don't want to set zeros - continue - } - fptr := f.Addr().Interface() // **T - // TODO: Consider batching the allocations we do here. 
- switch sf.kind { - case reflect.Bool: - b := new(bool) - if dv != nil { - *b = dv.(bool) - } - *(fptr.(**bool)) = b - case reflect.Float32: - f := new(float32) - if dv != nil { - *f = dv.(float32) - } - *(fptr.(**float32)) = f - case reflect.Float64: - f := new(float64) - if dv != nil { - *f = dv.(float64) - } - *(fptr.(**float64)) = f - case reflect.Int32: - // might be an enum - if ft := f.Type(); ft != int32PtrType { - // enum - f.Set(reflect.New(ft.Elem())) - if dv != nil { - f.Elem().SetInt(int64(dv.(int32))) - } - } else { - // int32 field - i := new(int32) - if dv != nil { - *i = dv.(int32) - } - *(fptr.(**int32)) = i - } - case reflect.Int64: - i := new(int64) - if dv != nil { - *i = dv.(int64) - } - *(fptr.(**int64)) = i - case reflect.String: - s := new(string) - if dv != nil { - *s = dv.(string) - } - *(fptr.(**string)) = s - case reflect.Uint8: - // exceptional case: []byte - var b []byte - if dv != nil { - db := dv.([]byte) - b = make([]byte, len(db)) - copy(b, db) - } else { - b = []byte{} - } - *(fptr.(*[]byte)) = b - case reflect.Uint32: - u := new(uint32) - if dv != nil { - *u = dv.(uint32) - } - *(fptr.(**uint32)) = u - case reflect.Uint64: - u := new(uint64) - if dv != nil { - *u = dv.(uint64) - } - *(fptr.(**uint64)) = u - default: - log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) - } - } - - for _, ni := range dm.nested { - f := v.Field(ni) - // f is *T or []*T or map[T]*T - switch f.Kind() { - case reflect.Ptr: - if f.IsNil() { - continue - } - setDefaults(f, recur, zeros) - - case reflect.Slice: - for i := 0; i < f.Len(); i++ { - e := f.Index(i) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - - case reflect.Map: - for _, k := range f.MapKeys() { - e := f.MapIndex(k) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - } - } -} - -var ( - // defaults maps a protocol buffer struct type to a slice of the fields, - // with its scalar fields set to their proto-declared non-zero default values. - defaultMu sync.RWMutex - defaults = make(map[reflect.Type]defaultMessage) - - int32PtrType = reflect.TypeOf((*int32)(nil)) -) - -// defaultMessage represents information about the default values of a message. -type defaultMessage struct { - scalars []scalarField - nested []int // struct field index of nested messages -} - -type scalarField struct { - index int // struct field index - kind reflect.Kind // element type (the T in *T or []T) - value interface{} // the proto-declared default value, or nil -} - -// t is a struct type. -func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { - sprop := GetProperties(t) - for _, prop := range sprop.Prop { - fi, ok := sprop.decoderTags.get(prop.Tag) - if !ok { - // XXX_unrecognized - continue - } - ft := t.Field(fi).Type - - sf, nested, err := fieldDefault(ft, prop) - switch { - case err != nil: - log.Print(err) - case nested: - dm.nested = append(dm.nested, fi) - case sf != nil: - sf.index = fi - dm.scalars = append(dm.scalars, *sf) - } - } - - return dm -} - -// fieldDefault returns the scalarField for field type ft. -// sf will be nil if the field can not have a default. -// nestedMessage will be true if this is a nested message. -// Note that sf.index is not set on return. 
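
[Editor's aside: fieldDefault and buildDefaultMessage drive the public SetDefaults entry point above. A minimal sketch of the observable behavior, assuming a hypothetical hand-tagged Conf message whose field carries a def= tag component:]

    package main

    import (
        "fmt"

        "github.com/gogo/protobuf/proto"
    )

    // Conf is a hypothetical hand-written stand-in for a generated message
    // whose Port field declares a proto default via the def=8080 tag component.
    type Conf struct {
        Port *int32  `protobuf:"varint,1,opt,name=port,def=8080"`
        Name *string `protobuf:"bytes,2,opt,name=name"`
    }

    func (m *Conf) Reset()         { *m = Conf{} }
    func (m *Conf) String() string { return proto.CompactTextString(m) }
    func (*Conf) ProtoMessage()    {}

    func main() {
        c := &Conf{Name: proto.String("dev")}
        proto.SetDefaults(c)

        fmt.Println(*c.Port)       // 8080: unset field with a default is filled in
        fmt.Println(c.Name == nil) // false: already-set fields are left alone
        fmt.Println(*c.Name)       // dev
    }
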
-func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { - var canHaveDefault bool - switch ft.Kind() { - case reflect.Ptr: - if ft.Elem().Kind() == reflect.Struct { - nestedMessage = true - } else { - canHaveDefault = true // proto2 scalar field - } - - case reflect.Slice: - switch ft.Elem().Kind() { - case reflect.Ptr: - nestedMessage = true // repeated message - case reflect.Uint8: - canHaveDefault = true // bytes field - } - - case reflect.Map: - if ft.Elem().Kind() == reflect.Ptr { - nestedMessage = true // map with message values - } - } - - if !canHaveDefault { - if nestedMessage { - return nil, true, nil - } - return nil, false, nil - } - - // We now know that ft is a pointer or slice. - sf = &scalarField{kind: ft.Elem().Kind()} - - // scalar fields without defaults - if !prop.HasDefault { - return sf, false, nil - } - - // a scalar field: either *T or []byte - switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.String: - sf.value = prop.Default - case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) - } - sf.value = x - default: - return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) - } - - return sf, false, nil -} - -// Map fields may have key types of non-float scalars, strings and enums. -// The easiest way to sort them in some deterministic order is to use fmt. -// If this turns out to be inefficient we can always consider other options, -// such as doing a Schwartzian transform. - -func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{ - vs: vs, - // default Less function: textual comparison - less: func(a, b reflect.Value) bool { - return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) - }, - } - - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; - // numeric keys are sorted numerically. 
- if len(vs) == 0 { - return s - } - switch vs[0].Kind() { - case reflect.Int32, reflect.Int64: - s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } - case reflect.Uint32, reflect.Uint64: - s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } - } - - return s -} - -type mapKeySorter struct { - vs []reflect.Value - less func(a, b reflect.Value) bool -} - -func (s mapKeySorter) Len() int { return len(s.vs) } -func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } -func (s mapKeySorter) Less(i, j int) bool { - return s.less(s.vs[i], s.vs[j]) -} - -// isProto3Zero reports whether v is a zero proto3 value. -func isProto3Zero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint32, reflect.Uint64: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.String: - return v.String() == "" - } - return false -} - -// ProtoPackageIsVersion2 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const GoGoProtoPackageIsVersion2 = true - -// ProtoPackageIsVersion1 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const GoGoProtoPackageIsVersion1 = true diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go deleted file mode 100644 index 4b4f7c9..0000000 --- a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go +++ /dev/null @@ -1,42 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "encoding/json" - "strconv" -) - -func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) { - s, ok := m[value] - if !ok { - s = strconv.Itoa(int(value)) - } - return json.Marshal(s) -} diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go deleted file mode 100644 index fd982de..0000000 --- a/vendor/github.com/gogo/protobuf/proto/message_set.go +++ /dev/null @@ -1,311 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Support for message sets. - */ - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "reflect" - "sort" -) - -// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. -// A message type ID is required for storing a protocol buffer in a message set. -var errNoMessageTypeID = errors.New("proto does not have a message type ID") - -// The first two types (_MessageSet_Item and messageSet) -// model what the protocol compiler produces for the following protocol message: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } -// That is the MessageSet wire format. We can't use a proto to generate these -// because that would introduce a circular dependency between it and this package. - -type _MessageSet_Item struct { - TypeId *int32 `protobuf:"varint,2,req,name=type_id"` - Message []byte `protobuf:"bytes,3,req,name=message"` -} - -type messageSet struct { - Item []*_MessageSet_Item `protobuf:"group,1,rep"` - XXX_unrecognized []byte - // TODO: caching? -} - -// Make sure messageSet is a Message. -var _ Message = (*messageSet)(nil) - -// messageTypeIder is an interface satisfied by a protocol buffer type -// that may be stored in a MessageSet. 
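MarshalJSONEnum above renders an enum as its registered name when one exists and falls back to the decimal number otherwise. A runnable sketch of that contract (the names map is illustrative):

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// marshalEnum renders an enum as its registered name when known,
// falling back to the decimal number, as MarshalJSONEnum does above.
func marshalEnum(names map[int32]string, v int32) ([]byte, error) {
	s, ok := names[v]
	if !ok {
		s = strconv.Itoa(int(v))
	}
	return json.Marshal(s)
}

func main() {
	names := map[int32]string{0: "UNKNOWN", 1: "ACTIVE"}
	b, _ := marshalEnum(names, 1)
	fmt.Println(string(b)) // "ACTIVE"
	b, _ = marshalEnum(names, 7)
	fmt.Println(string(b)) // "7"
}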
-type messageTypeIder interface { - MessageTypeId() int32 -} - -func (ms *messageSet) find(pb Message) *_MessageSet_Item { - mti, ok := pb.(messageTypeIder) - if !ok { - return nil - } - id := mti.MessageTypeId() - for _, item := range ms.Item { - if *item.TypeId == id { - return item - } - } - return nil -} - -func (ms *messageSet) Has(pb Message) bool { - if ms.find(pb) != nil { - return true - } - return false -} - -func (ms *messageSet) Unmarshal(pb Message) error { - if item := ms.find(pb); item != nil { - return Unmarshal(item.Message, pb) - } - if _, ok := pb.(messageTypeIder); !ok { - return errNoMessageTypeID - } - return nil // TODO: return error instead? -} - -func (ms *messageSet) Marshal(pb Message) error { - msg, err := Marshal(pb) - if err != nil { - return err - } - if item := ms.find(pb); item != nil { - // reuse existing item - item.Message = msg - return nil - } - - mti, ok := pb.(messageTypeIder) - if !ok { - return errNoMessageTypeID - } - - mtid := mti.MessageTypeId() - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: &mtid, - Message: msg, - }) - return nil -} - -func (ms *messageSet) Reset() { *ms = messageSet{} } -func (ms *messageSet) String() string { return CompactTextString(ms) } -func (*messageSet) ProtoMessage() {} - -// Support for the message_set_wire_format message option. - -func skipVarint(buf []byte) []byte { - i := 0 - for ; buf[i]&0x80 != 0; i++ { - } - return buf[i+1:] -} - -// MarshalMessageSet encodes the extension map represented by m in the message set wire format. -// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSet(exts interface{}) ([]byte, error) { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - if err := encodeExtensions(exts); err != nil { - return nil, err - } - m, _ = exts.extensionsRead() - case map[int32]Extension: - if err := encodeExtensionsMap(exts); err != nil { - return nil, err - } - m = exts - default: - return nil, errors.New("proto: not an extension map") - } - - // Sort extension IDs to provide a deterministic encoding. - // See also enc_map in encode.go. - ids := make([]int, 0, len(m)) - for id := range m { - ids = append(ids, int(id)) - } - sort.Ints(ids) - - ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} - for _, id := range ids { - e := m[int32(id)] - // Remove the wire type and field number varint, as well as the length varint. - msg := skipVarint(skipVarint(e.enc)) - - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: Int32(int32(id)), - Message: msg, - }) - } - return Marshal(ms) -} - -// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSet(buf []byte, exts interface{}) error { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m = exts.extensionsWrite() - case map[int32]Extension: - m = exts - default: - return errors.New("proto: not an extension map") - } - - ms := new(messageSet) - if err := Unmarshal(buf, ms); err != nil { - return err - } - for _, item := range ms.Item { - id := *item.TypeId - msg := item.Message - - // Restore wire type and field number varint, plus length varint. - // Be careful to preserve duplicate items. 
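UnmarshalMessageSet has to re-frame each stored extension: prepend the field tag varint (number<<3 | wire type 2) and the length varint that MarshalMessageSet stripped with skipVarint. A small sketch of that framing using the standard library's varint helpers (binary.AppendUvarint requires Go 1.19+):

package main

import (
	"encoding/binary"
	"fmt"
)

const wireBytes = 2 // length-delimited wire type

// frameItem prepends the field tag varint and length varint to a payload,
// the framing UnmarshalMessageSet restores for each stored extension.
func frameItem(fieldNum int32, msg []byte) []byte {
	b := binary.AppendUvarint(nil, uint64(fieldNum)<<3|wireBytes)
	b = binary.AppendUvarint(b, uint64(len(msg)))
	return append(b, msg...)
}

func main() {
	b := frameItem(15, []byte{0xAA})
	fmt.Printf("% x\n", b) // 7a 01 aa  (tag 15<<3|2 = 0x7a)
}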
- b := EncodeVarint(uint64(id)<<3 | WireBytes) - if ext, ok := m[id]; ok { - // Existing data; rip off the tag and length varint - // so we join the new data correctly. - // We can assume that ext.enc is set because we are unmarshaling. - o := ext.enc[len(b):] // skip wire type and field number - _, n := DecodeVarint(o) // calculate length of length varint - o = o[n:] // skip length varint - msg = append(o, msg...) // join old data and new data - } - b = append(b, EncodeVarint(uint64(len(msg)))...) - b = append(b, msg...) - - m[id] = Extension{enc: b} - } - return nil -} - -// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. -// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m, _ = exts.extensionsRead() - case map[int32]Extension: - m = exts - default: - return nil, errors.New("proto: not an extension map") - } - var b bytes.Buffer - b.WriteByte('{') - - // Process the map in key order for deterministic output. - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) // int32Slice defined in text.go - - for i, id := range ids { - ext := m[id] - if i > 0 { - b.WriteByte(',') - } - - msd, ok := messageSetMap[id] - if !ok { - // Unknown type; we can't render it, so skip it. - continue - } - fmt.Fprintf(&b, `"[%s]":`, msd.name) - - x := ext.value - if x == nil { - x = reflect.New(msd.t.Elem()).Interface() - if err := Unmarshal(ext.enc, x.(Message)); err != nil { - return nil, err - } - } - d, err := json.Marshal(x) - if err != nil { - return nil, err - } - b.Write(d) - } - b.WriteByte('}') - return b.Bytes(), nil -} - -// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. -// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { - // Common-case fast path. - if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { - return nil - } - - // This is fairly tricky, and it's not clear that it is needed. - return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") -} - -// A global registry of types that can be used in a MessageSet. - -var messageSetMap = make(map[int32]messageSetDesc) - -type messageSetDesc struct { - t reflect.Type // pointer to struct - name string -} - -// RegisterMessageSetType is called from the generated code. -func RegisterMessageSetType(m Message, fieldNum int32, name string) { - messageSetMap[fieldNum] = messageSetDesc{ - t: reflect.TypeOf(m), - name: name, - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go deleted file mode 100644 index fb512e2..0000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go +++ /dev/null @@ -1,484 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build appengine js - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "math" - "reflect" -) - -// A structPointer is a pointer to a struct. -type structPointer struct { - v reflect.Value -} - -// toStructPointer returns a structPointer equivalent to the given reflect value. -// The reflect value must itself be a pointer to a struct. -func toStructPointer(v reflect.Value) structPointer { - return structPointer{v} -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p.v.IsNil() -} - -// Interface returns the struct pointer as an interface value. -func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { - return p.v.Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. -// In this implementation, a field is identified by the sequence of field indices -// passed to reflect's FieldByIndex. -type field []int - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return f.Index -} - -// invalidField is an invalid field identifier. -var invalidField = field(nil) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { return f != nil } - -// field returns the given field in the struct as a reflect value. -func structPointer_field(p structPointer, f field) reflect.Value { - // Special case: an extension map entry with a value of type T - // passes a *T to the struct-handling code with a zero field, - // expecting that it will be treated as equivalent to *struct{ X T }, - // which has the same memory layout. We have to handle that case - // specially, because reflect will panic if we call FieldByIndex on a - // non-struct. - if f == nil { - return p.v.Elem() - } - - return p.v.Elem().FieldByIndex(f) -} - -// ifield returns the given field in the struct as an interface value. -func structPointer_ifield(p structPointer, f field) interface{} { - return structPointer_field(p, f).Addr().Interface() -} - -// Bytes returns the address of a []byte field in the struct. 
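In this reflect-based implementation a field is an index path handed to FieldByIndex, and taking Addr().Interface() yields a typed pointer to it, which is what structPointer_field and structPointer_ifield above compute. A self-contained illustration (struct names hypothetical):

package main

import (
	"fmt"
	"reflect"
)

type inner struct{ N int32 }
type outer struct{ In inner }

func main() {
	v := reflect.ValueOf(&outer{}).Elem()
	// A field identifier is the index path FieldByIndex walks,
	// as in the reflect-based structPointer_field above.
	path := []int{0, 0} // outer.In.N
	p := v.FieldByIndex(path).Addr().Interface().(*int32)
	*p = 7
	fmt.Println(v.Interface()) // {{7}}
}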
-func structPointer_Bytes(p structPointer, f field) *[]byte { - return structPointer_ifield(p, f).(*[]byte) -} - -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return structPointer_ifield(p, f).(*[][]byte) -} - -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return structPointer_ifield(p, f).(**bool) -} - -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return structPointer_ifield(p, f).(*bool) -} - -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return structPointer_ifield(p, f).(*[]bool) -} - -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return structPointer_ifield(p, f).(**string) -} - -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return structPointer_ifield(p, f).(*string) -} - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return structPointer_ifield(p, f).(*[]string) -} - -// Extensions returns the address of an extension map field in the struct. -func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return structPointer_ifield(p, f).(*XXX_InternalExtensions) -} - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return structPointer_ifield(p, f).(*map[int32]Extension) -} - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return structPointer_field(p, f).Addr() -} - -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - structPointer_field(p, f).Set(q.v) -} - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return structPointer{structPointer_field(p, f)} -} - -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { - return structPointerSlice{structPointer_field(p, f)} -} - -// A structPointerSlice represents the address of a slice of pointers to structs -// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. -type structPointerSlice struct { - v reflect.Value -} - -func (p structPointerSlice) Len() int { return p.v.Len() } -func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } -func (p structPointerSlice) Append(q structPointer) { - p.v.Set(reflect.Append(p.v, q.v)) -} - -var ( - int32Type = reflect.TypeOf(int32(0)) - uint32Type = reflect.TypeOf(uint32(0)) - float32Type = reflect.TypeOf(float32(0)) - int64Type = reflect.TypeOf(int64(0)) - uint64Type = reflect.TypeOf(uint64(0)) - float64Type = reflect.TypeOf(float64(0)) -) - -// A word32 represents a field of type *int32, *uint32, *float32, or *enum. -// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. 
-type word32 struct { - v reflect.Value -} - -// IsNil reports whether p is nil. -func word32_IsNil(p word32) bool { - return p.v.IsNil() -} - -// Set sets p to point at a newly allocated word with bits set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - t := p.v.Type().Elem() - switch t { - case int32Type: - if len(o.int32s) == 0 { - o.int32s = make([]int32, uint32PoolSize) - } - o.int32s[0] = int32(x) - p.v.Set(reflect.ValueOf(&o.int32s[0])) - o.int32s = o.int32s[1:] - return - case uint32Type: - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - p.v.Set(reflect.ValueOf(&o.uint32s[0])) - o.uint32s = o.uint32s[1:] - return - case float32Type: - if len(o.float32s) == 0 { - o.float32s = make([]float32, uint32PoolSize) - } - o.float32s[0] = math.Float32frombits(x) - p.v.Set(reflect.ValueOf(&o.float32s[0])) - o.float32s = o.float32s[1:] - return - } - - // must be enum - p.v.Set(reflect.New(t)) - p.v.Elem().SetInt(int64(int32(x))) -} - -// Get gets the bits pointed at by p, as a uint32. -func word32_Get(p word32) uint32 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32{structPointer_field(p, f)} -} - -// A word32Val represents a field of type int32, uint32, float32, or enum. -// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. -type word32Val struct { - v reflect.Value -} - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - switch p.v.Type() { - case int32Type: - p.v.SetInt(int64(x)) - return - case uint32Type: - p.v.SetUint(uint64(x)) - return - case float32Type: - p.v.SetFloat(float64(math.Float32frombits(x))) - return - } - - // must be enum - p.v.SetInt(int64(int32(x))) -} - -// Get gets the bits pointed at by p, as a uint32. -func word32Val_Get(p word32Val) uint32 { - elem := p.v - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val{structPointer_field(p, f)} -} - -// A word32Slice is a slice of 32-bit values. -// That is, v.Type() is []int32, []uint32, []float32, or []enum. 
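word32_Set above amortizes allocations by carving pointers out of pooled backing arrays (o.int32s and friends) instead of allocating each word separately. The trick in isolation (pool size and names illustrative):

package main

import "fmt"

const poolSize = 16

// pool hands out pointers into a shared backing array, refilling when
// exhausted — the allocation-amortizing idea behind word32_Set's
// o.int32s / o.uint32s / o.float32s slices.
type pool struct{ free []int32 }

func (p *pool) newInt32(x int32) *int32 {
	if len(p.free) == 0 {
		p.free = make([]int32, poolSize)
	}
	p.free[0] = x
	ptr := &p.free[0]
	p.free = p.free[1:]
	return ptr
}

func main() {
	var p pool
	a, b := p.newInt32(1), p.newInt32(2)
	fmt.Println(*a, *b) // 1 2
}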
-type word32Slice struct { - v reflect.Value -} - -func (p word32Slice) Append(x uint32) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int32: - elem.SetInt(int64(int32(x))) - case reflect.Uint32: - elem.SetUint(uint64(x)) - case reflect.Float32: - elem.SetFloat(float64(math.Float32frombits(x))) - } -} - -func (p word32Slice) Len() int { - return p.v.Len() -} - -func (p word32Slice) Index(i int) uint32 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) word32Slice { - return word32Slice{structPointer_field(p, f)} -} - -// word64 is like word32 but for 64-bit values. -type word64 struct { - v reflect.Value -} - -func word64_Set(p word64, o *Buffer, x uint64) { - t := p.v.Type().Elem() - switch t { - case int64Type: - if len(o.int64s) == 0 { - o.int64s = make([]int64, uint64PoolSize) - } - o.int64s[0] = int64(x) - p.v.Set(reflect.ValueOf(&o.int64s[0])) - o.int64s = o.int64s[1:] - return - case uint64Type: - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - p.v.Set(reflect.ValueOf(&o.uint64s[0])) - o.uint64s = o.uint64s[1:] - return - case float64Type: - if len(o.float64s) == 0 { - o.float64s = make([]float64, uint64PoolSize) - } - o.float64s[0] = math.Float64frombits(x) - p.v.Set(reflect.ValueOf(&o.float64s[0])) - o.float64s = o.float64s[1:] - return - } - panic("unreachable") -} - -func word64_IsNil(p word64) bool { - return p.v.IsNil() -} - -func word64_Get(p word64) uint64 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") -} - -func structPointer_Word64(p structPointer, f field) word64 { - return word64{structPointer_field(p, f)} -} - -// word64Val is like word32Val but for 64-bit values. 
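word32Slice.Append grows a slice held behind reflect: it reuses spare capacity with SetLen when it can and falls back to reflect.Append otherwise. The same idea as a standalone helper:

package main

import (
	"fmt"
	"reflect"
)

// appendValue grows a slice held in a reflect.Value: reuse spare capacity
// via SetLen when possible, else reflect.Append — as word32Slice.Append does.
func appendValue(slice reflect.Value, x int64) {
	n, m := slice.Len(), slice.Cap()
	if n < m {
		slice.SetLen(n + 1)
	} else {
		slice.Set(reflect.Append(slice, reflect.Zero(slice.Type().Elem())))
	}
	slice.Index(n).SetInt(x)
}

func main() {
	s := []int32{1, 2}
	v := reflect.ValueOf(&s).Elem() // addressable, so SetLen/Set are legal
	appendValue(v, 3)
	fmt.Println(s) // [1 2 3]
}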
-type word64Val struct { - v reflect.Value -} - -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - switch p.v.Type() { - case int64Type: - p.v.SetInt(int64(x)) - return - case uint64Type: - p.v.SetUint(x) - return - case float64Type: - p.v.SetFloat(math.Float64frombits(x)) - return - } - panic("unreachable") -} - -func word64Val_Get(p word64Val) uint64 { - elem := p.v - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") -} - -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val{structPointer_field(p, f)} -} - -type word64Slice struct { - v reflect.Value -} - -func (p word64Slice) Append(x uint64) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int64: - elem.SetInt(int64(int64(x))) - case reflect.Uint64: - elem.SetUint(uint64(x)) - case reflect.Float64: - elem.SetFloat(float64(math.Float64frombits(x))) - } -} - -func (p word64Slice) Len() int { - return p.v.Len() -} - -func (p word64Slice) Index(i int) uint64 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return uint64(elem.Uint()) - case reflect.Float64: - return math.Float64bits(float64(elem.Float())) - } - panic("unreachable") -} - -func structPointer_Word64Slice(p structPointer, f field) word64Slice { - return word64Slice{structPointer_field(p, f)} -} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go deleted file mode 100644 index 1763a5f..0000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go +++ /dev/null @@ -1,85 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2016, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -// +build appengine js - -package proto - -import ( - "reflect" -) - -func structPointer_FieldPointer(p structPointer, f field) structPointer { - panic("not implemented") -} - -func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer { - panic("not implemented") -} - -func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} { - panic("not implemented") -} - -func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} { - panic("not implemented") -} - -func structPointer_GetRefStructPointer(p structPointer, f field) structPointer { - panic("not implemented") -} - -func structPointer_Add(p structPointer, size field) structPointer { - panic("not implemented") -} - -func structPointer_Len(p structPointer, f field) int { - panic("not implemented") -} - -func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader { - panic("not implemented") -} - -func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) { - panic("not implemented") -} - -func structPointer_StructRefSlice(p structPointer, f field, size uintptr) *structRefSlice { - panic("not implemented") -} - -type structRefSlice struct{} - -func (v *structRefSlice) Len() int { - panic("not implemented") -} - -func (v *structRefSlice) Index(i int) structPointer { - panic("not implemented") -} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go deleted file mode 100644 index 6b5567d..0000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go +++ /dev/null @@ -1,270 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. 
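The deleted files select an implementation at build time: pointer_unsafe.go carries // +build !appengine,!js while pointer_reflect.go carries // +build appengine js, so exactly one is compiled into any given target. A minimal sketch of the pattern with hypothetical file and package names (modern //go:build syntax shown):

// ptr_unsafe.go — excluded on appengine and js builds, where package
// unsafe is unavailable or restricted; a sibling ptr_reflect.go would
// carry the inverse constraint (//go:build appengine || js) and provide
// the same API via package reflect.
//go:build !appengine && !js

package fieldptr

import "unsafe"

// fieldAt returns a pointer to the field at byte offset off within the
// struct at base — the core operation of the unsafe implementation above.
func fieldAt(base unsafe.Pointer, off uintptr) unsafe.Pointer {
	return unsafe.Add(base, off)
}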
- -package proto - -import ( - "reflect" - "unsafe" -) - -// NOTE: These type_Foo functions would more idiomatically be methods, -// but Go does not allow methods on pointer types, and we must preserve -// some pointer type for the garbage collector. We use these -// funcs with clunky names as our poor approximation to methods. -// -// An alternative would be -// type structPointer struct { p unsafe.Pointer } -// but that does not registerize as well. - -// A structPointer is a pointer to a struct. -type structPointer unsafe.Pointer - -// toStructPointer returns a structPointer equivalent to the given reflect value. -func toStructPointer(v reflect.Value) structPointer { - return structPointer(unsafe.Pointer(v.Pointer())) -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p == nil -} - -// Interface returns the struct pointer, assumed to have element type t, -// as an interface value. -func structPointer_Interface(p structPointer, t reflect.Type) interface{} { - return reflect.NewAt(t, unsafe.Pointer(p)).Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. -// In this implementation, a field is identified by its byte offset from the start of the struct. -type field uintptr - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return field(f.Offset) -} - -// invalidField is an invalid field identifier. -const invalidField = ^field(0) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { - return f != ^field(0) -} - -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// ExtMap returns the address of an extension map field in the struct. 
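In the unsafe implementation a field is simply its byte offset from the struct's start (toField returns f.Offset), and an accessor is pointer arithmetic plus a typed cast. A runnable illustration, using unsafe.Add (Go 1.17+) in place of the uintptr arithmetic above:

package main

import (
	"fmt"
	"unsafe"
)

type msg struct {
	A int32
	S string
}

func main() {
	m := msg{A: 1, S: "x"}
	// A field is just a byte offset from the struct's start; adding it
	// to the base pointer yields the field's address, as the unsafe
	// structPointer accessors above do.
	off := unsafe.Offsetof(m.S)
	p := (*string)(unsafe.Add(unsafe.Pointer(&m), off))
	*p = "hello"
	fmt.Println(m.S) // hello
}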
-func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) -} - -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q -} - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { - return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). -type structPointerSlice []structPointer - -func (v *structPointerSlice) Len() int { return len(*v) } -func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } -func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } - -// A word32 is the address of a "pointer to 32-bit value" field. -type word32 **uint32 - -// IsNil reports whether *v is nil. -func word32_IsNil(p word32) bool { - return *p == nil -} - -// Set sets *v to point at a newly allocated word set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - *p = &o.uint32s[0] - o.uint32s = o.uint32s[1:] -} - -// Get gets the value pointed at by *v. -func word32_Get(p word32) uint32 { - return **p -} - -// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// A word32Val is the address of a 32-bit value field. -type word32Val *uint32 - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - *p = x -} - -// Get gets the value pointed at by p. -func word32Val_Get(p word32Val) uint32 { - return *p -} - -// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// A word32Slice is a slice of 32-bit values. -type word32Slice []uint32 - -func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } -func (v *word32Slice) Len() int { return len(*v) } -func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } - -// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) *word32Slice { - return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// word64 is like word32 but for 64-bit values. 
-type word64 **uint64 - -func word64_Set(p word64, o *Buffer, x uint64) { - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - *p = &o.uint64s[0] - o.uint64s = o.uint64s[1:] -} - -func word64_IsNil(p word64) bool { - return *p == nil -} - -func word64_Get(p word64) uint64 { - return **p -} - -func structPointer_Word64(p structPointer, f field) word64 { - return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// word64Val is like word32Val but for 64-bit values. -type word64Val *uint64 - -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - *p = x -} - -func word64Val_Get(p word64Val) uint64 { - return *p -} - -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// word64Slice is like word32Slice but for 64-bit values. -type word64Slice []uint64 - -func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } -func (v *word64Slice) Len() int { return len(*v) } -func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } - -func structPointer_Word64Slice(p structPointer, f field) *word64Slice { - return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go deleted file mode 100644 index f156a29..0000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go +++ /dev/null @@ -1,128 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. 
- -package proto - -import ( - "reflect" - "unsafe" -) - -func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} { - point := unsafe.Pointer(uintptr(p) + uintptr(f)) - r := reflect.NewAt(t, point) - return r.Interface() -} - -func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} { - point := unsafe.Pointer(uintptr(p) + uintptr(f)) - r := reflect.NewAt(t, point) - if r.Elem().IsNil() { - return nil - } - return r.Elem().Interface() -} - -func copyUintPtr(oldptr, newptr uintptr, size int) { - oldbytes := make([]byte, 0) - oldslice := (*reflect.SliceHeader)(unsafe.Pointer(&oldbytes)) - oldslice.Data = oldptr - oldslice.Len = size - oldslice.Cap = size - newbytes := make([]byte, 0) - newslice := (*reflect.SliceHeader)(unsafe.Pointer(&newbytes)) - newslice.Data = newptr - newslice.Len = size - newslice.Cap = size - copy(newbytes, oldbytes) -} - -func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) { - copyUintPtr(uintptr(oldptr), uintptr(newptr), size) -} - -func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer { - size := typ.Elem().Size() - - oldHeader := structPointer_GetSliceHeader(base, f) - oldSlice := reflect.NewAt(typ, unsafe.Pointer(oldHeader)).Elem() - newLen := oldHeader.Len + 1 - newSlice := reflect.MakeSlice(typ, newLen, newLen) - reflect.Copy(newSlice, oldSlice) - bas := toStructPointer(newSlice) - oldHeader.Data = uintptr(bas) - oldHeader.Len = newLen - oldHeader.Cap = newLen - - return structPointer(unsafe.Pointer(uintptr(unsafe.Pointer(bas)) + uintptr(uintptr(newLen-1)*size))) -} - -func structPointer_FieldPointer(p structPointer, f field) structPointer { - return structPointer(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -func structPointer_GetRefStructPointer(p structPointer, f field) structPointer { - return structPointer((*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader { - return (*reflect.SliceHeader)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -func structPointer_Add(p structPointer, size field) structPointer { - return structPointer(unsafe.Pointer(uintptr(p) + uintptr(size))) -} - -func structPointer_Len(p structPointer, f field) int { - return len(*(*[]interface{})(unsafe.Pointer(structPointer_GetRefStructPointer(p, f)))) -} - -func structPointer_StructRefSlice(p structPointer, f field, size uintptr) *structRefSlice { - return &structRefSlice{p: p, f: f, size: size} -} - -// A structRefSlice represents a slice of structs (themselves submessages or groups). -type structRefSlice struct { - p structPointer - f field - size uintptr -} - -func (v *structRefSlice) Len() int { - return structPointer_Len(v.p, v.f) -} - -func (v *structRefSlice) Index(i int) structPointer { - ss := structPointer_GetStructPointer(v.p, v.f) - ss1 := structPointer_GetRefStructPointer(ss, 0) - return structPointer_Add(ss1, field(uintptr(i)*v.size)) -} diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go deleted file mode 100644 index 2a69e88..0000000 --- a/vendor/github.com/gogo/protobuf/proto/properties.go +++ /dev/null @@ -1,971 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
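copyUintPtr above materializes byte slices over raw addresses by filling in reflect.SliceHeader fields by hand. Purely for orientation: in Go 1.17+ the same copy can be written with unsafe.Slice. This is a sketch of the idea, not the removed code:

package main

import (
	"fmt"
	"unsafe"
)

// copyBytes copies size bytes from src to dst, viewing each raw pointer
// as a byte slice — what copyUintPtr builds by hand with reflect.SliceHeader.
func copyBytes(dst, src unsafe.Pointer, size int) {
	copy(unsafe.Slice((*byte)(dst), size), unsafe.Slice((*byte)(src), size))
}

func main() {
	a := [4]byte{1, 2, 3, 4}
	var b [4]byte
	copyBytes(unsafe.Pointer(&b), unsafe.Pointer(&a), len(a))
	fmt.Println(b) // [1 2 3 4]
}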
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "fmt" - "log" - "os" - "reflect" - "sort" - "strconv" - "strings" - "sync" -) - -const debug bool = false - -// Constants that identify the encoding of a value on the wire. -const ( - WireVarint = 0 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 - WireFixed32 = 5 -) - -const startSize = 10 // initial slice/string sizes - -// Encoders are defined in encode.go -// An encoder outputs the full representation of a field, including its -// tag and encoder type. -type encoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueEncoder encodes a single integer in a particular encoding. -type valueEncoder func(o *Buffer, x uint64) error - -// Sizers are defined in encode.go -// A sizer returns the encoded size of a field, including its tag and encoder -// type. -type sizer func(prop *Properties, base structPointer) int - -// A valueSizer returns the encoded size of a single integer in a particular -// encoding. -type valueSizer func(x uint64) int - -// Decoders are defined in decode.go -// A decoder creates a value from its wire representation. -// Unrecognized subelements are saved in unrec. -type decoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueDecoder decodes a single integer in a particular encoding. -type valueDecoder func(o *Buffer) (x uint64, err error) - -// A oneofMarshaler does the marshaling for all oneof fields in a message. -type oneofMarshaler func(Message, *Buffer) error - -// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. -type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) - -// A oneofSizer does the sizing for all oneof fields in a message. -type oneofSizer func(Message) int - -// tagMap is an optimization over map[int]int for typical protocol buffer -// use-cases. 
Encoded protocol buffers are often in tag order with small tag -// numbers. -type tagMap struct { - fastTags []int - slowTags map[int]int -} - -// tagMapFastLimit is the upper bound on the tag number that will be stored in -// the tagMap slice rather than its map. -const tagMapFastLimit = 1024 - -func (p *tagMap) get(t int) (int, bool) { - if t > 0 && t < tagMapFastLimit { - if t >= len(p.fastTags) { - return 0, false - } - fi := p.fastTags[t] - return fi, fi >= 0 - } - fi, ok := p.slowTags[t] - return fi, ok -} - -func (p *tagMap) put(t int, fi int) { - if t > 0 && t < tagMapFastLimit { - for len(p.fastTags) < t+1 { - p.fastTags = append(p.fastTags, -1) - } - p.fastTags[t] = fi - return - } - if p.slowTags == nil { - p.slowTags = make(map[int]int) - } - p.slowTags[t] = fi -} - -// StructProperties represents properties for all the fields of a struct. -// decoderTags and decoderOrigNames should only be used by the decoder. -type StructProperties struct { - Prop []*Properties // properties for each field - reqCount int // required count - decoderTags tagMap // map from proto tag to struct field number - decoderOrigNames map[string]int // map from original name to struct field number - order []int // list of struct field numbers in tag order - unrecField field // field id of the XXX_unrecognized []byte field - extendable bool // is this an extendable proto - - oneofMarshaler oneofMarshaler - oneofUnmarshaler oneofUnmarshaler - oneofSizer oneofSizer - stype reflect.Type - - // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the original name of a field. - OneofTypes map[string]*OneofProperties -} - -// OneofProperties represents information about a specific field in a oneof. -type OneofProperties struct { - Type reflect.Type // pointer to generated struct type for this oneof field - Field int // struct field number of the containing oneof in the message - Prop *Properties -} - -// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. -// See encode.go, (*Buffer).enc_struct. - -func (sp *StructProperties) Len() int { return len(sp.order) } -func (sp *StructProperties) Less(i, j int) bool { - return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag -} -func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } - -// Properties represents the protocol-specific behavior of a single struct field. 
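tagMap above is a hybrid lookup: a dense slice indexed by tag for tags below 1024, with a map spillover for anything larger, exploiting the fact that real messages overwhelmingly use small tag numbers. The structure in isolation (names illustrative):

package main

import "fmt"

const fastLimit = 1024

// tagIndex maps protobuf tag → field index: a dense slice for small
// tags, a lazily allocated map for large ones — the tagMap layout above.
type tagIndex struct {
	fast []int
	slow map[int]int
}

func (t *tagIndex) put(tag, fi int) {
	if tag > 0 && tag < fastLimit {
		for len(t.fast) < tag+1 {
			t.fast = append(t.fast, -1) // -1 marks "unset"
		}
		t.fast[tag] = fi
		return
	}
	if t.slow == nil {
		t.slow = make(map[int]int)
	}
	t.slow[tag] = fi
}

func (t *tagIndex) get(tag int) (int, bool) {
	if tag > 0 && tag < fastLimit {
		if tag >= len(t.fast) {
			return 0, false
		}
		fi := t.fast[tag]
		return fi, fi >= 0
	}
	fi, ok := t.slow[tag]
	return fi, ok
}

func main() {
	var t tagIndex
	t.put(3, 0)
	t.put(100000, 1)
	fmt.Println(t.get(3))      // 0 true
	fmt.Println(t.get(100000)) // 1 true
}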
-type Properties struct { - Name string // name of the field, for error messages - OrigName string // original name before protocol compiler (always set) - JSONName string // name to use for JSON; determined by protoc - Wire string - WireType int - Tag int - Required bool - Optional bool - Repeated bool - Packed bool // relevant for repeated primitives only - Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field; set for []byte only - oneof bool // whether this is a oneof field - - Default string // default value - HasDefault bool // whether an explicit default was provided - CustomType string - CastType string - StdTime bool - StdDuration bool - - enc encoder - valEnc valueEncoder // set for bool and numeric types only - field field - tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) - tagbuf [8]byte - stype reflect.Type // set for struct types only - sstype reflect.Type // set for slices of structs types only - ctype reflect.Type // set for custom types only - sprop *StructProperties // set for struct types only - isMarshaler bool - isUnmarshaler bool - - mtype reflect.Type // set for map types only - mkeyprop *Properties // set for map types only - mvalprop *Properties // set for map types only - - size sizer - valSize valueSizer // set for bool and numeric types only - - dec decoder - valDec valueDecoder // set for bool and numeric types only - - // If this is a packable field, this will be the decoder for the packed version of the field. - packedDec decoder -} - -// String formats the properties in the protobuf struct field tag style. -func (p *Properties) String() string { - s := p.Wire - s = "," - s += strconv.Itoa(p.Tag) - if p.Required { - s += ",req" - } - if p.Optional { - s += ",opt" - } - if p.Repeated { - s += ",rep" - } - if p.Packed { - s += ",packed" - } - s += ",name=" + p.OrigName - if p.JSONName != p.OrigName { - s += ",json=" + p.JSONName - } - if p.proto3 { - s += ",proto3" - } - if p.oneof { - s += ",oneof" - } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } - if p.HasDefault { - s += ",def=" + p.Default - } - return s -} - -// Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(s string) { - // "bytes,49,opt,name=foo,def=hello!" - fields := strings.Split(s, ",") // breaks def=, but handled below. 
- if len(fields) < 2 { - fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) - return - } - - p.Wire = fields[0] - switch p.Wire { - case "varint": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeVarint - p.valDec = (*Buffer).DecodeVarint - p.valSize = sizeVarint - case "fixed32": - p.WireType = WireFixed32 - p.valEnc = (*Buffer).EncodeFixed32 - p.valDec = (*Buffer).DecodeFixed32 - p.valSize = sizeFixed32 - case "fixed64": - p.WireType = WireFixed64 - p.valEnc = (*Buffer).EncodeFixed64 - p.valDec = (*Buffer).DecodeFixed64 - p.valSize = sizeFixed64 - case "zigzag32": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag32 - p.valDec = (*Buffer).DecodeZigzag32 - p.valSize = sizeZigzag32 - case "zigzag64": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag64 - p.valDec = (*Buffer).DecodeZigzag64 - p.valSize = sizeZigzag64 - case "bytes", "group": - p.WireType = WireBytes - // no numeric converter for non-numeric types - default: - fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) - return - } - - var err error - p.Tag, err = strconv.Atoi(fields[1]) - if err != nil { - return - } - - for i := 2; i < len(fields); i++ { - f := fields[i] - switch { - case f == "req": - p.Required = true - case f == "opt": - p.Optional = true - case f == "rep": - p.Repeated = true - case f == "packed": - p.Packed = true - case strings.HasPrefix(f, "name="): - p.OrigName = f[5:] - case strings.HasPrefix(f, "json="): - p.JSONName = f[5:] - case strings.HasPrefix(f, "enum="): - p.Enum = f[5:] - case f == "proto3": - p.proto3 = true - case f == "oneof": - p.oneof = true - case strings.HasPrefix(f, "def="): - p.HasDefault = true - p.Default = f[4:] // rest of string - if i+1 < len(fields) { - // Commas aren't escaped, and def is always last. - p.Default += "," + strings.Join(fields[i+1:], ",") - break - } - case strings.HasPrefix(f, "embedded="): - p.OrigName = strings.Split(f, "=")[1] - case strings.HasPrefix(f, "customtype="): - p.CustomType = strings.Split(f, "=")[1] - case strings.HasPrefix(f, "casttype="): - p.CastType = strings.Split(f, "=")[1] - case f == "stdtime": - p.StdTime = true - case f == "stdduration": - p.StdDuration = true - } - } -} - -func logNoSliceEnc(t1, t2 reflect.Type) { - fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) -} - -var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() - -// Initialize the fields for encoding and decoding. 
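Properties.Parse above splits the struct tag on commas and then repairs def= values — which may legitimately contain commas and always come last — by rejoining the remaining fields. That recovery step as a standalone sketch (function name illustrative):

package main

import (
	"fmt"
	"strings"
)

// parseTag pulls the wire type, tag number, and default out of a
// protobuf struct tag, rejoining everything after "def=" because
// defaults may themselves contain commas, as Properties.Parse does.
func parseTag(tag string) (wire, num, def string) {
	fields := strings.Split(tag, ",")
	if len(fields) < 2 {
		return
	}
	wire, num = fields[0], fields[1]
	for i := 2; i < len(fields); i++ {
		if strings.HasPrefix(fields[i], "def=") {
			def = strings.Join(fields[i:], ",")[len("def="):]
			break
		}
	}
	return
}

func main() {
	w, n, d := parseTag("bytes,49,opt,name=foo,def=hello,world")
	fmt.Println(w, n, d) // bytes 49 hello,world
}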
-func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - p.enc = nil - p.dec = nil - p.size = nil - isMap := typ.Kind() == reflect.Map - if len(p.CustomType) > 0 && !isMap { - p.setCustomEncAndDec(typ) - p.setTag(lockGetProp) - return - } - if p.StdTime && !isMap { - p.setTimeEncAndDec(typ) - p.setTag(lockGetProp) - return - } - if p.StdDuration && !isMap { - p.setDurationEncAndDec(typ) - p.setTag(lockGetProp) - return - } - switch t1 := typ; t1.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) - - // proto3 scalar types - - case reflect.Bool: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_bool - p.dec = (*Buffer).dec_proto3_bool - p.size = size_proto3_bool - } else { - p.enc = (*Buffer).enc_ref_bool - p.dec = (*Buffer).dec_proto3_bool - p.size = size_ref_bool - } - case reflect.Int32: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_int32 - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_int32 - } else { - p.enc = (*Buffer).enc_ref_int32 - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_ref_int32 - } - case reflect.Uint32: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_uint32 - p.dec = (*Buffer).dec_proto3_int32 // can reuse - p.size = size_proto3_uint32 - } else { - p.enc = (*Buffer).enc_ref_uint32 - p.dec = (*Buffer).dec_proto3_int32 // can reuse - p.size = size_ref_uint32 - } - case reflect.Int64, reflect.Uint64: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_int64 - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - } else { - p.enc = (*Buffer).enc_ref_int64 - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_ref_int64 - } - case reflect.Float32: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_uint32 - } else { - p.enc = (*Buffer).enc_ref_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_ref_uint32 - } - case reflect.Float64: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - } else { - p.enc = (*Buffer).enc_ref_int64 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_ref_int64 - } - case reflect.String: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_string - p.dec = (*Buffer).dec_proto3_string - p.size = size_proto3_string - } else { - p.enc = (*Buffer).enc_ref_string - p.dec = (*Buffer).dec_proto3_string - p.size = size_ref_string - } - case reflect.Struct: - p.stype = typ - p.isMarshaler = isMarshaler(typ) - p.isUnmarshaler = isUnmarshaler(typ) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_ref_struct_message - p.dec = (*Buffer).dec_ref_struct_message - p.size = size_ref_struct_message - } else { - fmt.Fprintf(os.Stderr, "proto: no coders for struct %T\n", typ) - } - - case reflect.Ptr: - switch t2 := t1.Elem(); t2.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) - break - case reflect.Bool: - p.enc = (*Buffer).enc_bool - p.dec = (*Buffer).dec_bool - p.size = size_bool - case reflect.Int32: - p.enc = (*Buffer).enc_int32 - p.dec = (*Buffer).dec_int32 - p.size = size_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_uint32 - p.dec = (*Buffer).dec_int32 // can reuse - p.size = size_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_int64 - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_uint32 // can 
just treat them as bits - p.dec = (*Buffer).dec_int32 - p.size = size_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_int64 // can just treat them as bits - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.String: - p.enc = (*Buffer).enc_string - p.dec = (*Buffer).dec_string - p.size = size_string - case reflect.Struct: - p.stype = t1.Elem() - p.isMarshaler = isMarshaler(t1) - p.isUnmarshaler = isUnmarshaler(t1) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_struct_message - p.dec = (*Buffer).dec_struct_message - p.size = size_struct_message - } else { - p.enc = (*Buffer).enc_struct_group - p.dec = (*Buffer).dec_struct_group - p.size = size_struct_group - } - } - - case reflect.Slice: - switch t2 := t1.Elem(); t2.Kind() { - default: - logNoSliceEnc(t1, t2) - break - case reflect.Bool: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_bool - p.size = size_slice_packed_bool - } else { - p.enc = (*Buffer).enc_slice_bool - p.size = size_slice_bool - } - p.dec = (*Buffer).dec_slice_bool - p.packedDec = (*Buffer).dec_slice_packed_bool - case reflect.Int32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int32 - p.size = size_slice_packed_int32 - } else { - p.enc = (*Buffer).enc_slice_int32 - p.size = size_slice_int32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Uint32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Int64, reflect.Uint64: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - case reflect.Uint8: - p.dec = (*Buffer).dec_slice_byte - if p.proto3 { - p.enc = (*Buffer).enc_proto3_slice_byte - p.size = size_proto3_slice_byte - } else { - p.enc = (*Buffer).enc_slice_byte - p.size = size_slice_byte - } - case reflect.Float32, reflect.Float64: - switch t2.Bits() { - case 32: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case 64: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - default: - logNoSliceEnc(t1, t2) - break - } - case reflect.String: - p.enc = (*Buffer).enc_slice_string - p.dec = (*Buffer).dec_slice_string - p.size = size_slice_string - case reflect.Ptr: - switch t3 := t2.Elem(); t3.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) - break - case reflect.Struct: - p.stype = t2.Elem() - p.isMarshaler = isMarshaler(t2) - p.isUnmarshaler = isUnmarshaler(t2) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_slice_struct_message - p.dec = (*Buffer).dec_slice_struct_message - p.size = size_slice_struct_message - } else { - p.enc = (*Buffer).enc_slice_struct_group - p.dec = (*Buffer).dec_slice_struct_group - p.size = 
size_slice_struct_group - } - } - case reflect.Slice: - switch t2.Elem().Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) - break - case reflect.Uint8: - p.enc = (*Buffer).enc_slice_slice_byte - p.dec = (*Buffer).dec_slice_slice_byte - p.size = size_slice_slice_byte - } - case reflect.Struct: - p.setSliceOfNonPointerStructs(t1) - } - - case reflect.Map: - p.enc = (*Buffer).enc_new_map - p.dec = (*Buffer).dec_new_map - p.size = size_new_map - - p.mtype = t1 - p.mkeyprop = &Properties{} - p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.mvalprop = &Properties{} - vtype := p.mtype.Elem() - if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { - // The value type is not a message (*T) or bytes ([]byte), - // so we need encoders for the pointer to this type. - vtype = reflect.PtrTo(vtype) - } - - p.mvalprop.CustomType = p.CustomType - p.mvalprop.StdDuration = p.StdDuration - p.mvalprop.StdTime = p.StdTime - p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - p.setTag(lockGetProp) -} - -func (p *Properties) setTag(lockGetProp bool) { - // precalculate tag code - wire := p.WireType - if p.Packed { - wire = WireBytes - } - x := uint32(p.Tag)<<3 | uint32(wire) - i := 0 - for i = 0; x > 127; i++ { - p.tagbuf[i] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - p.tagbuf[i] = uint8(x) - p.tagcode = p.tagbuf[0 : i+1] - - if p.stype != nil { - if lockGetProp { - p.sprop = GetProperties(p.stype) - } else { - p.sprop = getPropertiesLocked(p.stype) - } - } -} - -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() - unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() -) - -// isMarshaler reports whether type t implements Marshaler. -func isMarshaler(t reflect.Type) bool { - return t.Implements(marshalerType) -} - -// isUnmarshaler reports whether type t implements Unmarshaler. -func isUnmarshaler(t reflect.Type) bool { - return t.Implements(unmarshalerType) -} - -// Init populates the properties from a protocol buffer struct tag. -func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.init(typ, name, tag, f, true) -} - -func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { - // "bytes,49,opt,def=hello!" - p.Name = name - p.OrigName = name - if f != nil { - p.field = toField(f) - } - if tag == "" { - return - } - p.Parse(tag) - p.setEncAndDec(typ, f, lockGetProp) -} - -var ( - propertiesMu sync.RWMutex - propertiesMap = make(map[reflect.Type]*StructProperties) -) - -// GetProperties returns the list of properties for the type represented by t. -// t must represent a generated struct type of a protocol message. -func GetProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic("proto: type must have kind struct") - } - - // Most calls to GetProperties in a long-running program will be - // retrieving details for types we have seen before. - propertiesMu.RLock() - sprop, ok := propertiesMap[t] - propertiesMu.RUnlock() - if ok { - if collectStats { - stats.Chit++ - } - return sprop - } - - propertiesMu.Lock() - sprop = getPropertiesLocked(t) - propertiesMu.Unlock() - return sprop -} - -// getPropertiesLocked requires that propertiesMu is held. 
-func getPropertiesLocked(t reflect.Type) *StructProperties { - if prop, ok := propertiesMap[t]; ok { - if collectStats { - stats.Chit++ - } - return prop - } - if collectStats { - stats.Cmiss++ - } - - prop := new(StructProperties) - // in case of recursive protos, fill this in now. - propertiesMap[t] = prop - - // build properties - prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) || - reflect.PtrTo(t).Implements(extendableProtoV1Type) || - reflect.PtrTo(t).Implements(extendableBytesType) - prop.unrecField = invalidField - prop.Prop = make([]*Properties, t.NumField()) - prop.order = make([]int, t.NumField()) - - isOneofMessage := false - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - p := new(Properties) - name := f.Name - p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - - if f.Name == "XXX_InternalExtensions" { // special case - p.enc = (*Buffer).enc_exts - p.dec = nil // not needed - p.size = size_exts - } else if f.Name == "XXX_extensions" { // special case - if len(f.Tag.Get("protobuf")) > 0 { - p.enc = (*Buffer).enc_ext_slice_byte - p.dec = nil // not needed - p.size = size_ext_slice_byte - } else { - p.enc = (*Buffer).enc_map - p.dec = nil // not needed - p.size = size_map - } - } else if f.Name == "XXX_unrecognized" { // special case - prop.unrecField = toField(&f) - } - oneof := f.Tag.Get("protobuf_oneof") // special case - if oneof != "" { - isOneofMessage = true - // Oneof fields don't use the traditional protobuf tag. - p.OrigName = oneof - } - prop.Prop[i] = p - prop.order[i] = i - if debug { - print(i, " ", f.Name, " ", t.String(), " ") - if p.Tag > 0 { - print(p.String()) - } - print("\n") - } - if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" { - fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") - } - } - - // Re-order prop.order. - sort.Sort(prop) - - type oneofMessage interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) - } - if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); isOneofMessage && ok { - var oots []interface{} - prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() - prop.stype = t - - // Interpret oneof metadata. - prop.OneofTypes = make(map[string]*OneofProperties) - for _, oot := range oots { - oop := &OneofProperties{ - Type: reflect.ValueOf(oot).Type(), // *T - Prop: new(Properties), - } - sft := oop.Type.Elem().Field(0) - oop.Prop.Name = sft.Name - oop.Prop.Parse(sft.Tag.Get("protobuf")) - // There will be exactly one interface field that - // this new value is assignable to. - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Type.Kind() != reflect.Interface { - continue - } - if !oop.Type.AssignableTo(f.Type) { - continue - } - oop.Field = i - break - } - prop.OneofTypes[oop.Prop.OrigName] = oop - } - } - - // build required counts - // build tags - reqCount := 0 - prop.decoderOrigNames = make(map[string]int) - for i, p := range prop.Prop { - if strings.HasPrefix(p.Name, "XXX_") { - // Internal fields should not appear in tags/origNames maps. - // They are handled specially when encoding and decoding. - continue - } - if p.Required { - reqCount++ - } - prop.decoderTags.put(p.Tag, i) - prop.decoderOrigNames[p.OrigName] = i - } - prop.reqCount = reqCount - - return prop -} - -// Return the Properties object for the x[0]'th field of the structure. 
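The GetProperties/getPropertiesLocked pair above is a read-mostly cache: a shared RLock on the hot path, then a full Lock plus a re-check on a miss. A self-contained sketch of the same pattern (the describe helper is invented for illustration):

package main

import (
	"fmt"
	"reflect"
	"sync"
)

var (
	cacheMu sync.RWMutex
	cache   = map[reflect.Type]string{}
)

func describe(t reflect.Type) string {
	cacheMu.RLock()
	d, ok := cache[t]
	cacheMu.RUnlock()
	if ok {
		return d // fast path: most lookups are for types seen before
	}
	cacheMu.Lock()
	defer cacheMu.Unlock()
	if d, ok := cache[t]; ok {
		return d // another goroutine filled it in between the locks
	}
	d = fmt.Sprintf("%s with %d fields", t.Name(), t.NumField())
	cache[t] = d
	return d
}

func main() {
	type Msg struct{ A, B int32 }
	fmt.Println(describe(reflect.TypeOf(Msg{})))
}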
-func propByIndex(t reflect.Type, x []int) *Properties { - if len(x) != 1 { - fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) - return nil - } - prop := GetProperties(t) - return prop.Prop[x[0]] -} - -// Get the address and type of a pointer to a struct from an interface. -func getbase(pb Message) (t reflect.Type, b structPointer, err error) { - if pb == nil { - err = ErrNil - return - } - // get the reflect type of the pointer to the struct. - t = reflect.TypeOf(pb) - // get the address of the struct. - value := reflect.ValueOf(pb) - b = toStructPointer(value) - return -} - -// A global registry of enum types. -// The generated code will register the generated maps by calling RegisterEnum. - -var enumValueMaps = make(map[string]map[string]int32) -var enumStringMaps = make(map[string]map[int32]string) - -// RegisterEnum is called from the generated code to install the enum descriptor -// maps into the global table to aid parsing text format protocol buffers. -func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { - if _, ok := enumValueMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumValueMaps[typeName] = valueMap - if _, ok := enumStringMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumStringMaps[typeName] = unusedNameMap -} - -// EnumValueMap returns the mapping from names to integers of the -// enum type enumType, or a nil if not found. -func EnumValueMap(enumType string) map[string]int32 { - return enumValueMaps[enumType] -} - -// A registry of all linked message types. -// The string is a fully-qualified proto name ("pkg.Message"). -var ( - protoTypes = make(map[string]reflect.Type) - revProtoTypes = make(map[reflect.Type]string) -) - -// RegisterType is called from generated code and maps from the fully qualified -// proto name to the type (pointer to struct) of the protocol buffer. -func RegisterType(x Message, name string) { - if _, ok := protoTypes[name]; ok { - // TODO: Some day, make this a panic. - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - protoTypes[name] = t - revProtoTypes[t] = name -} - -// MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { - type xname interface { - XXX_MessageName() string - } - if m, ok := x.(xname); ok { - return m.XXX_MessageName() - } - return revProtoTypes[reflect.TypeOf(x)] -} - -// MessageType returns the message type (pointer to struct) for a named message. -func MessageType(name string) reflect.Type { return protoTypes[name] } - -// A registry of all linked proto files. -var ( - protoFiles = make(map[string][]byte) // file name => fileDescriptor -) - -// RegisterFile is called from generated code and maps from the -// full file name of a .proto file to its compressed FileDescriptorProto. -func RegisterFile(filename string, fileDescriptor []byte) { - protoFiles[filename] = fileDescriptor -} - -// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. 
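RegisterType above keeps two maps so lookups work in both directions: proto name to Go type for MessageType, and back again for MessageName. A stripped-down sketch; the name "example.Ping" is made up:

package main

import (
	"fmt"
	"log"
	"reflect"
)

var (
	typesByName = map[string]reflect.Type{}
	namesByType = map[reflect.Type]string{}
)

func register(v interface{}, name string) {
	if _, dup := typesByName[name]; dup {
		// mirrors RegisterType above, which logs rather than panics
		log.Printf("duplicate proto type registered: %s", name)
		return
	}
	t := reflect.TypeOf(v)
	typesByName[name] = t
	namesByType[t] = name
}

type Ping struct{}

func main() {
	register((*Ping)(nil), "example.Ping")
	fmt.Println(typesByName["example.Ping"], namesByType[reflect.TypeOf((*Ping)(nil))])
}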
-func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go deleted file mode 100644 index b6b7176..0000000 --- a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go +++ /dev/null @@ -1,111 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "fmt" - "os" - "reflect" -) - -func (p *Properties) setCustomEncAndDec(typ reflect.Type) { - p.ctype = typ - if p.Repeated { - p.enc = (*Buffer).enc_custom_slice_bytes - p.dec = (*Buffer).dec_custom_slice_bytes - p.size = size_custom_slice_bytes - } else if typ.Kind() == reflect.Ptr { - p.enc = (*Buffer).enc_custom_bytes - p.dec = (*Buffer).dec_custom_bytes - p.size = size_custom_bytes - } else { - p.enc = (*Buffer).enc_custom_ref_bytes - p.dec = (*Buffer).dec_custom_ref_bytes - p.size = size_custom_ref_bytes - } -} - -func (p *Properties) setDurationEncAndDec(typ reflect.Type) { - if p.Repeated { - if typ.Elem().Kind() == reflect.Ptr { - p.enc = (*Buffer).enc_slice_duration - p.dec = (*Buffer).dec_slice_duration - p.size = size_slice_duration - } else { - p.enc = (*Buffer).enc_slice_ref_duration - p.dec = (*Buffer).dec_slice_ref_duration - p.size = size_slice_ref_duration - } - } else if typ.Kind() == reflect.Ptr { - p.enc = (*Buffer).enc_duration - p.dec = (*Buffer).dec_duration - p.size = size_duration - } else { - p.enc = (*Buffer).enc_ref_duration - p.dec = (*Buffer).dec_ref_duration - p.size = size_ref_duration - } -} - -func (p *Properties) setTimeEncAndDec(typ reflect.Type) { - if p.Repeated { - if typ.Elem().Kind() == reflect.Ptr { - p.enc = (*Buffer).enc_slice_time - p.dec = (*Buffer).dec_slice_time - p.size = size_slice_time - } else { - p.enc = (*Buffer).enc_slice_ref_time - p.dec = (*Buffer).dec_slice_ref_time - p.size = size_slice_ref_time - } - } else if typ.Kind() == reflect.Ptr { - p.enc = (*Buffer).enc_time - p.dec = (*Buffer).dec_time - p.size = size_time - } else { - p.enc = (*Buffer).enc_ref_time - p.dec = (*Buffer).dec_ref_time - p.size = size_ref_time - } - -} - -func (p *Properties) setSliceOfNonPointerStructs(typ reflect.Type) { - t2 := typ.Elem() - p.sstype = typ - p.stype = t2 - p.isMarshaler = isMarshaler(t2) - p.isUnmarshaler = isUnmarshaler(t2) - p.enc = (*Buffer).enc_slice_ref_struct_message - p.dec = (*Buffer).dec_slice_ref_struct_message - p.size = size_slice_ref_struct_message - if p.Wire != "bytes" { - fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T \n", typ, t2) - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go deleted file mode 100644 index 5a5fd93..0000000 --- a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go +++ /dev/null @@ -1,119 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "io" -) - -func Skip(data []byte) (n int, err error) { - l := len(data) - index := 0 - for index < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - index++ - if data[index-1] < 0x80 { - break - } - } - return index, nil - case 1: - index += 8 - return index, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - index += length - return index, nil - case 3: - for { - var innerWire uint64 - var start int = index - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := Skip(data[start:]) - if err != nil { - return 0, err - } - index = start + next - } - return index, nil - case 4: - return index, nil - case 5: - index += 4 - return index, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go deleted file mode 100644 index f609d1d..0000000 --- a/vendor/github.com/gogo/protobuf/proto/text.go +++ /dev/null @@ -1,939 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
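The Skip function a few hunks up is driven entirely by varints: seven payload bits per byte, high bit set on every byte but the last. A minimal reader in the same shape, shown here only for illustration:

package main

import (
	"errors"
	"fmt"
)

func readVarint(data []byte) (v uint64, n int, err error) {
	for shift := uint(0); n < len(data); shift += 7 {
		b := data[n]
		n++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: this was the last byte
			return v, n, nil
		}
	}
	return 0, 0, errors.New("truncated varint")
}

func main() {
	// 300 is encoded on the wire as 0xAC 0x02.
	v, n, _ := readVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n) // 300 2
}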
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for writing the text protocol buffer format. - -import ( - "bufio" - "bytes" - "encoding" - "errors" - "fmt" - "io" - "log" - "math" - "reflect" - "sort" - "strings" - "sync" - "time" -) - -var ( - newline = []byte("\n") - spaces = []byte(" ") - gtNewline = []byte(">\n") - endBraceNewline = []byte("}\n") - backslashN = []byte{'\\', 'n'} - backslashR = []byte{'\\', 'r'} - backslashT = []byte{'\\', 't'} - backslashDQ = []byte{'\\', '"'} - backslashBS = []byte{'\\', '\\'} - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -// textWriter is an io.Writer that tracks its indentation level. -type textWriter struct { - ind int - complete bool // if the current position is a complete line - compact bool // whether to write out as a one-liner - w writer -} - -func (w *textWriter) WriteString(s string) (n int, err error) { - if !strings.Contains(s, "\n") { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - return io.WriteString(w.w, s) - } - // WriteString is typically called without newlines, so this - // codepath and its copy are rare. We copy to avoid - // duplicating all of Write's logic here. 
- return w.Write([]byte(s)) -} - -func (w *textWriter) Write(p []byte) (n int, err error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - n, err = w.w.Write(p) - w.complete = false - return n, err - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - if err := w.w.WriteByte(' '); err != nil { - return n, err - } - n++ - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - if i+1 < len(frags) { - if err := w.w.WriteByte('\n'); err != nil { - return n, err - } - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - err := w.w.WriteByte(c) - w.complete = c == '\n' - return err -} - -func (w *textWriter) indent() { w.ind++ } - -func (w *textWriter) unindent() { - if w.ind == 0 { - log.Print("proto: textWriter unindented too far") - return - } - w.ind-- -} - -func writeName(w *textWriter, props *Properties) error { - if _, err := w.WriteString(props.OrigName); err != nil { - return err - } - if props.Wire != "group" { - return w.WriteByte(':') - } - return nil -} - -// raw is the interface satisfied by RawMessage. -type raw interface { - Bytes() []byte -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' || ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// isAny reports whether sv is a google.protobuf.Any message -func isAny(sv reflect.Value) bool { - type wkt interface { - XXX_WellKnownType() string - } - t, ok := sv.Addr().Interface().(wkt) - return ok && t.XXX_WellKnownType() == "Any" -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. because -// required messages are not linked in). -// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. 
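textWriter above defers indentation: it remembers whether the last byte ended a line (complete) and only emits the two-spaces-per-level prefix when the next payload arrives. A toy version of the same idea (indentWriter is invented for illustration):

package main

import (
	"bytes"
	"fmt"
	"strings"
)

type indentWriter struct {
	ind      int
	complete bool // at the start of a fresh line
	buf      bytes.Buffer
}

func (w *indentWriter) writeLine(s string) {
	if w.complete {
		// lazily emit the indent, as textWriter.writeIndent does
		w.buf.WriteString(strings.Repeat("  ", w.ind))
	}
	w.buf.WriteString(s)
	w.buf.WriteByte('\n')
	w.complete = true
}

func main() {
	w := &indentWriter{complete: true}
	w.writeLine("outer <")
	w.ind++
	w.writeLine("inner: 1")
	w.ind--
	w.writeLine(">")
	fmt.Print(w.buf.String())
}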
-func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { - turl := sv.FieldByName("TypeUrl") - val := sv.FieldByName("Value") - if !turl.IsValid() || !val.IsValid() { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - b, ok := val.Interface().([]byte) - if !ok { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - parts := strings.Split(turl.String(), "/") - mt := MessageType(parts[len(parts)-1]) - if mt == nil { - return false, nil - } - m := reflect.New(mt.Elem()) - if err := Unmarshal(b, m.Interface().(Message)); err != nil { - return false, nil - } - w.Write([]byte("[")) - u := turl.String() - if requiresQuotes(u) { - writeString(w, u) - } else { - w.Write([]byte(u)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.ind++ - } - if err := tm.writeStruct(w, m.Elem()); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.ind-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { - if tm.ExpandAny && isAny(sv) { - if canExpand, err := tm.writeProto3Any(w, sv); canExpand { - return err - } - } - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < sv.NumField(); i++ { - fv := sv.Field(i) - props := sprops.Prop[i] - name := st.Field(i).Name - - if strings.HasPrefix(name, "XXX_") { - // There are two XXX_ fields: - // XXX_unrecognized []byte - // XXX_extensions map[int32]proto.Extension - // The first is handled here; - // the second is handled at the bottom of this function. - if name == "XXX_unrecognized" && !fv.IsNil() { - if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Field not filled in. This could be an optional field or - // a required field that wasn't filled in. Either way, there - // isn't anything we can show for it. - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - // Repeated field that is empty, or a bytes field that is unused. - continue - } - - if props.Repeated && fv.Kind() == reflect.Slice { - // Repeated field. - for j := 0; j < fv.Len(); j++ { - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - v := fv.Index(j) - if v.Kind() == reflect.Ptr && v.IsNil() { - // A nil message in a repeated field is not valid, - // but we can handle that more gracefully than panicking. - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - continue - } - if len(props.Enum) > 0 { - if err := tm.writeEnum(w, v, props); err != nil { - return err - } - } else if err := tm.writeAny(w, v, props); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Map { - // Map fields are rendered as a repeated struct with key/value fields. 
- keys := fv.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := fv.MapIndex(key) - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - // open struct - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - // key - if _, err := w.WriteString("key:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, key, props.mkeyprop); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - // nil values aren't legal, but we can avoid panicking because of them. - if val.Kind() != reflect.Ptr || !val.IsNil() { - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, val, props.mvalprop); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - // close struct - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { - // empty bytes field - continue - } - if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { - // proto3 non-repeated scalar field; skip if zero value - if isProto3Zero(fv) { - continue - } - } - - if fv.Kind() == reflect.Interface { - // Check if it is a oneof. - if st.Field(i).Tag.Get("protobuf_oneof") != "" { - // fv is nil, or holds a pointer to generated struct. - // That generated struct has exactly one field, - // which has a protobuf struct tag. - if fv.IsNil() { - continue - } - inner := fv.Elem().Elem() // interface -> *T -> T - tag := inner.Type().Field(0).Tag.Get("protobuf") - props = new(Properties) // Overwrite the outer props var, but not its pointee. - props.Parse(tag) - // Write the value in the oneof, not the oneof itself. - fv = inner.Field(0) - - // Special case to cope with malformed messages gracefully: - // If the value in the oneof is a nil pointer, don't panic - // in writeAny. - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Use errors.New so writeAny won't render quotes. - msg := errors.New("/* nil */") - fv = reflect.ValueOf(&msg).Elem() - } - } - } - - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if b, ok := fv.Interface().(raw); ok { - if err := writeRaw(w, b.Bytes()); err != nil { - return err - } - continue - } - - if len(props.Enum) > 0 { - if err := tm.writeEnum(w, fv, props); err != nil { - return err - } - } else if err := tm.writeAny(w, fv, props); err != nil { - return err - } - - if err := w.WriteByte('\n'); err != nil { - return err - } - } - - // Extensions (the XXX_extensions field). - pv := sv - if pv.CanAddr() { - pv = sv.Addr() - } else { - pv = reflect.New(sv.Type()) - pv.Elem().Set(sv) - } - if pv.Type().Implements(extensionRangeType) { - if err := tm.writeExtensions(w, pv); err != nil { - return err - } - } - - return nil -} - -// writeRaw writes an uninterpreted raw message. 
-func writeRaw(w *textWriter, b []byte) error { - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if err := writeUnknownStruct(w, b); err != nil { - return err - } - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - return nil -} - -// writeAny writes an arbitrary field. -func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { - v = reflect.Indirect(v) - - if props != nil { - if len(props.CustomType) > 0 { - custom, ok := v.Interface().(Marshaler) - if ok { - data, err := custom.Marshal() - if err != nil { - return err - } - if err := writeString(w, string(data)); err != nil { - return err - } - return nil - } - } else if len(props.CastType) > 0 { - if _, ok := v.Interface().(interface { - String() string - }); ok { - switch v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - _, err := fmt.Fprintf(w, "%d", v.Interface()) - return err - } - } - } else if props.StdTime { - t, ok := v.Interface().(time.Time) - if !ok { - return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface()) - } - tproto, err := timestampProto(t) - if err != nil { - return err - } - propsCopy := *props // Make a copy so that this is goroutine-safe - propsCopy.StdTime = false - err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy) - return err - } else if props.StdDuration { - d, ok := v.Interface().(time.Duration) - if !ok { - return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface()) - } - dproto := durationProto(d) - propsCopy := *props // Make a copy so that this is goroutine-safe - propsCopy.StdDuration = false - err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy) - return err - } - } - - // Floats have special cases. - if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - x := v.Float() - var b []byte - switch { - case math.IsInf(x, 1): - b = posInf - case math.IsInf(x, -1): - b = negInf - case math.IsNaN(x): - b = nan - } - if b != nil { - _, err := w.Write(b) - return err - } - // Other values are handled below. - } - - // We don't attempt to serialise every possible value type; only those - // that can occur in protocol buffers. - switch v.Kind() { - case reflect.Slice: - // Should only be a []byte; repeated fields are handled in writeStruct. - if err := writeString(w, string(v.Bytes())); err != nil { - return err - } - case reflect.String: - if err := writeString(w, v.String()); err != nil { - return err - } - case reflect.Struct: - // Required/optional group/message. - var bra, ket byte = '<', '>' - if props != nil && props.Wire == "group" { - bra, ket = '{', '}' - } - if err := w.WriteByte(bra); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if etm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = w.Write(text); err != nil { - return err - } - } else if err := tm.writeStruct(w, v); err != nil { - return err - } - w.unindent() - if err := w.WriteByte(ket); err != nil { - return err - } - default: - _, err := fmt.Fprint(w, v.Interface()) - return err - } - return nil -} - -// equivalent to C's isprint. 
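writeAny above special-cases the IEEE float values before the generic path, so the text form stays compatible with the other language implementations. In sketch form:

package main

import (
	"fmt"
	"math"
)

// Matches the posInf/negInf/nan byte slices used by writeAny above.
func floatText(x float64) string {
	switch {
	case math.IsInf(x, 1):
		return "inf"
	case math.IsInf(x, -1):
		return "-inf"
	case math.IsNaN(x):
		return "nan"
	}
	return fmt.Sprint(x)
}

func main() {
	fmt.Println(floatText(math.Inf(1)), floatText(math.Inf(-1)), floatText(math.NaN()), floatText(1.5))
}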
-func isprint(c byte) bool { - return c >= 0x20 && c < 0x7f -} - -// writeString writes a string in the protocol buffer text format. -// It is similar to strconv.Quote except we don't use Go escape sequences, -// we treat the string as a byte sequence, and we use octal escapes. -// These differences are to maintain interoperability with the other -// languages' implementations of the text format. -func writeString(w *textWriter, s string) error { - // use WriteByte here to get any needed indent - if err := w.WriteByte('"'); err != nil { - return err - } - // Loop over the bytes, not the runes. - for i := 0; i < len(s); i++ { - var err error - // Divergence from C++: we don't escape apostrophes. - // There's no need to escape them, and the C++ parser - // copes with a naked apostrophe. - switch c := s[i]; c { - case '\n': - _, err = w.w.Write(backslashN) - case '\r': - _, err = w.w.Write(backslashR) - case '\t': - _, err = w.w.Write(backslashT) - case '"': - _, err = w.w.Write(backslashDQ) - case '\\': - _, err = w.w.Write(backslashBS) - default: - if isprint(c) { - err = w.w.WriteByte(c) - } else { - _, err = fmt.Fprintf(w.w, "\\%03o", c) - } - } - if err != nil { - return err - } - } - return w.WriteByte('"') -} - -func writeUnknownStruct(w *textWriter, data []byte) (err error) { - if !w.compact { - if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { - return err - } - } - b := NewBuffer(data) - for b.index < len(b.buf) { - x, err := b.DecodeVarint() - if err != nil { - _, ferr := fmt.Fprintf(w, "/* %v */\n", err) - return ferr - } - wire, tag := x&7, x>>3 - if wire == WireEndGroup { - w.unindent() - if _, werr := w.Write(endBraceNewline); werr != nil { - return werr - } - continue - } - if _, ferr := fmt.Fprint(w, tag); ferr != nil { - return ferr - } - if wire != WireStartGroup { - if err = w.WriteByte(':'); err != nil { - return err - } - } - if !w.compact || wire == WireStartGroup { - if err = w.WriteByte(' '); err != nil { - return err - } - } - switch wire { - case WireBytes: - buf, e := b.DecodeRawBytes(false) - if e == nil { - _, err = fmt.Fprintf(w, "%q", buf) - } else { - _, err = fmt.Fprintf(w, "/* %v */", e) - } - case WireFixed32: - x, err = b.DecodeFixed32() - err = writeUnknownInt(w, x, err) - case WireFixed64: - x, err = b.DecodeFixed64() - err = writeUnknownInt(w, x, err) - case WireStartGroup: - err = w.WriteByte('{') - w.indent() - case WireVarint: - x, err = b.DecodeVarint() - err = writeUnknownInt(w, x, err) - default: - _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) - } - if err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - return nil -} - -func writeUnknownInt(w *textWriter, x uint64, err error) error { - if err == nil { - _, err = fmt.Fprint(w, x) - } else { - _, err = fmt.Fprintf(w, "/* %v */", err) - } - return err -} - -type int32Slice []int32 - -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// writeExtensions writes all the extensions in pv. -// pv is assumed to be a pointer to a protocol message struct that is extendable. 
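writeString above treats the value as raw bytes and escapes anything non-printable as three-digit octal, which is what keeps the output parseable by the C++ tokenizer. The per-byte decision, as a standalone sketch:

package main

import "fmt"

func escapeByte(c byte) string {
	switch c {
	case '\n':
		return `\n`
	case '"':
		return `\"`
	case '\\':
		return `\\`
	}
	if c >= 0x20 && c < 0x7f { // printable ASCII, same test as isprint above
		return string(c)
	}
	return fmt.Sprintf("\\%03o", c) // octal escape, never Go-style \xNN
}

func main() {
	for _, c := range []byte{'A', '\n', 0x01, 0xff} {
		fmt.Printf("%q -> %s\n", c, escapeByte(c))
	}
}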
-func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { - emap := extensionMaps[pv.Type().Elem()] - e := pv.Interface().(Message) - - var m map[int32]Extension - var mu sync.Locker - if em, ok := e.(extensionsBytes); ok { - eb := em.GetExtensions() - var err error - m, err = BytesToExtensionsMap(*eb) - if err != nil { - return err - } - mu = notLocker{} - } else if _, ok := e.(extendableProto); ok { - ep, _ := extendable(e) - m, mu = ep.extensionsRead() - if m == nil { - return nil - } - } - - // Order the extensions by ID. - // This isn't strictly necessary, but it will give us - // canonical output, which will also make testing easier. - - mu.Lock() - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - mu.Unlock() - - for _, extNum := range ids { - ext := m[extNum] - var desc *ExtensionDesc - if emap != nil { - desc = emap[extNum] - } - if desc == nil { - // Unknown extension. - if err := writeUnknownStruct(w, ext.enc); err != nil { - return err - } - continue - } - - pb, err := GetExtension(e, desc) - if err != nil { - return fmt.Errorf("failed getting extension: %v", err) - } - - // Repeated extensions will appear as a slice. - if !desc.repeated() { - if err := tm.writeExtension(w, desc.Name, pb); err != nil { - return err - } - } else { - v := reflect.ValueOf(pb) - for i := 0; i < v.Len(); i++ { - if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { - return err - } - } - } - } - return nil -} - -func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { - if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - remain := w.ind * 2 - for remain > 0 { - n := remain - if n > len(spaces) { - n = len(spaces) - } - w.w.Write(spaces[:n]) - remain -= n - } - w.complete = false -} - -// TextMarshaler is a configurable text format marshaler. -type TextMarshaler struct { - Compact bool // use compact text format (one line). - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes a given protocol buffer in text format. -// The only errors returned are from w. -func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { - val := reflect.ValueOf(pb) - if pb == nil || val.IsNil() { - w.Write([]byte("")) - return nil - } - var bw *bufio.Writer - ww, ok := w.(writer) - if !ok { - bw = bufio.NewWriter(w) - ww = bw - } - aw := &textWriter{ - w: ww, - complete: true, - compact: tm.Compact, - } - - if etm, ok := pb.(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = aw.Write(text); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil - } - // Dereference the received pointer so we don't have outer < and >. - v := reflect.Indirect(val) - if err := tm.writeStruct(aw, v); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil -} - -// Text is the same as Marshal, but returns the string directly. 
-func (tm *TextMarshaler) Text(pb Message) string { - var buf bytes.Buffer - tm.Marshal(&buf, pb) - return buf.String() -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// TODO: consider removing some of the Marshal functions below. - -// MarshalText writes a given protocol buffer in text format. -// The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } - -// MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } - -// CompactText writes a given protocol buffer in compact text format (one line). -func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } - -// CompactTextString is the same as CompactText, but returns the string directly. -func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/github.com/gogo/protobuf/proto/text_gogo.go deleted file mode 100644 index 1d6c6aa..0000000 --- a/vendor/github.com/gogo/protobuf/proto/text_gogo.go +++ /dev/null @@ -1,57 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
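The wrappers just removed (MarshalText, MarshalTextString, CompactText, CompactTextString) are the public face of all of the above. A small usage sketch; Greeting is a hand-written stand-in for protoc-generated code:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

// Hand-written stand-in for a generated message, just enough to satisfy
// proto.Message.
type Greeting struct {
	Name *string `protobuf:"bytes,1,opt,name=name"`
}

func (m *Greeting) Reset()         { *m = Greeting{} }
func (m *Greeting) String() string { return proto.CompactTextString(m) }
func (*Greeting) ProtoMessage()    {}

func main() {
	name := "world"
	m := &Greeting{Name: &name}
	fmt.Print(proto.MarshalTextString(m)) // name: "world"
}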
- -package proto - -import ( - "fmt" - "reflect" -) - -func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error { - m, ok := enumStringMaps[props.Enum] - if !ok { - if err := tm.writeAny(w, v, props); err != nil { - return err - } - } - key := int32(0) - if v.Kind() == reflect.Ptr { - key = int32(v.Elem().Int()) - } else { - key = int32(v.Int()) - } - s, ok := m[key] - if !ok { - if err := tm.writeAny(w, v, props); err != nil { - return err - } - } - _, err := fmt.Fprint(w, s) - return err -} diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go deleted file mode 100644 index f127672..0000000 --- a/vendor/github.com/gogo/protobuf/proto/text_parser.go +++ /dev/null @@ -1,1013 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for parsing the Text protocol buffer format. -// TODO: message sets. 
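writeEnum above prefers the symbolic name recorded by RegisterEnum and falls back to the raw number for values it has never heard of. The lookup, reduced to a sketch (the "example.Color" table is made up):

package main

import "fmt"

var enumNames = map[string]map[int32]string{
	"example.Color": {0: "RED", 1: "GREEN"},
}

func enumText(enum string, v int32) string {
	if s, ok := enumNames[enum][v]; ok {
		return s // known value: print the symbolic name
	}
	return fmt.Sprint(v) // unknown value: fall back to the number
}

func main() {
	fmt.Println(enumText("example.Color", 1), enumText("example.Color", 7)) // GREEN 7
}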
- -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -// Error string emitted when deserializing Any and fields are already set -const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" - -type ParseError struct { - Message string - Line int // 1-based line number - Offset int // 0-based byte offset from start of input -} - -func (p *ParseError) Error() string { - if p.Line == 1 { - // show offset only for first line - return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) - } - return fmt.Sprintf("line %d: %v", p.Line, p.Message) -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func (t *token) String() string { - if t.err == nil { - return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) - } - return fmt.Sprintf("parse error: %v", t.err) -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -// Numbers and identifiers are matched by [-+._A-Za-z0-9] -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) bool { - switch c { - case '"', '\'': - return true - } - return false -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += 
len(p.cur.value) -} - -var ( - errBadUTF8 = errors.New("proto: bad UTF-8") - errBadHex = errors.New("proto: bad hexadecimal") -) - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. - simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) - s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - base := 8 - ss := s[:2] - s = s[2:] - if r == 'x' || r == 'X' { - base = 16 - } else { - ss = string(r) + ss - } - i, err := strconv.ParseUint(ss, base, 8) - if err != nil { - return "", "", err - } - return string([]byte{byte(i)}), s, nil - case 'u', 'U': - n := 4 - if r == 'U' { - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) - } - - bs := make([]byte, n/2) - for i := 0; i < n; i += 2 { - a, ok1 := unhex(s[i]) - b, ok2 := unhex(s[i+1]) - if !ok1 || !ok2 { - return "", "", errBadHex - } - bs[i/2] = a<<4 | b - } - s = s[n:] - return string(bs), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -// Adapted from src/pkg/strconv/quote.go. -func unhex(b byte) (v byte, ok bool) { - switch { - case '0' <= b && b <= '9': - return b - '0', true - case 'a' <= b && b <= 'f': - return b - 'a' + 10, true - case 'A' <= b && b <= 'F': - return b - 'A' + 10, true - } - return 0, false -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. 
- cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -// Return a RequiredNotSetError indicating which required field was not set. -func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < st.NumField(); i++ { - if !isNil(sv.Field(i)) { - continue - } - - props := sprops.Prop[i] - if props.Required { - return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} - } - } - return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen -} - -// Returns the index in the struct for the named field, as well as the parsed tag properties. -func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { - i, ok := sprops.decoderOrigNames[name] - if ok { - return i, sprops.Prop[i], true - } - return -1, nil, false -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - // Colon is optional when the field is a group or message. - needColon := true - switch props.Wire { - case "group": - needColon = false - case "bytes": - // A "bytes" field is either a message, a string, or a repeated field; - // those three become *T, *string and []T respectively, so we can check for - // this field being a pointer to a non-string. - if typ.Kind() == reflect.Ptr { - // *T or *string - if typ.Elem().Kind() == reflect.String { - break - } - } else if typ.Kind() == reflect.Slice { - // []T or []*T - if typ.Elem().Kind() != reflect.Ptr { - break - } - } else if typ.Kind() == reflect.String { - // The proto3 exception is for a string field, - // which requires a colon. - break - } - needColon = false - } - if needColon { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -func (p *textParser) readStruct(sv reflect.Value, terminator string) error { - st := sv.Type() - sprops := GetProperties(st) - reqCount := sprops.reqCount - var reqFieldErr error - fieldSet := make(map[string]bool) - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... > - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - // Looks like an extension or an Any. - // - // TODO: Check whether we need to handle - // namespace rooted names (e.g. ".something.Foo"). - extName, err := p.consumeExtName() - if err != nil { - return err - } - - if s := strings.LastIndex(extName, "/"); s >= 0 { - // If it contains a slash, it's an Any type URL. 
- messageName := extName[s+1:] - mt := MessageType(messageName) - if mt == nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) - } - tok = p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - v := reflect.New(mt.Elem()) - if pe := p.readStruct(v.Elem(), terminator); pe != nil { - return pe - } - b, err := Marshal(v.Interface().(Message)) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", messageName, err) - } - if fieldSet["type_url"] { - return p.errorf(anyRepeatedlyUnpacked, "type_url") - } - if fieldSet["value"] { - return p.errorf(anyRepeatedlyUnpacked, "value") - } - sv.FieldByName("TypeUrl").SetString(extName) - sv.FieldByName("Value").SetBytes(b) - fieldSet["type_url"] = true - fieldSet["value"] = true - continue - } - - var desc *ExtensionDesc - // This could be faster, but it's functional. - // TODO: Do something smarter than a linear scan. - for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == extName { - desc = d - break - } - } - if desc == nil { - return p.errorf("unrecognized extension %q", extName) - } - - props := &Properties{} - props.Parse(desc.Tag) - - typ := reflect.TypeOf(desc.ExtensionType) - if err := p.checkForColon(props, typ); err != nil { - return err - } - - rep := desc.repeated() - - // Read the extension structure, and set it in - // the value we're constructing. - var ext reflect.Value - if !rep { - ext = reflect.New(typ).Elem() - } else { - ext = reflect.New(typ.Elem()).Elem() - } - if err := p.readAny(ext, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - ep := sv.Addr().Interface().(Message) - if !rep { - SetExtension(ep, desc, ext.Interface()) - } else { - old, err := GetExtension(ep, desc) - var sl reflect.Value - if err == nil { - sl = reflect.ValueOf(old) // existing slice - } else { - sl = reflect.MakeSlice(typ, 0, 1) - } - sl = reflect.Append(sl, ext) - SetExtension(ep, desc, sl.Interface()) - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. - name := tok.value - var dst reflect.Value - fi, props, ok := structFieldByName(sprops, name) - if ok { - dst = sv.Field(fi) - } else if oop, ok := sprops.OneofTypes[name]; ok { - // It is a oneof. - props = oop.Prop - nv := reflect.New(oop.Type.Elem()) - dst = nv.Elem().Field(0) - field := sv.Field(oop.Field) - if !field.IsNil() { - return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) - } - field.Set(nv) - } - if !dst.IsValid() { - return p.errorf("unknown field name %q in %v", name, st) - } - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. 
- if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // However, implementations may omit key or value, and technically - // we should support them in any order. See b/28924776 for a time - // this went wrong. - - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - switch tok.value { - case "key": - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.mkeyprop); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - case "value": - if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.mvalprop); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - default: - p.back() - return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) - } - } - - dst.SetMapIndex(key, val) - continue - } - - // Check that it's not already set if it's not a repeated field. - if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) - } - - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Parse into the field. - fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - if props.Required { - reqCount-- - } - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - - } - - if reqCount > 0 { - return p.missingRequiredFieldError(sv) - } - return reqFieldErr -} - -// consumeExtName consumes extension name or expanded Any type URL and the -// following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in readStruct to provide backward compatibility. 
-func (p *textParser) consumeOptionalSeparator() error {
-	tok := p.next()
-	if tok.err != nil {
-		return tok.err
-	}
-	if tok.value != ";" && tok.value != "," {
-		p.back()
-	}
-	return nil
-}
-
-func (p *textParser) readAny(v reflect.Value, props *Properties) error {
-	tok := p.next()
-	if tok.err != nil {
-		return tok.err
-	}
-	if tok.value == "" {
-		return p.errorf("unexpected EOF")
-	}
-	if len(props.CustomType) > 0 {
-		if props.Repeated {
-			t := reflect.TypeOf(v.Interface())
-			if t.Kind() == reflect.Slice {
-				tc := reflect.TypeOf(new(Marshaler))
-				ok := t.Elem().Implements(tc.Elem())
-				if ok {
-					fv := v
-					flen := fv.Len()
-					if flen == fv.Cap() {
-						nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1)
-						reflect.Copy(nav, fv)
-						fv.Set(nav)
-					}
-					fv.SetLen(flen + 1)
-
-					// Read one.
-					p.back()
-					return p.readAny(fv.Index(flen), props)
-				}
-			}
-		}
-		if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
-			custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler)
-			err := custom.Unmarshal([]byte(tok.unquoted))
-			if err != nil {
-				return p.errorf("%v %v: %v", err, v.Type(), tok.value)
-			}
-			v.Set(reflect.ValueOf(custom))
-		} else {
-			custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler)
-			err := custom.Unmarshal([]byte(tok.unquoted))
-			if err != nil {
-				return p.errorf("%v %v: %v", err, v.Type(), tok.value)
-			}
-			v.Set(reflect.Indirect(reflect.ValueOf(custom)))
-		}
-		return nil
-	}
-	if props.StdTime {
-		fv := v
-		p.back()
-		props.StdTime = false
-		tproto := &timestamp{}
-		err := p.readAny(reflect.ValueOf(tproto).Elem(), props)
-		props.StdTime = true
-		if err != nil {
-			return err
-		}
-		tim, err := timestampFromProto(tproto)
-		if err != nil {
-			return err
-		}
-		if props.Repeated {
-			t := reflect.TypeOf(v.Interface())
-			if t.Kind() == reflect.Slice {
-				if t.Elem().Kind() == reflect.Ptr {
-					ts := fv.Interface().([]*time.Time)
-					ts = append(ts, &tim)
-					fv.Set(reflect.ValueOf(ts))
-					return nil
-				} else {
-					ts := fv.Interface().([]time.Time)
-					ts = append(ts, tim)
-					fv.Set(reflect.ValueOf(ts))
-					return nil
-				}
-			}
-		}
-		if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
-			v.Set(reflect.ValueOf(&tim))
-		} else {
-			v.Set(reflect.Indirect(reflect.ValueOf(&tim)))
-		}
-		return nil
-	}
-	if props.StdDuration {
-		fv := v
-		p.back()
-		props.StdDuration = false
-		dproto := &duration{}
-		err := p.readAny(reflect.ValueOf(dproto).Elem(), props)
-		props.StdDuration = true
-		if err != nil {
-			return err
-		}
-		dur, err := durationFromProto(dproto)
-		if err != nil {
-			return err
-		}
-		if props.Repeated {
-			t := reflect.TypeOf(v.Interface())
-			if t.Kind() == reflect.Slice {
-				if t.Elem().Kind() == reflect.Ptr {
-					ds := fv.Interface().([]*time.Duration)
-					ds = append(ds, &dur)
-					fv.Set(reflect.ValueOf(ds))
-					return nil
-				} else {
-					ds := fv.Interface().([]time.Duration)
-					ds = append(ds, dur)
-					fv.Set(reflect.ValueOf(ds))
-					return nil
-				}
-			}
-		}
-		if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
-			v.Set(reflect.ValueOf(&dur))
-		} else {
-			v.Set(reflect.Indirect(reflect.ValueOf(&dur)))
-		}
-		return nil
-	}
-	switch fv := v; fv.Kind() {
-	case reflect.Slice:
-		at := v.Type()
-		if at.Elem().Kind() == reflect.Uint8 {
-			// Special case for []byte
-			if tok.value[0] != '"' && tok.value[0] != '\'' {
-				// Deliberately written out here, as the error after
-				// this switch statement would write "invalid []byte: ...",
-				// which is not as user-friendly.
- return p.errorf("invalid string: %v", tok.value) - } - bytes := []byte(tok.unquoted) - fv.Set(reflect.ValueOf(bytes)) - return nil - } - // Repeated field. - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. - for { - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - err := p.readAny(fv.Index(fv.Len()-1), props) - if err != nil { - return err - } - ntok := p.next() - if ntok.err != nil { - return ntok.err - } - if ntok.value == "]" { - break - } - if ntok.value != "," { - return p.errorf("Expected ']' or ',' found %q", ntok.value) - } - } - return nil - } - // One value of the repeated field. - p.back() - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - return p.readAny(fv.Index(fv.Len()-1), props) - case reflect.Bool: - // true/1/t/True or false/f/0/False. - switch tok.value { - case "true", "1", "t", "True": - fv.SetBool(true) - return nil - case "false", "0", "f", "False": - fv.SetBool(false) - return nil - } - case reflect.Float32, reflect.Float64: - v := tok.value - // Ignore 'f' for compatibility with output generated by C++, but don't - // remove 'f' when the value is "-inf" or "inf". - if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { - v = v[:len(v)-1] - } - if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { - fv.SetFloat(f) - return nil - } - case reflect.Int32: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - fv.SetInt(x) - return nil - } - - if len(props.Enum) == 0 { - break - } - m, ok := enumValueMaps[props.Enum] - if !ok { - break - } - x, ok := m[tok.value] - if !ok { - break - } - fv.SetInt(int64(x)) - return nil - case reflect.Int64: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - fv.SetInt(x) - return nil - } - - case reflect.Ptr: - // A basic field (indirected through pointer), or a repeated message/group - p.back() - fv.Set(reflect.New(fv.Type().Elem())) - return p.readAny(fv.Elem(), props) - case reflect.String: - if tok.value[0] == '"' || tok.value[0] == '\'' { - fv.SetString(tok.unquoted) - return nil - } - case reflect.Struct: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - // TODO: Handle nested messages which implement encoding.TextUnmarshaler. - return p.readStruct(fv, terminator) - case reflect.Uint32: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(x) - return nil - } - case reflect.Uint64: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - fv.SetUint(x) - return nil - } - } - return p.errorf("invalid %v: %v", v.Type(), tok.value) -} - -// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb -// before starting to unmarshal, so any existing data in pb is always removed. -// If a required field is not set and no other error occurs, -// UnmarshalText returns *RequiredNotSetError. 
-func UnmarshalText(s string, pb Message) error { - if um, ok := pb.(encoding.TextUnmarshaler); ok { - err := um.UnmarshalText([]byte(s)) - return err - } - pb.Reset() - v := reflect.ValueOf(pb) - if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { - return pe - } - return nil -} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp.go b/vendor/github.com/gogo/protobuf/proto/timestamp.go deleted file mode 100644 index 9324f65..0000000 --- a/vendor/github.com/gogo/protobuf/proto/timestamp.go +++ /dev/null @@ -1,113 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// This file implements operations on google.protobuf.Timestamp. - -import ( - "errors" - "fmt" - "time" -) - -const ( - // Seconds field of the earliest valid Timestamp. - // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - minValidSeconds = -62135596800 - // Seconds field just after the latest valid Timestamp. - // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - maxValidSeconds = 253402300800 -) - -// validateTimestamp determines whether a Timestamp is valid. -// A valid timestamp represents a time in the range -// [0001-01-01, 10000-01-01) and has a Nanos field -// in the range [0, 1e9). -// -// If the Timestamp is valid, validateTimestamp returns nil. -// Otherwise, it returns an error that describes -// the problem. -// -// Every valid Timestamp can be represented by a time.Time, but the converse is not true. 
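The two bounds quoted in the constants above can be verified directly against the standard library; a quick sketch using nothing from this package:

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		// Seconds field of the earliest valid Timestamp (0001-01-01T00:00:00Z).
		fmt.Println(time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix()) // -62135596800
		// Seconds field just after the latest valid Timestamp (10000-01-01T00:00:00Z).
		fmt.Println(time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix()) // 253402300800
	}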
-func validateTimestamp(ts *timestamp) error {
-	if ts == nil {
-		return errors.New("timestamp: nil Timestamp")
-	}
-	if ts.Seconds < minValidSeconds {
-		return fmt.Errorf("timestamp: %#v before 0001-01-01", ts)
-	}
-	if ts.Seconds >= maxValidSeconds {
-		return fmt.Errorf("timestamp: %#v after 10000-01-01", ts)
-	}
-	if ts.Nanos < 0 || ts.Nanos >= 1e9 {
-		return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts)
-	}
-	return nil
-}
-
-// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time.
-// It returns an error if the argument is invalid.
-//
-// Unlike most Go functions, if Timestamp returns an error, the first return value
-// is not the zero time.Time. Instead, it is the value obtained from the
-// time.Unix function when passed the contents of the Timestamp, in the UTC
-// locale. This may or may not be a meaningful time; many invalid Timestamps
-// do map to valid time.Times.
-//
-// A nil Timestamp returns an error. The first return value in that case is
-// undefined.
-func timestampFromProto(ts *timestamp) (time.Time, error) {
-	// Don't return the zero value on error, because it corresponds to a valid
-	// timestamp. Instead return whatever time.Unix gives us.
-	var t time.Time
-	if ts == nil {
-		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
-	} else {
-		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
-	}
-	return t, validateTimestamp(ts)
-}
-
-// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
-// It returns an error if the resulting Timestamp is invalid.
-func timestampProto(t time.Time) (*timestamp, error) {
-	seconds := t.Unix()
-	nanos := int32(t.Sub(time.Unix(seconds, 0)))
-	ts := &timestamp{
-		Seconds: seconds,
-		Nanos:   nanos,
-	}
-	if err := validateTimestamp(ts); err != nil {
-		return nil, err
-	}
-	return ts, nil
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
deleted file mode 100644
index d427647..0000000
--- a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
+++ /dev/null
@@ -1,229 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2016, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
-	"reflect"
-	"time"
-)
-
-var timeType = reflect.TypeOf((*time.Time)(nil)).Elem()
-
-type timestamp struct {
-	Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
-	Nanos   int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
-}
-
-func (m *timestamp) Reset()       { *m = timestamp{} }
-func (*timestamp) ProtoMessage()  {}
-func (*timestamp) String() string { return "timestamp" }
-
-func init() {
-	RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp")
-}
-
-func (o *Buffer) decTimestamp() (time.Time, error) {
-	b, err := o.DecodeRawBytes(true)
-	if err != nil {
-		return time.Time{}, err
-	}
-	tproto := &timestamp{}
-	if err := Unmarshal(b, tproto); err != nil {
-		return time.Time{}, err
-	}
-	return timestampFromProto(tproto)
-}
-
-func (o *Buffer) dec_time(p *Properties, base structPointer) error {
-	t, err := o.decTimestamp()
-	if err != nil {
-		return err
-	}
-	setPtrCustomType(base, p.field, &t)
-	return nil
-}
-
-func (o *Buffer) dec_ref_time(p *Properties, base structPointer) error {
-	t, err := o.decTimestamp()
-	if err != nil {
-		return err
-	}
-	setCustomType(base, p.field, &t)
-	return nil
-}
-
-func (o *Buffer) dec_slice_time(p *Properties, base structPointer) error {
-	t, err := o.decTimestamp()
-	if err != nil {
-		return err
-	}
-	newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType)))
-	var zero field
-	setPtrCustomType(newBas, zero, &t)
-	return nil
-}
-
-func (o *Buffer) dec_slice_ref_time(p *Properties, base structPointer) error {
-	t, err := o.decTimestamp()
-	if err != nil {
-		return err
-	}
-	newBas := appendStructPointer(base, p.field, reflect.SliceOf(timeType))
-	var zero field
-	setCustomType(newBas, zero, &t)
-	return nil
-}
-
-func size_time(p *Properties, base structPointer) (n int) {
-	structp := structPointer_GetStructPointer(base, p.field)
-	if structPointer_IsNil(structp) {
-		return 0
-	}
-	tim := structPointer_Interface(structp, timeType).(*time.Time)
-	t, err := timestampProto(*tim)
-	if err != nil {
-		return 0
-	}
-	size := Size(t)
-	return size + sizeVarint(uint64(size)) + len(p.tagcode)
-}
-
-func (o *Buffer) enc_time(p *Properties, base structPointer) error {
-	structp := structPointer_GetStructPointer(base, p.field)
-	if structPointer_IsNil(structp) {
-		return ErrNil
-	}
-	tim := structPointer_Interface(structp, timeType).(*time.Time)
-	t, err := timestampProto(*tim)
-	if err != nil {
-		return err
-	}
-	data, err := Marshal(t)
-	if err != nil {
-		return err
-	}
-	o.buf = append(o.buf, p.tagcode...)
- o.EncodeRawBytes(data) - return nil -} - -func size_ref_time(p *Properties, base structPointer) (n int) { - tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time) - t, err := timestampProto(*tim) - if err != nil { - return 0 - } - size := Size(t) - return size + sizeVarint(uint64(size)) + len(p.tagcode) -} - -func (o *Buffer) enc_ref_time(p *Properties, base structPointer) error { - tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time) - t, err := timestampProto(*tim) - if err != nil { - return err - } - data, err := Marshal(t) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return nil -} - -func size_slice_time(p *Properties, base structPointer) (n int) { - ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time) - tims := *ptims - for i := 0; i < len(tims); i++ { - if tims[i] == nil { - return 0 - } - tproto, err := timestampProto(*tims[i]) - if err != nil { - return 0 - } - size := Size(tproto) - n += len(p.tagcode) + size + sizeVarint(uint64(size)) - } - return n -} - -func (o *Buffer) enc_slice_time(p *Properties, base structPointer) error { - ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time) - tims := *ptims - for i := 0; i < len(tims); i++ { - if tims[i] == nil { - return errRepeatedHasNil - } - tproto, err := timestampProto(*tims[i]) - if err != nil { - return err - } - data, err := Marshal(tproto) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - } - return nil -} - -func size_slice_ref_time(p *Properties, base structPointer) (n int) { - ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time) - tims := *ptims - for i := 0; i < len(tims); i++ { - tproto, err := timestampProto(tims[i]) - if err != nil { - return 0 - } - size := Size(tproto) - n += len(p.tagcode) + size + sizeVarint(uint64(size)) - } - return n -} - -func (o *Buffer) enc_slice_ref_time(p *Properties, base structPointer) error { - ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time) - tims := *ptims - for i := 0; i < len(tims); i++ { - tproto, err := timestampProto(tims[i]) - if err != nil { - return err - } - data, err := Marshal(tproto) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - } - return nil -} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile deleted file mode 100644 index 3496dc9..0000000 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile +++ /dev/null @@ -1,36 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. 
nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -regenerate: - go install github.com/gogo/protobuf/protoc-gen-gogo - go install github.com/gogo/protobuf/protoc-gen-gostring - protoc --gogo_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto - protoc --gostring_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go deleted file mode 100644 index a85bf19..0000000 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go +++ /dev/null @@ -1,118 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Package descriptor provides functions for obtaining protocol buffer -// descriptors for generated Go types. 
-// -// These functions cannot go in package proto because they depend on the -// generated protobuf descriptor messages, which themselves depend on proto. -package descriptor - -import ( - "bytes" - "compress/gzip" - "fmt" - "io/ioutil" - - "github.com/gogo/protobuf/proto" -) - -// extractFile extracts a FileDescriptorProto from a gzip'd buffer. -func extractFile(gz []byte) (*FileDescriptorProto, error) { - r, err := gzip.NewReader(bytes.NewReader(gz)) - if err != nil { - return nil, fmt.Errorf("failed to open gzip reader: %v", err) - } - defer r.Close() - - b, err := ioutil.ReadAll(r) - if err != nil { - return nil, fmt.Errorf("failed to uncompress descriptor: %v", err) - } - - fd := new(FileDescriptorProto) - if err := proto.Unmarshal(b, fd); err != nil { - return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err) - } - - return fd, nil -} - -// Message is a proto.Message with a method to return its descriptor. -// -// Message types generated by the protocol compiler always satisfy -// the Message interface. -type Message interface { - proto.Message - Descriptor() ([]byte, []int) -} - -// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it -// describing the given message. -func ForMessage(msg Message) (fd *FileDescriptorProto, md *DescriptorProto) { - gz, path := msg.Descriptor() - fd, err := extractFile(gz) - if err != nil { - panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err)) - } - - md = fd.MessageType[path[0]] - for _, i := range path[1:] { - md = md.NestedType[i] - } - return fd, md -} - -// Is this field a scalar numeric type? -func (field *FieldDescriptorProto) IsScalar() bool { - if field.Type == nil { - return false - } - switch *field.Type { - case FieldDescriptorProto_TYPE_DOUBLE, - FieldDescriptorProto_TYPE_FLOAT, - FieldDescriptorProto_TYPE_INT64, - FieldDescriptorProto_TYPE_UINT64, - FieldDescriptorProto_TYPE_INT32, - FieldDescriptorProto_TYPE_FIXED64, - FieldDescriptorProto_TYPE_FIXED32, - FieldDescriptorProto_TYPE_BOOL, - FieldDescriptorProto_TYPE_UINT32, - FieldDescriptorProto_TYPE_ENUM, - FieldDescriptorProto_TYPE_SFIXED32, - FieldDescriptorProto_TYPE_SFIXED64, - FieldDescriptorProto_TYPE_SINT32, - FieldDescriptorProto_TYPE_SINT64: - return true - default: - return false - } -} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go deleted file mode 100644 index a63db3c..0000000 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go +++ /dev/null @@ -1,2281 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: descriptor.proto - -/* -Package descriptor is a generated protocol buffer package. - -It is generated from these files: - descriptor.proto - -It has these top-level messages: - FileDescriptorSet - FileDescriptorProto - DescriptorProto - ExtensionRangeOptions - FieldDescriptorProto - OneofDescriptorProto - EnumDescriptorProto - EnumValueDescriptorProto - ServiceDescriptorProto - MethodDescriptorProto - FileOptions - MessageOptions - FieldOptions - OneofOptions - EnumOptions - EnumValueOptions - ServiceOptions - MethodOptions - UninterpretedOption - SourceCodeInfo - GeneratedCodeInfo -*/ -package descriptor - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
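For reference, ForMessage above is the usual way callers recover descriptors at runtime. A short usage sketch, assuming a hypothetical gogo-generated message type pb.Person (generated types always carry the Descriptor() method this relies on, per the Message interface comment above):

	package main

	import (
		"fmt"

		"github.com/gogo/protobuf/protoc-gen-gogo/descriptor"

		pb "example.com/myapp/pb" // hypothetical generated package
	)

	func main() {
		// ForMessage unzips the descriptor embedded in the generated code and
		// walks the nested-type path down to the message's DescriptorProto.
		fd, md := descriptor.ForMessage(&pb.Person{})
		fmt.Println(fd.GetName(), fd.GetPackage(), fd.GetSyntax())
		fmt.Println(md.GetName())
	}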
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -type FieldDescriptorProto_Type int32 - -const ( - // 0 is reserved for errors. - // Order is weird for historical reasons. - FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 - FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if - // negative values are likely. - FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 - FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if - // negative values are likely. - FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 - FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 - FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 - FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 - FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 - // Tag-delimited aggregate. - // Group type is deprecated and not supported in proto3. However, Proto3 - // implementations should still be able to parse the group wire format and - // treat group fields as unknown fields. - FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 - FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 - // New in version 2. - FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 - FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 - FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 - FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 - FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 - FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 - FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 -) - -var FieldDescriptorProto_Type_name = map[int32]string{ - 1: "TYPE_DOUBLE", - 2: "TYPE_FLOAT", - 3: "TYPE_INT64", - 4: "TYPE_UINT64", - 5: "TYPE_INT32", - 6: "TYPE_FIXED64", - 7: "TYPE_FIXED32", - 8: "TYPE_BOOL", - 9: "TYPE_STRING", - 10: "TYPE_GROUP", - 11: "TYPE_MESSAGE", - 12: "TYPE_BYTES", - 13: "TYPE_UINT32", - 14: "TYPE_ENUM", - 15: "TYPE_SFIXED32", - 16: "TYPE_SFIXED64", - 17: "TYPE_SINT32", - 18: "TYPE_SINT64", -} -var FieldDescriptorProto_Type_value = map[string]int32{ - "TYPE_DOUBLE": 1, - "TYPE_FLOAT": 2, - "TYPE_INT64": 3, - "TYPE_UINT64": 4, - "TYPE_INT32": 5, - "TYPE_FIXED64": 6, - "TYPE_FIXED32": 7, - "TYPE_BOOL": 8, - "TYPE_STRING": 9, - "TYPE_GROUP": 10, - "TYPE_MESSAGE": 11, - "TYPE_BYTES": 12, - "TYPE_UINT32": 13, - "TYPE_ENUM": 14, - "TYPE_SFIXED32": 15, - "TYPE_SFIXED64": 16, - "TYPE_SINT32": 17, - "TYPE_SINT64": 18, -} - -func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { - p := new(FieldDescriptorProto_Type) - *p = x - return p -} -func (x FieldDescriptorProto_Type) String() string { - return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) -} -func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") - if err != nil { - return err - } - *x = FieldDescriptorProto_Type(value) - return nil -} -func 
(FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{4, 0} -} - -type FieldDescriptorProto_Label int32 - -const ( - // 0 is reserved for errors - FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 - FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 - FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 -) - -var FieldDescriptorProto_Label_name = map[int32]string{ - 1: "LABEL_OPTIONAL", - 2: "LABEL_REQUIRED", - 3: "LABEL_REPEATED", -} -var FieldDescriptorProto_Label_value = map[string]int32{ - "LABEL_OPTIONAL": 1, - "LABEL_REQUIRED": 2, - "LABEL_REPEATED": 3, -} - -func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { - p := new(FieldDescriptorProto_Label) - *p = x - return p -} -func (x FieldDescriptorProto_Label) String() string { - return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) -} -func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") - if err != nil { - return err - } - *x = FieldDescriptorProto_Label(value) - return nil -} -func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{4, 1} -} - -// Generated classes can be optimized for speed or code size. -type FileOptions_OptimizeMode int32 - -const ( - FileOptions_SPEED FileOptions_OptimizeMode = 1 - // etc. - FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 - FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 -) - -var FileOptions_OptimizeMode_name = map[int32]string{ - 1: "SPEED", - 2: "CODE_SIZE", - 3: "LITE_RUNTIME", -} -var FileOptions_OptimizeMode_value = map[string]int32{ - "SPEED": 1, - "CODE_SIZE": 2, - "LITE_RUNTIME": 3, -} - -func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { - p := new(FileOptions_OptimizeMode) - *p = x - return p -} -func (x FileOptions_OptimizeMode) String() string { - return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) -} -func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") - if err != nil { - return err - } - *x = FileOptions_OptimizeMode(value) - return nil -} -func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{10, 0} -} - -type FieldOptions_CType int32 - -const ( - // Default mode. - FieldOptions_STRING FieldOptions_CType = 0 - FieldOptions_CORD FieldOptions_CType = 1 - FieldOptions_STRING_PIECE FieldOptions_CType = 2 -) - -var FieldOptions_CType_name = map[int32]string{ - 0: "STRING", - 1: "CORD", - 2: "STRING_PIECE", -} -var FieldOptions_CType_value = map[string]int32{ - "STRING": 0, - "CORD": 1, - "STRING_PIECE": 2, -} - -func (x FieldOptions_CType) Enum() *FieldOptions_CType { - p := new(FieldOptions_CType) - *p = x - return p -} -func (x FieldOptions_CType) String() string { - return proto.EnumName(FieldOptions_CType_name, int32(x)) -} -func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") - if err != nil { - return err - } - *x = FieldOptions_CType(value) - return nil -} -func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{12, 0} -} - -type FieldOptions_JSType int32 - -const ( - // Use the default type. 
- FieldOptions_JS_NORMAL FieldOptions_JSType = 0 - // Use JavaScript strings. - FieldOptions_JS_STRING FieldOptions_JSType = 1 - // Use JavaScript numbers. - FieldOptions_JS_NUMBER FieldOptions_JSType = 2 -) - -var FieldOptions_JSType_name = map[int32]string{ - 0: "JS_NORMAL", - 1: "JS_STRING", - 2: "JS_NUMBER", -} -var FieldOptions_JSType_value = map[string]int32{ - "JS_NORMAL": 0, - "JS_STRING": 1, - "JS_NUMBER": 2, -} - -func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { - p := new(FieldOptions_JSType) - *p = x - return p -} -func (x FieldOptions_JSType) String() string { - return proto.EnumName(FieldOptions_JSType_name, int32(x)) -} -func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") - if err != nil { - return err - } - *x = FieldOptions_JSType(value) - return nil -} -func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{12, 1} -} - -// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, -// or neither? HTTP based RPC implementation may choose GET verb for safe -// methods, and PUT verb for idempotent methods instead of the default POST. -type MethodOptions_IdempotencyLevel int32 - -const ( - MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 - MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 - MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 -) - -var MethodOptions_IdempotencyLevel_name = map[int32]string{ - 0: "IDEMPOTENCY_UNKNOWN", - 1: "NO_SIDE_EFFECTS", - 2: "IDEMPOTENT", -} -var MethodOptions_IdempotencyLevel_value = map[string]int32{ - "IDEMPOTENCY_UNKNOWN": 0, - "NO_SIDE_EFFECTS": 1, - "IDEMPOTENT": 2, -} - -func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { - p := new(MethodOptions_IdempotencyLevel) - *p = x - return p -} -func (x MethodOptions_IdempotencyLevel) String() string { - return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) -} -func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") - if err != nil { - return err - } - *x = MethodOptions_IdempotencyLevel(value) - return nil -} -func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{17, 0} -} - -// The protocol compiler can output a FileDescriptorSet containing the .proto -// files it parses. -type FileDescriptorSet struct { - File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } -func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } -func (*FileDescriptorSet) ProtoMessage() {} -func (*FileDescriptorSet) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{0} } - -func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { - if m != nil { - return m.File - } - return nil -} - -// Describes a complete .proto file. -type FileDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` - // Names of files imported by this file. 
- Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` - // Indexes of the public imported files in the dependency list above. - PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` - // Indexes of the weak imported files in the dependency list. - // For Google-internal migration only. Do not use. - WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` - // All top-level definitions in this file. - MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` - EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` - Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` - Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` - Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` - // This field contains optional information about the original source code. - // You may safely remove this entire field without harming runtime - // functionality of the descriptors -- the information is needed only by - // development tools. - SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` - // The syntax of the proto file. - // The supported values are "proto2" and "proto3". - Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } -func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*FileDescriptorProto) ProtoMessage() {} -func (*FileDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{1} } - -func (m *FileDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *FileDescriptorProto) GetPackage() string { - if m != nil && m.Package != nil { - return *m.Package - } - return "" -} - -func (m *FileDescriptorProto) GetDependency() []string { - if m != nil { - return m.Dependency - } - return nil -} - -func (m *FileDescriptorProto) GetPublicDependency() []int32 { - if m != nil { - return m.PublicDependency - } - return nil -} - -func (m *FileDescriptorProto) GetWeakDependency() []int32 { - if m != nil { - return m.WeakDependency - } - return nil -} - -func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { - if m != nil { - return m.MessageType - } - return nil -} - -func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { - if m != nil { - return m.EnumType - } - return nil -} - -func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { - if m != nil { - return m.Service - } - return nil -} - -func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { - if m != nil { - return m.Extension - } - return nil -} - -func (m *FileDescriptorProto) GetOptions() *FileOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { - if m != nil { - return m.SourceCodeInfo - } - return nil -} - -func (m *FileDescriptorProto) GetSyntax() string { - if m != nil && m.Syntax != nil { - return *m.Syntax - } - return "" -} - -// Describes a message type. 
-type DescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` - Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` - NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` - EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` - ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` - OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` - Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` - ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` - // Reserved field names, which may not be used by fields in the same message. - // A given name may only be reserved once. - ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } -func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto) ProtoMessage() {} -func (*DescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{2} } - -func (m *DescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *DescriptorProto) GetField() []*FieldDescriptorProto { - if m != nil { - return m.Field - } - return nil -} - -func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { - if m != nil { - return m.Extension - } - return nil -} - -func (m *DescriptorProto) GetNestedType() []*DescriptorProto { - if m != nil { - return m.NestedType - } - return nil -} - -func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { - if m != nil { - return m.EnumType - } - return nil -} - -func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { - if m != nil { - return m.ExtensionRange - } - return nil -} - -func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { - if m != nil { - return m.OneofDecl - } - return nil -} - -func (m *DescriptorProto) GetOptions() *MessageOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { - if m != nil { - return m.ReservedRange - } - return nil -} - -func (m *DescriptorProto) GetReservedName() []string { - if m != nil { - return m.ReservedName - } - return nil -} - -type DescriptorProto_ExtensionRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } -func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto_ExtensionRange) ProtoMessage() {} -func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { - return fileDescriptorDescriptor, 
[]int{2, 0} -} - -func (m *DescriptorProto_ExtensionRange) GetStart() int32 { - if m != nil && m.Start != nil { - return *m.Start - } - return 0 -} - -func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { - if m != nil { - return m.Options - } - return nil -} - -// Range of reserved tag numbers. Reserved tag numbers may not be used by -// fields or extension ranges in the same message. Reserved ranges may -// not overlap. -type DescriptorProto_ReservedRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } -func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto_ReservedRange) ProtoMessage() {} -func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{2, 1} -} - -func (m *DescriptorProto_ReservedRange) GetStart() int32 { - if m != nil && m.Start != nil { - return *m.Start - } - return 0 -} - -func (m *DescriptorProto_ReservedRange) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -type ExtensionRangeOptions struct { - // The parser stores options it doesn't recognize here. See above. - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } -func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } -func (*ExtensionRangeOptions) ProtoMessage() {} -func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{3} } - -var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_ExtensionRangeOptions -} - -func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -// Describes a field within a message. -type FieldDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` - Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` - // If type_name is set, this need not be set. If both this and type_name - // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. - Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` - // For message and enum types, this is the name of the type. If the name - // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping - // rules are used to find the type (i.e. first the nested types within this - // message are searched, then within the parent, on up to the root - // namespace). 
- TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` - // For extensions, this is the name of the type being extended. It is - // resolved in the same manner as type_name. - Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` - // For numeric types, contains the original text representation of the value. - // For booleans, "true" or "false". - // For strings, contains the default text contents (not escaped in any way). - // For bytes, contains the C escaped value. All bytes >= 128 are escaped. - // TODO(kenton): Base-64 encode? - DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` - // If set, gives the index of a oneof in the containing type's oneof_decl - // list. This field is a member of that oneof. - OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` - // JSON name of this field. The value is set by protocol compiler. If the - // user has set a "json_name" option on this field, that option's value - // will be used. Otherwise, it's deduced from the field's name by converting - // it to camelCase. - JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` - Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } -func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*FieldDescriptorProto) ProtoMessage() {} -func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{4} } - -func (m *FieldDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *FieldDescriptorProto) GetNumber() int32 { - if m != nil && m.Number != nil { - return *m.Number - } - return 0 -} - -func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { - if m != nil && m.Label != nil { - return *m.Label - } - return FieldDescriptorProto_LABEL_OPTIONAL -} - -func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { - if m != nil && m.Type != nil { - return *m.Type - } - return FieldDescriptorProto_TYPE_DOUBLE -} - -func (m *FieldDescriptorProto) GetTypeName() string { - if m != nil && m.TypeName != nil { - return *m.TypeName - } - return "" -} - -func (m *FieldDescriptorProto) GetExtendee() string { - if m != nil && m.Extendee != nil { - return *m.Extendee - } - return "" -} - -func (m *FieldDescriptorProto) GetDefaultValue() string { - if m != nil && m.DefaultValue != nil { - return *m.DefaultValue - } - return "" -} - -func (m *FieldDescriptorProto) GetOneofIndex() int32 { - if m != nil && m.OneofIndex != nil { - return *m.OneofIndex - } - return 0 -} - -func (m *FieldDescriptorProto) GetJsonName() string { - if m != nil && m.JsonName != nil { - return *m.JsonName - } - return "" -} - -func (m *FieldDescriptorProto) GetOptions() *FieldOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes a oneof. 
-type OneofDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } -func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*OneofDescriptorProto) ProtoMessage() {} -func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{5} } - -func (m *OneofDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *OneofDescriptorProto) GetOptions() *OneofOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes an enum type. -type EnumDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` - Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - // Range of reserved numeric values. Reserved numeric values may not be used - // by enum values in the same enum declaration. Reserved ranges may not - // overlap. - ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` - // Reserved enum value names, which may not be reused. A given name may only - // be reserved once. - ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } -func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*EnumDescriptorProto) ProtoMessage() {} -func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{6} } - -func (m *EnumDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { - if m != nil { - return m.Value - } - return nil -} - -func (m *EnumDescriptorProto) GetOptions() *EnumOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { - if m != nil { - return m.ReservedRange - } - return nil -} - -func (m *EnumDescriptorProto) GetReservedName() []string { - if m != nil { - return m.ReservedName - } - return nil -} - -// Range of reserved numeric values. Reserved values may not be used by -// entries in the same enum. Reserved ranges may not overlap. -// -// Note that this is distinct from DescriptorProto.ReservedRange in that it -// is inclusive such that it can appropriately represent the entire int32 -// domain. 
-// Range of reserved numeric values. Reserved values may not be used by
-// entries in the same enum. Reserved ranges may not overlap.
-//
-// Note that this is distinct from DescriptorProto.ReservedRange in that it
-// is inclusive such that it can appropriately represent the entire int32
-// domain.
-type EnumDescriptorProto_EnumReservedRange struct {
-	Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
-	End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} }
-func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) }
-func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {}
-func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) {
-	return fileDescriptorDescriptor, []int{6, 0}
-}
-
-func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 {
-	if m != nil && m.Start != nil {
-		return *m.Start
-	}
-	return 0
-}
-
-func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 {
-	if m != nil && m.End != nil {
-		return *m.End
-	}
-	return 0
-}
-
-// Describes a value within an enum.
-type EnumValueDescriptorProto struct {
-	Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"`
-	Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} }
-func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) }
-func (*EnumValueDescriptorProto) ProtoMessage() {}
-func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) {
-	return fileDescriptorDescriptor, []int{7}
-}
-
-func (m *EnumValueDescriptorProto) GetName() string {
-	if m != nil && m.Name != nil {
-		return *m.Name
-	}
-	return ""
-}
-
-func (m *EnumValueDescriptorProto) GetNumber() int32 {
-	if m != nil && m.Number != nil {
-		return *m.Number
-	}
-	return 0
-}
-
-func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions {
-	if m != nil {
-		return m.Options
-	}
-	return nil
-}
-
-// Describes a service.
-type ServiceDescriptorProto struct {
-	Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"`
-	Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} }
-func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) }
-func (*ServiceDescriptorProto) ProtoMessage() {}
-func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{8} }
-
-func (m *ServiceDescriptorProto) GetName() string {
-	if m != nil && m.Name != nil {
-		return *m.Name
-	}
-	return ""
-}
-
-func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto {
-	if m != nil {
-		return m.Method
-	}
-	return nil
-}
-
-func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions {
-	if m != nil {
-		return m.Options
-	}
-	return nil
-}
-
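As the EnumReservedRange comment above notes, both Start and End are inclusive (unlike DescriptorProto.ReservedRange, whose end is exclusive), which is what lets a single range cover the entire int32 domain. A small sketch of the membership check under that inclusive convention (types are illustrative):

package main

import "fmt"

type reservedRange struct{ start, end int32 } // both bounds inclusive

func isReserved(ranges []reservedRange, value int32) bool {
	for _, r := range ranges {
		if value >= r.start && value <= r.end {
			return true
		}
	}
	return false
}

func main() {
	// One inclusive range spanning all of int32; an exclusive end could
	// never express this, since end+1 would overflow.
	all := []reservedRange{{start: -2147483648, end: 2147483647}}
	fmt.Println(isReserved(all, 42)) // true
}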
-// Describes a method of a service.
-type MethodDescriptorProto struct {
-	Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	// Input and output type names. These are resolved in the same way as
-	// FieldDescriptorProto.type_name, but must refer to a message type.
-	InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"`
-	OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"`
-	Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"`
-	// Identifies if client streams multiple client messages
-	ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"`
-	// Identifies if server streams multiple server messages
-	ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} }
-func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) }
-func (*MethodDescriptorProto) ProtoMessage() {}
-func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{9} }
-
-const Default_MethodDescriptorProto_ClientStreaming bool = false
-const Default_MethodDescriptorProto_ServerStreaming bool = false
-
-func (m *MethodDescriptorProto) GetName() string {
-	if m != nil && m.Name != nil {
-		return *m.Name
-	}
-	return ""
-}
-
-func (m *MethodDescriptorProto) GetInputType() string {
-	if m != nil && m.InputType != nil {
-		return *m.InputType
-	}
-	return ""
-}
-
-func (m *MethodDescriptorProto) GetOutputType() string {
-	if m != nil && m.OutputType != nil {
-		return *m.OutputType
-	}
-	return ""
-}
-
-func (m *MethodDescriptorProto) GetOptions() *MethodOptions {
-	if m != nil {
-		return m.Options
-	}
-	return nil
-}
-
-func (m *MethodDescriptorProto) GetClientStreaming() bool {
-	if m != nil && m.ClientStreaming != nil {
-		return *m.ClientStreaming
-	}
-	return Default_MethodDescriptorProto_ClientStreaming
-}
-
-func (m *MethodDescriptorProto) GetServerStreaming() bool {
-	if m != nil && m.ServerStreaming != nil {
-		return *m.ServerStreaming
-	}
-	return Default_MethodDescriptorProto_ServerStreaming
-}
-
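The two streaming flags above, both defaulting to false, combine into the four familiar RPC call shapes. A tiny classifier sketch (not generated code, just the decision table spelled out):

package main

import "fmt"

func rpcShape(clientStreaming, serverStreaming bool) string {
	switch {
	case clientStreaming && serverStreaming:
		return "bidirectional streaming"
	case clientStreaming:
		return "client streaming"
	case serverStreaming:
		return "server streaming"
	default:
		return "unary"
	}
}

func main() {
	fmt.Println(rpcShape(false, false)) // unary: both flags default to false
	fmt.Println(rpcShape(false, true))  // server streaming
}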
-type FileOptions struct {
-	// Sets the Java package where classes generated from this .proto will be
-	// placed. By default, the proto package is used, but this is often
-	// inappropriate because proto packages do not normally start with backwards
-	// domain names.
-	JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"`
-	// If set, all the classes from the .proto file are wrapped in a single
-	// outer class with the given name. This applies to both Proto1
-	// (equivalent to the old "--one_java_file" option) and Proto2 (where
-	// a .proto always translates to a single class, but you may want to
-	// explicitly choose the class name).
-	JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"`
-	// If set true, then the Java code generator will generate a separate .java
-	// file for each top-level message, enum, and service defined in the .proto
-	// file. Thus, these types will *not* be nested inside the outer class
-	// named by java_outer_classname. However, the outer class will still be
-	// generated to contain the file's getDescriptor() method as well as any
-	// top-level extensions defined in the file.
-	JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"`
-	// This option does nothing.
-	JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"`
-	// If set true, then the Java2 code generator will generate code that
-	// throws an exception whenever an attempt is made to assign a non-UTF-8
-	// byte sequence to a string field.
-	// Message reflection will do the same.
-	// However, an extension field still accepts non-UTF-8 byte sequences.
-	// This option has no effect on when used with the lite runtime.
-	JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"`
-	OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"`
-	// Sets the Go package where structs generated from this .proto will be
-	// placed. If omitted, the Go package will be derived from the following:
-	//   - The basename of the package import path, if provided.
-	//   - Otherwise, the package statement in the .proto file, if present.
-	//   - Otherwise, the basename of the .proto file, without extension.
-	GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"`
-	// Should generic services be generated in each language? "Generic" services
-	// are not specific to any particular RPC system. They are generated by the
-	// main code generators in each language (without additional plugins).
-	// Generic services were the only kind of service generation supported by
-	// early versions of google.protobuf.
-	//
-	// Generic services are now considered deprecated in favor of using plugins
-	// that generate code specific to your particular RPC system. Therefore,
-	// these default to false. Old code which depends on generic services should
-	// explicitly set them to true.
-	CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"`
-	JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"`
-	PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"`
-	PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"`
-	// Is this file deprecated?
-	// Depending on the target platform, this can emit Deprecated annotations
-	// for everything in the file, or it will be completely ignored; in the very
-	// least, this is a formalization for deprecating files.
-	Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
-	// Enables the use of arenas for the proto messages in this file. This applies
-	// only to generated classes for C++.
-	CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"`
-	// Sets the objective c class prefix which is prepended to all objective c
-	// generated classes from this .proto. There is no default.
-	ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"`
-	// Namespace for generated classes; defaults to the package.
-	CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"`
-	// By default Swift generators will take the proto package and CamelCase it
-	// replacing '.' with underscore and use that to prefix the types/symbols
-	// defined. When this options is provided, they will use this value instead
-	// to prefix the types/symbols defined.
-	SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"`
-	// Sets the php class prefix which is prepended to all php generated classes
-	// from this .proto. Default is empty.
-	PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"`
-	// Use this option to change the namespace of php generated classes. Default
-	// is empty. When this option is empty, the package name will be used for
-	// determining the namespace.
-	PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"`
-	// The parser stores options it doesn't recognize here.
-	// See the documentation for the "Options" section above.
-	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *FileOptions) Reset() { *m = FileOptions{} }
-func (m *FileOptions) String() string { return proto.CompactTextString(m) }
-func (*FileOptions) ProtoMessage() {}
-func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{10} }
-
-var extRange_FileOptions = []proto.ExtensionRange{
-	{Start: 1000, End: 536870911},
-}
-
-func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange {
-	return extRange_FileOptions
-}
-
-const Default_FileOptions_JavaMultipleFiles bool = false
-const Default_FileOptions_JavaStringCheckUtf8 bool = false
-const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED
-const Default_FileOptions_CcGenericServices bool = false
-const Default_FileOptions_JavaGenericServices bool = false
-const Default_FileOptions_PyGenericServices bool = false
-const Default_FileOptions_PhpGenericServices bool = false
-const Default_FileOptions_Deprecated bool = false
-const Default_FileOptions_CcEnableArenas bool = false
-
-func (m *FileOptions) GetJavaPackage() string {
-	if m != nil && m.JavaPackage != nil {
-		return *m.JavaPackage
-	}
-	return ""
-}
-
-func (m *FileOptions) GetJavaOuterClassname() string {
-	if m != nil && m.JavaOuterClassname != nil {
-		return *m.JavaOuterClassname
-	}
-	return ""
-}
-
-func (m *FileOptions) GetJavaMultipleFiles() bool {
-	if m != nil && m.JavaMultipleFiles != nil {
-		return *m.JavaMultipleFiles
-	}
-	return Default_FileOptions_JavaMultipleFiles
-}
-
-func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool {
-	if m != nil && m.JavaGenerateEqualsAndHash != nil {
-		return *m.JavaGenerateEqualsAndHash
-	}
-	return false
-}
-
-func (m *FileOptions) GetJavaStringCheckUtf8() bool {
-	if m != nil && m.JavaStringCheckUtf8 != nil {
-		return *m.JavaStringCheckUtf8
-	}
-	return Default_FileOptions_JavaStringCheckUtf8
-}
-
-func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode {
-	if m != nil && m.OptimizeFor != nil {
-		return *m.OptimizeFor
-	}
-	return Default_FileOptions_OptimizeFor
-}
-
-func (m *FileOptions) GetGoPackage() string {
-	if m != nil && m.GoPackage != nil {
-		return *m.GoPackage
-	}
-	return ""
-}
-
-func (m *FileOptions) GetCcGenericServices() bool {
-	if m != nil && m.CcGenericServices != nil {
-		return *m.CcGenericServices
-	}
-	return Default_FileOptions_CcGenericServices
-}
-
-func (m *FileOptions) GetJavaGenericServices() bool {
-	if m != nil && m.JavaGenericServices != nil {
-		return *m.JavaGenericServices
-	}
-	return Default_FileOptions_JavaGenericServices
-}
-
-func (m *FileOptions) GetPyGenericServices() bool {
-	if m != nil && m.PyGenericServices != nil {
-		return *m.PyGenericServices
-	}
-	return Default_FileOptions_PyGenericServices
-}
-
-func (m *FileOptions) GetPhpGenericServices() bool {
-	if m != nil && m.PhpGenericServices != nil {
-		return *m.PhpGenericServices
-	}
-	return Default_FileOptions_PhpGenericServices
-}
-
-func (m *FileOptions) GetDeprecated() bool {
-	if m != nil && m.Deprecated != nil {
-		return *m.Deprecated
-	}
-	return Default_FileOptions_Deprecated
-}
-
-func (m *FileOptions) GetCcEnableArenas() bool {
-	if m != nil && m.CcEnableArenas != nil {
-		return *m.CcEnableArenas
-	}
-	return Default_FileOptions_CcEnableArenas
-}
-
-func (m *FileOptions) GetObjcClassPrefix() string {
-	if m != nil && m.ObjcClassPrefix != nil {
-		return *m.ObjcClassPrefix
-	}
-	return ""
-}
-
-func (m *FileOptions) GetCsharpNamespace() string {
-	if m != nil && m.CsharpNamespace != nil {
-		return *m.CsharpNamespace
-	}
-	return ""
-}
-
-func (m *FileOptions) GetSwiftPrefix() string {
-	if m != nil && m.SwiftPrefix != nil {
-		return *m.SwiftPrefix
-	}
-	return ""
-}
-
-func (m *FileOptions) GetPhpClassPrefix() string {
-	if m != nil && m.PhpClassPrefix != nil {
-		return *m.PhpClassPrefix
-	}
-	return ""
-}
-
-func (m *FileOptions) GetPhpNamespace() string {
-	if m != nil && m.PhpNamespace != nil {
-		return *m.PhpNamespace
-	}
-	return ""
-}
-
-func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption {
-	if m != nil {
-		return m.UninterpretedOption
-	}
-	return nil
-}
-
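extRange_FileOptions above declares that FileOptions accepts extension fields numbered 1000 through 536870911, with both ends of the range inclusive (536870911 is the maximum proto field number, i.e. "extensions 1000 to max"). A sketch of the interval test such a declaration implies (the extensionRange type mirrors the shape of proto.ExtensionRange but is local to this example):

package main

import "fmt"

type extensionRange struct{ Start, End int32 }

// extendable reports whether fieldNumber may be used as an extension,
// treating both bounds as inclusive.
func extendable(ranges []extensionRange, fieldNumber int32) bool {
	for _, r := range ranges {
		if fieldNumber >= r.Start && fieldNumber <= r.End {
			return true
		}
	}
	return false
}

func main() {
	ranges := []extensionRange{{Start: 1000, End: 536870911}}
	fmt.Println(extendable(ranges, 999))  // false: below the range
	fmt.Println(extendable(ranges, 1000)) // true
}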
-type MessageOptions struct {
-	// Set true to use the old proto1 MessageSet wire format for extensions.
-	// This is provided for backwards-compatibility with the MessageSet wire
-	// format. You should not use this for any other reason: It's less
-	// efficient, has fewer features, and is more complicated.
-	//
-	// The message must be defined exactly as follows:
-	//   message Foo {
-	//     option message_set_wire_format = true;
-	//     extensions 4 to max;
-	//   }
-	// Note that the message cannot have any defined fields; MessageSets only
-	// have extensions.
-	//
-	// All extensions of your type must be singular messages; e.g. they cannot
-	// be int32s, enums, or repeated messages.
-	//
-	// Because this is an option, the above two restrictions are not enforced by
-	// the protocol compiler.
-	MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"`
-	// Disables the generation of the standard "descriptor()" accessor, which can
-	// conflict with a field of the same name. This is meant to make migration
-	// from proto1 easier; new code should avoid fields named "descriptor".
-	NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"`
-	// Is this message deprecated?
-	// Depending on the target platform, this can emit Deprecated annotations
-	// for the message, or it will be completely ignored; in the very least,
-	// this is a formalization for deprecating messages.
-	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
-	// Whether the message is an automatically generated map entry type for the
-	// maps field.
-	//
-	// For maps fields:
-	//     map<KeyType, ValueType> map_field = 1;
-	// The parsed descriptor looks like:
-	//     message MapFieldEntry {
-	//         option map_entry = true;
-	//         optional KeyType key = 1;
-	//         optional ValueType value = 2;
-	//     }
-	//     repeated MapFieldEntry map_field = 1;
-	//
-	// Implementations may choose not to generate the map_entry=true message, but
-	// use a native map in the target language to hold the keys and values.
-	// The reflection APIs in such implementions still need to work as
-	// if the field is a repeated message field.
-	//
-	// NOTE: Do not set the option in .proto files. Always use the maps syntax
-	// instead. The option should only be implicitly set by the proto compiler
-	// parser.
-	MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"`
-	// The parser stores options it doesn't recognize here. See above.
-	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *MessageOptions) Reset() { *m = MessageOptions{} }
-func (m *MessageOptions) String() string { return proto.CompactTextString(m) }
-func (*MessageOptions) ProtoMessage() {}
-func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{11} }
-
-var extRange_MessageOptions = []proto.ExtensionRange{
-	{Start: 1000, End: 536870911},
-}
-
-func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange {
-	return extRange_MessageOptions
-}
-
-const Default_MessageOptions_MessageSetWireFormat bool = false
-const Default_MessageOptions_NoStandardDescriptorAccessor bool = false
-const Default_MessageOptions_Deprecated bool = false
-
-func (m *MessageOptions) GetMessageSetWireFormat() bool {
-	if m != nil && m.MessageSetWireFormat != nil {
-		return *m.MessageSetWireFormat
-	}
-	return Default_MessageOptions_MessageSetWireFormat
-}
-
-func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool {
-	if m != nil && m.NoStandardDescriptorAccessor != nil {
-		return *m.NoStandardDescriptorAccessor
-	}
-	return Default_MessageOptions_NoStandardDescriptorAccessor
-}
-
-func (m *MessageOptions) GetDeprecated() bool {
-	if m != nil && m.Deprecated != nil {
-		return *m.Deprecated
-	}
-	return Default_MessageOptions_Deprecated
-}
-
-func (m *MessageOptions) GetMapEntry() bool {
-	if m != nil && m.MapEntry != nil {
-		return *m.MapEntry
-	}
-	return false
-}
-
-func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption {
-	if m != nil {
-		return m.UninterpretedOption
-	}
-	return nil
-}
-
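As the map_entry comment above describes, a map field is encoded on the wire as a repeated synthetic entry message with a key at field 1 and a value at field 2, while implementations may expose a native map instead. A sketch of that equivalence (the mapFieldEntry type is illustrative, not the generated entry message):

package main

import "fmt"

type mapFieldEntry struct {
	key   string
	value int64
}

// fromEntries rebuilds the native map view from the repeated-entry wire
// view; a later entry for the same key wins, matching proto merge rules.
func fromEntries(entries []mapFieldEntry) map[string]int64 {
	m := make(map[string]int64, len(entries))
	for _, e := range entries {
		m[e.key] = e.value
	}
	return m
}

func main() {
	wire := []mapFieldEntry{{"a", 1}, {"b", 2}, {"a", 3}}
	fmt.Println(fromEntries(wire)) // map[a:3 b:2]
}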
-type FieldOptions struct {
-	// The ctype option instructs the C++ code generator to use a different
-	// representation of the field than it normally would. See the specific
-	// options below. This option is not yet implemented in the open source
-	// release -- sorry, we'll try to include it in a future version!
-	Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"`
-	// The packed option can be enabled for repeated primitive fields to enable
-	// a more efficient representation on the wire. Rather than repeatedly
-	// writing the tag and type for each element, the entire array is encoded as
-	// a single length-delimited blob. In proto3, only explicit setting it to
-	// false will avoid using packed encoding.
-	Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"`
-	// The jstype option determines the JavaScript type used for values of the
-	// field. The option is permitted only for 64 bit integral and fixed types
-	// (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING
-	// is represented as JavaScript string, which avoids loss of precision that
-	// can happen when a large value is converted to a floating point JavaScript.
-	// Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
-	// use the JavaScript "number" type. The behavior of the default option
-	// JS_NORMAL is implementation dependent.
-	//
-	// This option is an enum to permit additional types to be added, e.g.
-	// goog.math.Integer.
-	Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"`
-	// Should this field be parsed lazily? Lazy applies only to message-type
-	// fields. It means that when the outer message is initially parsed, the
-	// inner message's contents will not be parsed but instead stored in encoded
-	// form. The inner message will actually be parsed when it is first accessed.
-	//
-	// This is only a hint. Implementations are free to choose whether to use
-	// eager or lazy parsing regardless of the value of this option. However,
-	// setting this option true suggests that the protocol author believes that
-	// using lazy parsing on this field is worth the additional bookkeeping
-	// overhead typically needed to implement it.
-	//
-	// This option does not affect the public interface of any generated code;
-	// all method signatures remain the same. Furthermore, thread-safety of the
-	// interface is not affected by this option; const methods remain safe to
-	// call from multiple threads concurrently, while non-const methods continue
-	// to require exclusive access.
-	//
-	//
-	// Note that implementations may choose not to check required fields within
-	// a lazy sub-message. That is, calling IsInitialized() on the outer message
-	// may return true even if the inner message has missing required fields.
-	// This is necessary because otherwise the inner message would have to be
-	// parsed in order to perform the check, defeating the purpose of lazy
-	// parsing. An implementation which chooses not to check required fields
-	// must be consistent about it. That is, for any particular sub-message, the
-	// implementation must either *always* check its required fields, or *never*
-	// check its required fields, regardless of whether or not the message has
-	// been parsed.
-	Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"`
-	// Is this field deprecated?
-	// Depending on the target platform, this can emit Deprecated annotations
-	// for accessors, or it will be completely ignored; in the very least, this
-	// is a formalization for deprecating fields.
-	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
-	// For Google-internal migration only. Do not use.
-	Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
-	// The parser stores options it doesn't recognize here. See above.
-	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *FieldOptions) Reset() { *m = FieldOptions{} }
-func (m *FieldOptions) String() string { return proto.CompactTextString(m) }
-func (*FieldOptions) ProtoMessage() {}
-func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{12} }
-
-var extRange_FieldOptions = []proto.ExtensionRange{
-	{Start: 1000, End: 536870911},
-}
-
-func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange {
-	return extRange_FieldOptions
-}
-
-const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING
-const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL
-const Default_FieldOptions_Lazy bool = false
-const Default_FieldOptions_Deprecated bool = false
-const Default_FieldOptions_Weak bool = false
-
-func (m *FieldOptions) GetCtype() FieldOptions_CType {
-	if m != nil && m.Ctype != nil {
-		return *m.Ctype
-	}
-	return Default_FieldOptions_Ctype
-}
-
-func (m *FieldOptions) GetPacked() bool {
-	if m != nil && m.Packed != nil {
-		return *m.Packed
-	}
-	return false
-}
-
-func (m *FieldOptions) GetJstype() FieldOptions_JSType {
-	if m != nil && m.Jstype != nil {
-		return *m.Jstype
-	}
-	return Default_FieldOptions_Jstype
-}
-
-func (m *FieldOptions) GetLazy() bool {
-	if m != nil && m.Lazy != nil {
-		return *m.Lazy
-	}
-	return Default_FieldOptions_Lazy
-}
-
-func (m *FieldOptions) GetDeprecated() bool {
-	if m != nil && m.Deprecated != nil {
-		return *m.Deprecated
-	}
-	return Default_FieldOptions_Deprecated
-}
-
-func (m *FieldOptions) GetWeak() bool {
-	if m != nil && m.Weak != nil {
-		return *m.Weak
-	}
-	return Default_FieldOptions_Weak
-}
-
-func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption {
-	if m != nil {
-		return m.UninterpretedOption
-	}
-	return nil
-}
-
-type OneofOptions struct {
-	// The parser stores options it doesn't recognize here. See above.
-	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *OneofOptions) Reset() { *m = OneofOptions{} }
-func (m *OneofOptions) String() string { return proto.CompactTextString(m) }
-func (*OneofOptions) ProtoMessage() {}
-func (*OneofOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{13} }
-
-var extRange_OneofOptions = []proto.ExtensionRange{
-	{Start: 1000, End: 536870911},
-}
-
-func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange {
-	return extRange_OneofOptions
-}
-
-func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption {
-	if m != nil {
-		return m.UninterpretedOption
-	}
-	return nil
-}
-
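The Packed option handled above trades one tag byte per element for a single tag plus a length prefix over the whole blob. A rough size comparison for varint elements, assuming 1-byte tags (field numbers below 16); this is a sketch, not a real encoder:

package main

import "fmt"

// varintLen returns how many bytes v occupies as a protobuf varint.
func varintLen(v uint64) int {
	n := 1
	for v >= 0x80 {
		v >>= 7
		n++
	}
	return n
}

func main() {
	values := []uint64{1, 2, 3, 300}
	payload := 0
	for _, v := range values {
		payload += varintLen(v)
	}
	unpacked := payload + len(values)                  // one tag byte per element
	packed := 1 + varintLen(uint64(payload)) + payload // tag + length + data
	fmt.Println(unpacked, packed) // 9 7: packed pulls ahead as the list grows
}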
-type EnumOptions struct {
-	// Set this option to true to allow mapping different tag names to the same
-	// value.
-	AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"`
-	// Is this enum deprecated?
-	// Depending on the target platform, this can emit Deprecated annotations
-	// for the enum, or it will be completely ignored; in the very least, this
-	// is a formalization for deprecating enums.
-	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
-	// The parser stores options it doesn't recognize here. See above.
-	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *EnumOptions) Reset() { *m = EnumOptions{} }
-func (m *EnumOptions) String() string { return proto.CompactTextString(m) }
-func (*EnumOptions) ProtoMessage() {}
-func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{14} }
-
-var extRange_EnumOptions = []proto.ExtensionRange{
-	{Start: 1000, End: 536870911},
-}
-
-func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange {
-	return extRange_EnumOptions
-}
-
-const Default_EnumOptions_Deprecated bool = false
-
-func (m *EnumOptions) GetAllowAlias() bool {
-	if m != nil && m.AllowAlias != nil {
-		return *m.AllowAlias
-	}
-	return false
-}
-
-func (m *EnumOptions) GetDeprecated() bool {
-	if m != nil && m.Deprecated != nil {
-		return *m.Deprecated
-	}
-	return Default_EnumOptions_Deprecated
-}
-
-func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption {
-	if m != nil {
-		return m.UninterpretedOption
-	}
-	return nil
-}
-
-type EnumValueOptions struct {
-	// Is this enum value deprecated?
-	// Depending on the target platform, this can emit Deprecated annotations
-	// for the enum value, or it will be completely ignored; in the very least,
-	// this is a formalization for deprecating enum values.
-	Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
-	// The parser stores options it doesn't recognize here. See above.
-	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} }
-func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) }
-func (*EnumValueOptions) ProtoMessage() {}
-func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{15} }
-
-var extRange_EnumValueOptions = []proto.ExtensionRange{
-	{Start: 1000, End: 536870911},
-}
-
-func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange {
-	return extRange_EnumValueOptions
-}
-
-const Default_EnumValueOptions_Deprecated bool = false
-
-func (m *EnumValueOptions) GetDeprecated() bool {
-	if m != nil && m.Deprecated != nil {
-		return *m.Deprecated
-	}
-	return Default_EnumValueOptions_Deprecated
-}
-
-func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption {
-	if m != nil {
-		return m.UninterpretedOption
-	}
-	return nil
-}
-
-type ServiceOptions struct {
-	// Is this service deprecated?
-	// Depending on the target platform, this can emit Deprecated annotations
-	// for the service, or it will be completely ignored; in the very least,
-	// this is a formalization for deprecating services.
-	Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
-	// The parser stores options it doesn't recognize here. See above.
-	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *ServiceOptions) Reset() { *m = ServiceOptions{} }
-func (m *ServiceOptions) String() string { return proto.CompactTextString(m) }
-func (*ServiceOptions) ProtoMessage() {}
-func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{16} }
-
-var extRange_ServiceOptions = []proto.ExtensionRange{
-	{Start: 1000, End: 536870911},
-}
-
-func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange {
-	return extRange_ServiceOptions
-}
-
-const Default_ServiceOptions_Deprecated bool = false
-
-func (m *ServiceOptions) GetDeprecated() bool {
-	if m != nil && m.Deprecated != nil {
-		return *m.Deprecated
-	}
-	return Default_ServiceOptions_Deprecated
-}
-
-func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption {
-	if m != nil {
-		return m.UninterpretedOption
-	}
-	return nil
-}
-
-type MethodOptions struct {
-	// Is this method deprecated?
-	// Depending on the target platform, this can emit Deprecated annotations
-	// for the method, or it will be completely ignored; in the very least,
-	// this is a formalization for deprecating methods.
-	Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
-	IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"`
-	// The parser stores options it doesn't recognize here. See above.
-	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *MethodOptions) Reset() { *m = MethodOptions{} }
-func (m *MethodOptions) String() string { return proto.CompactTextString(m) }
-func (*MethodOptions) ProtoMessage() {}
-func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{17} }
-
-var extRange_MethodOptions = []proto.ExtensionRange{
-	{Start: 1000, End: 536870911},
-}
-
-func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange {
-	return extRange_MethodOptions
-}
-
-const Default_MethodOptions_Deprecated bool = false
-const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN
-
-func (m *MethodOptions) GetDeprecated() bool {
-	if m != nil && m.Deprecated != nil {
-		return *m.Deprecated
-	}
-	return Default_MethodOptions_Deprecated
-}
-
-func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel {
-	if m != nil && m.IdempotencyLevel != nil {
-		return *m.IdempotencyLevel
-	}
-	return Default_MethodOptions_IdempotencyLevel
-}
-
-func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption {
-	if m != nil {
-		return m.UninterpretedOption
-	}
-	return nil
-}
-
-// A message representing a option the parser does not recognize. This only
-// appears in options protos created by the compiler::Parser class.
-// DescriptorPool resolves these when building Descriptor objects. Therefore,
-// options protos in descriptor objects (e.g. returned by Descriptor::options(),
-// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
-// in them.
-type UninterpretedOption struct {
-	Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"`
-	// The value of the uninterpreted option, in whatever type the tokenizer
-	// identified it as during parsing. Exactly one of these should be set.
-	IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"`
-	PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"`
-	NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"`
-	DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"`
-	StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"`
-	AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} }
-func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) }
-func (*UninterpretedOption) ProtoMessage() {}
-func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{18} }
-
-func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart {
-	if m != nil {
-		return m.Name
-	}
-	return nil
-}
-
-func (m *UninterpretedOption) GetIdentifierValue() string {
-	if m != nil && m.IdentifierValue != nil {
-		return *m.IdentifierValue
-	}
-	return ""
-}
-
-func (m *UninterpretedOption) GetPositiveIntValue() uint64 {
-	if m != nil && m.PositiveIntValue != nil {
-		return *m.PositiveIntValue
-	}
-	return 0
-}
-
-func (m *UninterpretedOption) GetNegativeIntValue() int64 {
-	if m != nil && m.NegativeIntValue != nil {
-		return *m.NegativeIntValue
-	}
-	return 0
-}
-
-func (m *UninterpretedOption) GetDoubleValue() float64 {
-	if m != nil && m.DoubleValue != nil {
-		return *m.DoubleValue
-	}
-	return 0
-}
-
-func (m *UninterpretedOption) GetStringValue() []byte {
-	if m != nil {
-		return m.StringValue
-	}
-	return nil
-}
-
-func (m *UninterpretedOption) GetAggregateValue() string {
-	if m != nil && m.AggregateValue != nil {
-		return *m.AggregateValue
-	}
-	return ""
-}
-
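The NamePart comment that follows explains the segment encoding: extension segments are wrapped in parentheses and segments are joined with dots, so { ["foo", false], ["bar.baz", true], ["qux", false] } renders as "foo.(bar.baz).qux". A sketch of that formatting rule (local types, not the generated API):

package main

import (
	"fmt"
	"strings"
)

type namePart struct {
	part        string
	isExtension bool
}

// optionName joins name parts, parenthesizing extension segments.
func optionName(parts []namePart) string {
	segs := make([]string, 0, len(parts))
	for _, p := range parts {
		if p.isExtension {
			segs = append(segs, "("+p.part+")")
		} else {
			segs = append(segs, p.part)
		}
	}
	return strings.Join(segs, ".")
}

func main() {
	fmt.Println(optionName([]namePart{
		{"foo", false}, {"bar.baz", true}, {"qux", false},
	})) // foo.(bar.baz).qux
}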
-// The name of the uninterpreted option. Each string represents a segment in
-// a dot-separated name. is_extension is true iff a segment represents an
-// extension (denoted with parentheses in options specs in .proto files).
-// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents
-// "foo.(bar.baz).qux".
-type UninterpretedOption_NamePart struct {
-	NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"`
-	IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} }
-func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) }
-func (*UninterpretedOption_NamePart) ProtoMessage() {}
-func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) {
-	return fileDescriptorDescriptor, []int{18, 0}
-}
-
-func (m *UninterpretedOption_NamePart) GetNamePart() string {
-	if m != nil && m.NamePart != nil {
-		return *m.NamePart
-	}
-	return ""
-}
-
-func (m *UninterpretedOption_NamePart) GetIsExtension() bool {
-	if m != nil && m.IsExtension != nil {
-		return *m.IsExtension
-	}
-	return false
-}
-
-// Encapsulates information about the original source file from which a
-// FileDescriptorProto was generated.
-type SourceCodeInfo struct {
-	// A Location identifies a piece of source code in a .proto file which
-	// corresponds to a particular definition. This information is intended
-	// to be useful to IDEs, code indexers, documentation generators, and similar
-	// tools.
-	//
-	// For example, say we have a file like:
-	//   message Foo {
-	//     optional string foo = 1;
-	//   }
-	// Let's look at just the field definition:
-	//   optional string foo = 1;
-	//   ^       ^^     ^^  ^  ^^^
-	//   a       bc     de  f  ghi
-	// We have the following locations:
-	//   span   path               represents
-	//   [a,i)  [ 4, 0, 2, 0 ]     The whole field definition.
-	//   [a,b)  [ 4, 0, 2, 0, 4 ]  The label (optional).
-	//   [c,d)  [ 4, 0, 2, 0, 5 ]  The type (string).
-	//   [e,f)  [ 4, 0, 2, 0, 1 ]  The name (foo).
-	//   [g,h)  [ 4, 0, 2, 0, 3 ]  The number (1).
-	//
-	// Notes:
-	// - A location may refer to a repeated field itself (i.e. not to any
-	//   particular index within it). This is used whenever a set of elements are
-	//   logically enclosed in a single code segment. For example, an entire
-	//   extend block (possibly containing multiple extension definitions) will
-	//   have an outer location whose path refers to the "extensions" repeated
-	//   field without an index.
-	// - Multiple locations may have the same path. This happens when a single
-	//   logical declaration is spread out across multiple places. The most
-	//   obvious example is the "extend" block again -- there may be multiple
-	//   extend blocks in the same scope, each of which will have the same path.
-	// - A location's span is not always a subset of its parent's span. For
-	//   example, the "extendee" of an extension declaration appears at the
-	//   beginning of the "extend" block and is shared by all extensions within
-	//   the block.
-	// - Just because a location's span is a subset of some other location's span
-	//   does not mean that it is a descendent. For example, a "group" defines
-	//   both a type and a field in a single declaration. Thus, the locations
-	//   corresponding to the type and field and their components will overlap.
-	// - Code which tries to interpret locations should probably be designed to
-	//   ignore those that it doesn't understand, as more types of locations could
-	//   be recorded in the future.
-	Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} }
-func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) }
-func (*SourceCodeInfo) ProtoMessage() {}
-func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{19} }
-
-func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location {
-	if m != nil {
-		return m.Location
-	}
-	return nil
-}
-
-type SourceCodeInfo_Location struct {
-	// Identifies which part of the FileDescriptorProto was defined at this
-	// location.
-	//
-	// Each element is a field number or an index. They form a path from
-	// the root FileDescriptorProto to the place where the definition. For
-	// example, this path:
-	//   [ 4, 3, 2, 7, 1 ]
-	// refers to:
-	//   file.message_type(3)  // 4, 3
-	//       .field(7)         // 2, 7
-	//       .name()           // 1
-	// This is because FileDescriptorProto.message_type has field number 4:
-	//   repeated DescriptorProto message_type = 4;
-	// and DescriptorProto.field has field number 2:
-	//   repeated FieldDescriptorProto field = 2;
-	// and FieldDescriptorProto.name has field number 1:
-	//   optional string name = 1;
-	//
-	// Thus, the above path gives the location of a field name. If we removed
-	// the last element:
-	//   [ 4, 3, 2, 7 ]
-	// this path refers to the whole field declaration (from the beginning
-	// of the label to the terminating semicolon).
-	Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
-	// Always has exactly three or four elements: start line, start column,
-	// end line (optional, otherwise assumed same as start line), end column.
-	// These are packed into a single field for efficiency. Note that line
-	// and column numbers are zero-based -- typically you will want to add
-	// 1 to each before displaying to a user.
-	Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"`
-	// If this SourceCodeInfo represents a complete declaration, these are any
-	// comments appearing before and after the declaration which appear to be
-	// attached to the declaration.
-	//
-	// A series of line comments appearing on consecutive lines, with no other
-	// tokens appearing on those lines, will be treated as a single comment.
-	//
-	// leading_detached_comments will keep paragraphs of comments that appear
-	// before (but not connected to) the current element. Each paragraph,
-	// separated by empty lines, will be one comment element in the repeated
-	// field.
-	//
-	// Only the comment content is provided; comment markers (e.g. //) are
-	// stripped out. For block comments, leading whitespace and an asterisk
-	// will be stripped from the beginning of each line other than the first.
-	// Newlines are included in the output.
-	//
-	// Examples:
-	//
-	//   optional int32 foo = 1;  // Comment attached to foo.
-	//   // Comment attached to bar.
-	//   optional int32 bar = 2;
-	//
-	//   optional string baz = 3;
-	//   // Comment attached to baz.
-	//   // Another line attached to baz.
-	//
-	//   // Comment attached to qux.
-	//   //
-	//   // Another line attached to qux.
-	//   optional double qux = 4;
-	//
-	//   // Detached comment for corge. This is not leading or trailing comments
-	//   // to qux or corge because there are blank lines separating it from
-	//   // both.
-	//
-	//   // Detached comment for corge paragraph 2.
-	//
-	//   optional string corge = 5;
-	//   /* Block comment attached
-	//   * to corge. Leading asterisks
-	//   * will be removed. */
-	//   /* Block comment attached to
-	//   * grault. */
-	//   optional int32 grault = 6;
-	//
-	//   // ignored detached comments.
-	LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"`
-	TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"`
-	LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} }
-func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) }
-func (*SourceCodeInfo_Location) ProtoMessage() {}
-func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) {
-	return fileDescriptorDescriptor, []int{19, 0}
-}
-
-func (m *SourceCodeInfo_Location) GetPath() []int32 {
-	if m != nil {
-		return m.Path
-	}
-	return nil
-}
-
-func (m *SourceCodeInfo_Location) GetSpan() []int32 {
-	if m != nil {
-		return m.Span
-	}
-	return nil
-}
-
-func (m *SourceCodeInfo_Location) GetLeadingComments() string {
-	if m != nil && m.LeadingComments != nil {
-		return *m.LeadingComments
-	}
-	return ""
-}
-
-func (m *SourceCodeInfo_Location) GetTrailingComments() string {
-	if m != nil && m.TrailingComments != nil {
-		return *m.TrailingComments
-	}
-	return ""
-}
-
-func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string {
-	if m != nil {
-		return m.LeadingDetachedComments
-	}
-	return nil
-}
-
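A Location path such as [ 4, 3, 2, 7, 1 ] alternates field numbers and repeated-field indexes while walking down from the file descriptor, as the comments above describe. A toy walker over a fake descriptor tree, using descriptor.proto's field numbers (4 = message_type, 2 = field; the trailing 1 would then select the field's name):

package main

import "fmt"

type node struct {
	fields   map[int32][]*node // field number -> repeated children
	leafName string
}

// walk consumes (field number, index) pairs from the path.
func walk(root *node, path []int32) *node {
	cur := root
	for i := 0; i+1 < len(path); i += 2 {
		cur = cur.fields[path[i]][path[i+1]]
	}
	return cur
}

func main() {
	field7 := &node{leafName: "my_field"}
	msg3 := &node{fields: map[int32][]*node{2: {nil, nil, nil, nil, nil, nil, nil, field7}}}
	file := &node{fields: map[int32][]*node{4: {nil, nil, nil, msg3}}}
	// [4 3 2 7] resolves to message_type(3).field(7), i.e. the whole
	// field declaration; appending 1 narrows it to the name.
	fmt.Println(walk(file, []int32{4, 3, 2, 7}).leafName) // my_field
}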
-// Describes the relationship between generated code and its original source
-// file. A GeneratedCodeInfo message is associated with only one generated
-// source file, but may contain references to different source .proto files.
-type GeneratedCodeInfo struct {
-	// An Annotation connects some span of text in generated code to an element
-	// of its generating .proto file.
-	Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} }
-func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) }
-func (*GeneratedCodeInfo) ProtoMessage() {}
-func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{20} }
-
-func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation {
-	if m != nil {
-		return m.Annotation
-	}
-	return nil
-}
-
-type GeneratedCodeInfo_Annotation struct {
-	// Identifies the element in the original source .proto file. This field
-	// is formatted the same as SourceCodeInfo.Location.path.
-	Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
-	// Identifies the filesystem path to the original source .proto.
-	SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"`
-	// Identifies the starting offset in bytes in the generated code
-	// that relates to the identified object.
-	Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"`
-	// Identifies the ending offset in bytes in the generated code that
-	// relates to the identified offset. The end offset should be one past
-	// the last relevant byte (so the length of the text = end - begin).
-	End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} }
-func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) }
-func (*GeneratedCodeInfo_Annotation) ProtoMessage() {}
-func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) {
-	return fileDescriptorDescriptor, []int{20, 0}
-}
-
-func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 {
-	if m != nil {
-		return m.Path
-	}
-	return nil
-}
-
-func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string {
-	if m != nil && m.SourceFile != nil {
-		return *m.SourceFile
-	}
-	return ""
-}
-
-func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 {
-	if m != nil && m.Begin != nil {
-		return *m.Begin
-	}
-	return 0
-}
-
-func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 {
-	if m != nil && m.End != nil {
-		return *m.End
-	}
-	return 0
-}
-
-func init() {
-	proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet")
-	proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto")
-	proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto")
-	proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange")
-	proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange")
-	proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions")
-	proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto")
-	proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto")
-	proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto")
-	proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), "google.protobuf.EnumDescriptorProto.EnumReservedRange")
-	proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto")
-	proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto")
-	proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto")
-	proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions")
-	proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions")
-	proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions")
-	proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions")
-	proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions")
-	proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions")
-	proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions")
-	proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions")
-	proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption")
-	proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart")
-	proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo")
-	proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location")
-	proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo")
-	proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation")
-	proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name,
FieldDescriptorProto_Type_value)
-	proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value)
-	proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value)
-	proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value)
-	proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value)
-	proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value)
-}
-
-func init() { proto.RegisterFile("descriptor.proto", fileDescriptorDescriptor) }
-
-var fileDescriptorDescriptor = []byte{
-	// 2487 bytes of a gzipped FileDescriptorProto
-	// (opaque gzipped payload elided from this listing: 0x1f, 0x8b, 0x08, ... 0xdf, 0x1c, 0x00, 0x00)
-}
diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go
deleted file mode 100644
index 3b95a77..0000000
--- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go
+++ /dev/null
@@ -1,772 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: descriptor.proto
-
-/*
-Package descriptor is a generated protocol buffer package.
-
-It is generated from these files:
-	descriptor.proto
-
-It has these top-level messages:
-	FileDescriptorSet
-	FileDescriptorProto
-	DescriptorProto
-	ExtensionRangeOptions
-	FieldDescriptorProto
-	OneofDescriptorProto
-	EnumDescriptorProto
-	EnumValueDescriptorProto
-	ServiceDescriptorProto
-	MethodDescriptorProto
-	FileOptions
-	MessageOptions
-	FieldOptions
-	OneofOptions
-	EnumOptions
-	EnumValueOptions
-	ServiceOptions
-	MethodOptions
-	UninterpretedOption
-	SourceCodeInfo
-	GeneratedCodeInfo
-*/
-package descriptor
-
-import fmt "fmt"
-import strings "strings"
-import proto "github.com/gogo/protobuf/proto"
-import sort "sort"
-import strconv "strconv"
-import reflect "reflect"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -func (this *FileDescriptorSet) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&descriptor.FileDescriptorSet{") - if this.File != nil { - s = append(s, "File: "+fmt.Sprintf("%#v", this.File)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *FileDescriptorProto) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 16) - s = append(s, "&descriptor.FileDescriptorProto{") - if this.Name != nil { - s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") - } - if this.Package != nil { - s = append(s, "Package: "+valueToGoStringDescriptor(this.Package, "string")+",\n") - } - if this.Dependency != nil { - s = append(s, "Dependency: "+fmt.Sprintf("%#v", this.Dependency)+",\n") - } - if this.PublicDependency != nil { - s = append(s, "PublicDependency: "+fmt.Sprintf("%#v", this.PublicDependency)+",\n") - } - if this.WeakDependency != nil { - s = append(s, "WeakDependency: "+fmt.Sprintf("%#v", this.WeakDependency)+",\n") - } - if this.MessageType != nil { - s = append(s, "MessageType: "+fmt.Sprintf("%#v", this.MessageType)+",\n") - } - if this.EnumType != nil { - s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") - } - if this.Service != nil { - s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n") - } - if this.Extension != nil { - s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") - } - if this.Options != nil { - s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") - } - if this.SourceCodeInfo != nil { - s = append(s, "SourceCodeInfo: "+fmt.Sprintf("%#v", this.SourceCodeInfo)+",\n") - } - if this.Syntax != nil { - s = append(s, "Syntax: "+valueToGoStringDescriptor(this.Syntax, "string")+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *DescriptorProto) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 14) - s = append(s, "&descriptor.DescriptorProto{") - if this.Name != nil { - s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") - } - if this.Field != nil { - s = append(s, "Field: "+fmt.Sprintf("%#v", this.Field)+",\n") - } - if this.Extension != nil { - s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") - } - if this.NestedType != nil { - s = append(s, "NestedType: "+fmt.Sprintf("%#v", this.NestedType)+",\n") - } - if this.EnumType != nil { - s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") - } - if this.ExtensionRange != nil { - s = append(s, "ExtensionRange: "+fmt.Sprintf("%#v", this.ExtensionRange)+",\n") - } - if this.OneofDecl != nil { - s = append(s, "OneofDecl: "+fmt.Sprintf("%#v", this.OneofDecl)+",\n") - } - if this.Options != nil { - s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") - } - if this.ReservedRange != nil { - s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") - } - if this.ReservedName != nil { - s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, 
"}") - return strings.Join(s, "") -} -func (this *DescriptorProto_ExtensionRange) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&descriptor.DescriptorProto_ExtensionRange{") - if this.Start != nil { - s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") - } - if this.End != nil { - s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") - } - if this.Options != nil { - s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *DescriptorProto_ReservedRange) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&descriptor.DescriptorProto_ReservedRange{") - if this.Start != nil { - s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") - } - if this.End != nil { - s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *ExtensionRangeOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&descriptor.ExtensionRangeOptions{") - if this.UninterpretedOption != nil { - s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") - } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *FieldDescriptorProto) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 14) - s = append(s, "&descriptor.FieldDescriptorProto{") - if this.Name != nil { - s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") - } - if this.Number != nil { - s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") - } - if this.Label != nil { - s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "FieldDescriptorProto_Label")+",\n") - } - if this.Type != nil { - s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "FieldDescriptorProto_Type")+",\n") - } - if this.TypeName != nil { - s = append(s, "TypeName: "+valueToGoStringDescriptor(this.TypeName, "string")+",\n") - } - if this.Extendee != nil { - s = append(s, "Extendee: "+valueToGoStringDescriptor(this.Extendee, "string")+",\n") - } - if this.DefaultValue != nil { - s = append(s, "DefaultValue: "+valueToGoStringDescriptor(this.DefaultValue, "string")+",\n") - } - if this.OneofIndex != nil { - s = append(s, "OneofIndex: "+valueToGoStringDescriptor(this.OneofIndex, "int32")+",\n") - } - if this.JsonName != nil { - s = append(s, "JsonName: "+valueToGoStringDescriptor(this.JsonName, "string")+",\n") - } - if this.Options != nil { - s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *OneofDescriptorProto) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, 
"&descriptor.OneofDescriptorProto{") - if this.Name != nil { - s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") - } - if this.Options != nil { - s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *EnumDescriptorProto) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 9) - s = append(s, "&descriptor.EnumDescriptorProto{") - if this.Name != nil { - s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") - } - if this.Value != nil { - s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") - } - if this.Options != nil { - s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") - } - if this.ReservedRange != nil { - s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") - } - if this.ReservedName != nil { - s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *EnumDescriptorProto_EnumReservedRange) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&descriptor.EnumDescriptorProto_EnumReservedRange{") - if this.Start != nil { - s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") - } - if this.End != nil { - s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *EnumValueDescriptorProto) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&descriptor.EnumValueDescriptorProto{") - if this.Name != nil { - s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") - } - if this.Number != nil { - s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") - } - if this.Options != nil { - s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *ServiceDescriptorProto) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&descriptor.ServiceDescriptorProto{") - if this.Name != nil { - s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") - } - if this.Method != nil { - s = append(s, "Method: "+fmt.Sprintf("%#v", this.Method)+",\n") - } - if this.Options != nil { - s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *MethodDescriptorProto) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 10) - s = append(s, "&descriptor.MethodDescriptorProto{") - if this.Name != nil { - s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") - } - if this.InputType != nil { - s = 
append(s, "InputType: "+valueToGoStringDescriptor(this.InputType, "string")+",\n") - } - if this.OutputType != nil { - s = append(s, "OutputType: "+valueToGoStringDescriptor(this.OutputType, "string")+",\n") - } - if this.Options != nil { - s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") - } - if this.ClientStreaming != nil { - s = append(s, "ClientStreaming: "+valueToGoStringDescriptor(this.ClientStreaming, "bool")+",\n") - } - if this.ServerStreaming != nil { - s = append(s, "ServerStreaming: "+valueToGoStringDescriptor(this.ServerStreaming, "bool")+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *FileOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 23) - s = append(s, "&descriptor.FileOptions{") - if this.JavaPackage != nil { - s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n") - } - if this.JavaOuterClassname != nil { - s = append(s, "JavaOuterClassname: "+valueToGoStringDescriptor(this.JavaOuterClassname, "string")+",\n") - } - if this.JavaMultipleFiles != nil { - s = append(s, "JavaMultipleFiles: "+valueToGoStringDescriptor(this.JavaMultipleFiles, "bool")+",\n") - } - if this.JavaGenerateEqualsAndHash != nil { - s = append(s, "JavaGenerateEqualsAndHash: "+valueToGoStringDescriptor(this.JavaGenerateEqualsAndHash, "bool")+",\n") - } - if this.JavaStringCheckUtf8 != nil { - s = append(s, "JavaStringCheckUtf8: "+valueToGoStringDescriptor(this.JavaStringCheckUtf8, "bool")+",\n") - } - if this.OptimizeFor != nil { - s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "FileOptions_OptimizeMode")+",\n") - } - if this.GoPackage != nil { - s = append(s, "GoPackage: "+valueToGoStringDescriptor(this.GoPackage, "string")+",\n") - } - if this.CcGenericServices != nil { - s = append(s, "CcGenericServices: "+valueToGoStringDescriptor(this.CcGenericServices, "bool")+",\n") - } - if this.JavaGenericServices != nil { - s = append(s, "JavaGenericServices: "+valueToGoStringDescriptor(this.JavaGenericServices, "bool")+",\n") - } - if this.PyGenericServices != nil { - s = append(s, "PyGenericServices: "+valueToGoStringDescriptor(this.PyGenericServices, "bool")+",\n") - } - if this.PhpGenericServices != nil { - s = append(s, "PhpGenericServices: "+valueToGoStringDescriptor(this.PhpGenericServices, "bool")+",\n") - } - if this.Deprecated != nil { - s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") - } - if this.CcEnableArenas != nil { - s = append(s, "CcEnableArenas: "+valueToGoStringDescriptor(this.CcEnableArenas, "bool")+",\n") - } - if this.ObjcClassPrefix != nil { - s = append(s, "ObjcClassPrefix: "+valueToGoStringDescriptor(this.ObjcClassPrefix, "string")+",\n") - } - if this.CsharpNamespace != nil { - s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n") - } - if this.SwiftPrefix != nil { - s = append(s, "SwiftPrefix: "+valueToGoStringDescriptor(this.SwiftPrefix, "string")+",\n") - } - if this.PhpClassPrefix != nil { - s = append(s, "PhpClassPrefix: "+valueToGoStringDescriptor(this.PhpClassPrefix, "string")+",\n") - } - if this.PhpNamespace != nil { - s = append(s, "PhpNamespace: "+valueToGoStringDescriptor(this.PhpNamespace, "string")+",\n") - } - if this.UninterpretedOption != nil { - s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", 
this.UninterpretedOption)+",\n") - } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *MessageOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 9) - s = append(s, "&descriptor.MessageOptions{") - if this.MessageSetWireFormat != nil { - s = append(s, "MessageSetWireFormat: "+valueToGoStringDescriptor(this.MessageSetWireFormat, "bool")+",\n") - } - if this.NoStandardDescriptorAccessor != nil { - s = append(s, "NoStandardDescriptorAccessor: "+valueToGoStringDescriptor(this.NoStandardDescriptorAccessor, "bool")+",\n") - } - if this.Deprecated != nil { - s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") - } - if this.MapEntry != nil { - s = append(s, "MapEntry: "+valueToGoStringDescriptor(this.MapEntry, "bool")+",\n") - } - if this.UninterpretedOption != nil { - s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") - } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *FieldOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 11) - s = append(s, "&descriptor.FieldOptions{") - if this.Ctype != nil { - s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "FieldOptions_CType")+",\n") - } - if this.Packed != nil { - s = append(s, "Packed: "+valueToGoStringDescriptor(this.Packed, "bool")+",\n") - } - if this.Jstype != nil { - s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "FieldOptions_JSType")+",\n") - } - if this.Lazy != nil { - s = append(s, "Lazy: "+valueToGoStringDescriptor(this.Lazy, "bool")+",\n") - } - if this.Deprecated != nil { - s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") - } - if this.Weak != nil { - s = append(s, "Weak: "+valueToGoStringDescriptor(this.Weak, "bool")+",\n") - } - if this.UninterpretedOption != nil { - s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") - } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *OneofOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&descriptor.OneofOptions{") - if this.UninterpretedOption != nil { - s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") - } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *EnumOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&descriptor.EnumOptions{") - if this.AllowAlias != nil { - s = append(s, "AllowAlias: "+valueToGoStringDescriptor(this.AllowAlias, "bool")+",\n") - } - if this.Deprecated != nil { - s = 
append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") - } - if this.UninterpretedOption != nil { - s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") - } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *EnumValueOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&descriptor.EnumValueOptions{") - if this.Deprecated != nil { - s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") - } - if this.UninterpretedOption != nil { - s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") - } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *ServiceOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&descriptor.ServiceOptions{") - if this.Deprecated != nil { - s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") - } - if this.UninterpretedOption != nil { - s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") - } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *MethodOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&descriptor.MethodOptions{") - if this.Deprecated != nil { - s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") - } - if this.IdempotencyLevel != nil { - s = append(s, "IdempotencyLevel: "+valueToGoStringDescriptor(this.IdempotencyLevel, "MethodOptions_IdempotencyLevel")+",\n") - } - if this.UninterpretedOption != nil { - s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") - } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *UninterpretedOption) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 11) - s = append(s, "&descriptor.UninterpretedOption{") - if this.Name != nil { - s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") - } - if this.IdentifierValue != nil { - s = append(s, "IdentifierValue: "+valueToGoStringDescriptor(this.IdentifierValue, "string")+",\n") - } - if this.PositiveIntValue != nil { - s = append(s, "PositiveIntValue: "+valueToGoStringDescriptor(this.PositiveIntValue, "uint64")+",\n") - } - if this.NegativeIntValue != nil { - s = append(s, "NegativeIntValue: "+valueToGoStringDescriptor(this.NegativeIntValue, "int64")+",\n") - } - if this.DoubleValue != nil { - s = append(s, "DoubleValue: "+valueToGoStringDescriptor(this.DoubleValue, "float64")+",\n") - } - if this.StringValue != nil { 
- s = append(s, "StringValue: "+valueToGoStringDescriptor(this.StringValue, "byte")+",\n") - } - if this.AggregateValue != nil { - s = append(s, "AggregateValue: "+valueToGoStringDescriptor(this.AggregateValue, "string")+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *UninterpretedOption_NamePart) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&descriptor.UninterpretedOption_NamePart{") - if this.NamePart != nil { - s = append(s, "NamePart: "+valueToGoStringDescriptor(this.NamePart, "string")+",\n") - } - if this.IsExtension != nil { - s = append(s, "IsExtension: "+valueToGoStringDescriptor(this.IsExtension, "bool")+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *SourceCodeInfo) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&descriptor.SourceCodeInfo{") - if this.Location != nil { - s = append(s, "Location: "+fmt.Sprintf("%#v", this.Location)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *SourceCodeInfo_Location) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 9) - s = append(s, "&descriptor.SourceCodeInfo_Location{") - if this.Path != nil { - s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") - } - if this.Span != nil { - s = append(s, "Span: "+fmt.Sprintf("%#v", this.Span)+",\n") - } - if this.LeadingComments != nil { - s = append(s, "LeadingComments: "+valueToGoStringDescriptor(this.LeadingComments, "string")+",\n") - } - if this.TrailingComments != nil { - s = append(s, "TrailingComments: "+valueToGoStringDescriptor(this.TrailingComments, "string")+",\n") - } - if this.LeadingDetachedComments != nil { - s = append(s, "LeadingDetachedComments: "+fmt.Sprintf("%#v", this.LeadingDetachedComments)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *GeneratedCodeInfo) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&descriptor.GeneratedCodeInfo{") - if this.Annotation != nil { - s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *GeneratedCodeInfo_Annotation) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 8) - s = append(s, "&descriptor.GeneratedCodeInfo_Annotation{") - if this.Path != nil { - s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") - } - if this.SourceFile != nil { - s = append(s, "SourceFile: "+valueToGoStringDescriptor(this.SourceFile, "string")+",\n") - } - if this.Begin != nil { - s = append(s, "Begin: "+valueToGoStringDescriptor(this.Begin, "int32")+",\n") - } - if this.End != nil { - s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") - } - if this.XXX_unrecognized != nil { - s = 
append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringDescriptor(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} -func extensionToGoStringDescriptor(m proto.Message) string { - e := proto.GetUnsafeExtensionsMap(m) - if e == nil { - return "nil" - } - s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{" - keys := make([]int, 0, len(e)) - for k := range e { - keys = append(keys, int(k)) - } - sort.Ints(keys) - ss := []string{} - for _, k := range keys { - ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString()) - } - s += strings.Join(ss, ",") + "})" - return s -} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go deleted file mode 100644 index e0846a3..0000000 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go +++ /dev/null @@ -1,390 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package descriptor - -import ( - "strings" -) - -func (msg *DescriptorProto) GetMapFields() (*FieldDescriptorProto, *FieldDescriptorProto) { - if !msg.GetOptions().GetMapEntry() { - return nil, nil - } - return msg.GetField()[0], msg.GetField()[1] -} - -func dotToUnderscore(r rune) rune { - if r == '.' 
{ - return '_' - } - return r -} - -func (field *FieldDescriptorProto) WireType() (wire int) { - switch *field.Type { - case FieldDescriptorProto_TYPE_DOUBLE: - return 1 - case FieldDescriptorProto_TYPE_FLOAT: - return 5 - case FieldDescriptorProto_TYPE_INT64: - return 0 - case FieldDescriptorProto_TYPE_UINT64: - return 0 - case FieldDescriptorProto_TYPE_INT32: - return 0 - case FieldDescriptorProto_TYPE_UINT32: - return 0 - case FieldDescriptorProto_TYPE_FIXED64: - return 1 - case FieldDescriptorProto_TYPE_FIXED32: - return 5 - case FieldDescriptorProto_TYPE_BOOL: - return 0 - case FieldDescriptorProto_TYPE_STRING: - return 2 - case FieldDescriptorProto_TYPE_GROUP: - return 2 - case FieldDescriptorProto_TYPE_MESSAGE: - return 2 - case FieldDescriptorProto_TYPE_BYTES: - return 2 - case FieldDescriptorProto_TYPE_ENUM: - return 0 - case FieldDescriptorProto_TYPE_SFIXED32: - return 5 - case FieldDescriptorProto_TYPE_SFIXED64: - return 1 - case FieldDescriptorProto_TYPE_SINT32: - return 0 - case FieldDescriptorProto_TYPE_SINT64: - return 0 - } - panic("unreachable") -} - -func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) { - packed := field.IsPacked() - wireType := field.WireType() - fieldNumber := field.GetNumber() - if packed { - wireType = 2 - } - x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) - return x -} - -func (field *FieldDescriptorProto) GetKey3Uint64() (x uint64) { - packed := field.IsPacked3() - wireType := field.WireType() - fieldNumber := field.GetNumber() - if packed { - wireType = 2 - } - x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) - return x -} - -func (field *FieldDescriptorProto) GetKey() []byte { - x := field.GetKeyUint64() - i := 0 - keybuf := make([]byte, 0) - for i = 0; x > 127; i++ { - keybuf = append(keybuf, 0x80|uint8(x&0x7F)) - x >>= 7 - } - keybuf = append(keybuf, uint8(x)) - return keybuf -} - -func (field *FieldDescriptorProto) GetKey3() []byte { - x := field.GetKey3Uint64() - i := 0 - keybuf := make([]byte, 0) - for i = 0; x > 127; i++ { - keybuf = append(keybuf, 0x80|uint8(x&0x7F)) - x >>= 7 - } - keybuf = append(keybuf, uint8(x)) - return keybuf -} - -func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto { - msg := desc.GetMessage(packageName, messageName) - if msg == nil { - return nil - } - for _, field := range msg.GetField() { - if field.GetName() == fieldName { - return field - } - } - return nil -} - -func (file *FileDescriptorProto) GetMessage(typeName string) *DescriptorProto { - for _, msg := range file.GetMessageType() { - if msg.GetName() == typeName { - return msg - } - nes := file.GetNestedMessage(msg, strings.TrimPrefix(typeName, msg.GetName()+".")) - if nes != nil { - return nes - } - } - return nil -} - -func (file *FileDescriptorProto) GetNestedMessage(msg *DescriptorProto, typeName string) *DescriptorProto { - for _, nes := range msg.GetNestedType() { - if nes.GetName() == typeName { - return nes - } - res := file.GetNestedMessage(nes, strings.TrimPrefix(typeName, nes.GetName()+".")) - if res != nil { - return res - } - } - return nil -} - -func (desc *FileDescriptorSet) GetMessage(packageName string, typeName string) *DescriptorProto { - for _, file := range desc.GetFile() { - if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { - continue - } - for _, msg := range file.GetMessageType() { - if msg.GetName() == typeName { - return msg - } - } - for _, msg := range file.GetMessageType() { - for _, nes := range 
msg.GetNestedType() { - if nes.GetName() == typeName { - return nes - } - if msg.GetName()+"."+nes.GetName() == typeName { - return nes - } - } - } - } - return nil -} - -func (desc *FileDescriptorSet) IsProto3(packageName string, typeName string) bool { - for _, file := range desc.GetFile() { - if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { - continue - } - for _, msg := range file.GetMessageType() { - if msg.GetName() == typeName { - return file.GetSyntax() == "proto3" - } - } - for _, msg := range file.GetMessageType() { - for _, nes := range msg.GetNestedType() { - if nes.GetName() == typeName { - return file.GetSyntax() == "proto3" - } - if msg.GetName()+"."+nes.GetName() == typeName { - return file.GetSyntax() == "proto3" - } - } - } - } - return false -} - -func (msg *DescriptorProto) IsExtendable() bool { - return len(msg.GetExtensionRange()) > 0 -} - -func (desc *FileDescriptorSet) FindExtension(packageName string, typeName string, fieldName string) (extPackageName string, field *FieldDescriptorProto) { - parent := desc.GetMessage(packageName, typeName) - if parent == nil { - return "", nil - } - if !parent.IsExtendable() { - return "", nil - } - extendee := "." + packageName + "." + typeName - for _, file := range desc.GetFile() { - for _, ext := range file.GetExtension() { - if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { - if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { - continue - } - } else { - if ext.GetExtendee() != extendee { - continue - } - } - if ext.GetName() == fieldName { - return file.GetPackage(), ext - } - } - } - return "", nil -} - -func (desc *FileDescriptorSet) FindExtensionByFieldNumber(packageName string, typeName string, fieldNum int32) (extPackageName string, field *FieldDescriptorProto) { - parent := desc.GetMessage(packageName, typeName) - if parent == nil { - return "", nil - } - if !parent.IsExtendable() { - return "", nil - } - extendee := "." + packageName + "." 
+ typeName - for _, file := range desc.GetFile() { - for _, ext := range file.GetExtension() { - if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { - if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { - continue - } - } else { - if ext.GetExtendee() != extendee { - continue - } - } - if ext.GetNumber() == fieldNum { - return file.GetPackage(), ext - } - } - } - return "", nil -} - -func (desc *FileDescriptorSet) FindMessage(packageName string, typeName string, fieldName string) (msgPackageName string, msgName string) { - parent := desc.GetMessage(packageName, typeName) - if parent == nil { - return "", "" - } - field := parent.GetFieldDescriptor(fieldName) - if field == nil { - var extPackageName string - extPackageName, field = desc.FindExtension(packageName, typeName, fieldName) - if field == nil { - return "", "" - } - packageName = extPackageName - } - typeNames := strings.Split(field.GetTypeName(), ".") - if len(typeNames) == 1 { - msg := desc.GetMessage(packageName, typeName) - if msg == nil { - return "", "" - } - return packageName, msg.GetName() - } - if len(typeNames) > 2 { - for i := 1; i < len(typeNames)-1; i++ { - packageName = strings.Join(typeNames[1:len(typeNames)-i], ".") - typeName = strings.Join(typeNames[len(typeNames)-i:], ".") - msg := desc.GetMessage(packageName, typeName) - if msg != nil { - typeNames := strings.Split(msg.GetName(), ".") - if len(typeNames) == 1 { - return packageName, msg.GetName() - } - return strings.Join(typeNames[1:len(typeNames)-1], "."), typeNames[len(typeNames)-1] - } - } - } - return "", "" -} - -func (msg *DescriptorProto) GetFieldDescriptor(fieldName string) *FieldDescriptorProto { - for _, field := range msg.GetField() { - if field.GetName() == fieldName { - return field - } - } - return nil -} - -func (desc *FileDescriptorSet) GetEnum(packageName string, typeName string) *EnumDescriptorProto { - for _, file := range desc.GetFile() { - if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { - continue - } - for _, enum := range file.GetEnumType() { - if enum.GetName() == typeName { - return enum - } - } - } - return nil -} - -func (f *FieldDescriptorProto) IsEnum() bool { - return *f.Type == FieldDescriptorProto_TYPE_ENUM -} - -func (f *FieldDescriptorProto) IsMessage() bool { - return *f.Type == FieldDescriptorProto_TYPE_MESSAGE -} - -func (f *FieldDescriptorProto) IsBytes() bool { - return *f.Type == FieldDescriptorProto_TYPE_BYTES -} - -func (f *FieldDescriptorProto) IsRepeated() bool { - return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REPEATED -} - -func (f *FieldDescriptorProto) IsString() bool { - return *f.Type == FieldDescriptorProto_TYPE_STRING -} - -func (f *FieldDescriptorProto) IsBool() bool { - return *f.Type == FieldDescriptorProto_TYPE_BOOL -} - -func (f *FieldDescriptorProto) IsRequired() bool { - return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REQUIRED -} - -func (f *FieldDescriptorProto) IsPacked() bool { - return f.Options != nil && f.GetOptions().GetPacked() -} - -func (f *FieldDescriptorProto) IsPacked3() bool { - if f.IsRepeated() && f.IsScalar() { - if f.Options == nil || f.GetOptions().Packed == nil { - return true - } - return f.Options != nil && f.GetOptions().GetPacked() - } - return false -} - -func (m *DescriptorProto) HasExtension() bool { - return len(m.ExtensionRange) > 0 -} diff --git a/vendor/github.com/golang/protobuf/AUTHORS 
b/vendor/github.com/golang/protobuf/AUTHORS new file mode 100644 index 0000000..15167cd --- /dev/null +++ b/vendor/github.com/golang/protobuf/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS new file mode 100644 index 0000000..1c4577e --- /dev/null +++ b/vendor/github.com/golang/protobuf/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_amd64.go index 150d91b..2a56fb5 100644 --- a/vendor/github.com/golang/snappy/encode_amd64.go +++ b/vendor/github.com/golang/snappy/encode_amd64.go @@ -26,4 +26,4 @@ func extendMatch(src []byte, i, j int) int // encodeBlock has the same semantics as in encode_other.go. // //go:noescape -func encodeBlock(dst, src []byte) (d int) +func encodeBlock(dst, src []byte) (d int) \ No newline at end of file diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go index c7f445f..0cf5e37 100644 --- a/vendor/github.com/golang/snappy/snappy.go +++ b/vendor/github.com/golang/snappy/snappy.go @@ -6,7 +6,7 @@ // It aims for very high speeds and reasonable compression. // // The C++ snappy implementation is at https://github.com/google/snappy -package snappy +package snappy // import "github.com/golang/snappy" import ( "hash/crc32" diff --git a/vendor/github.com/gorilla/context/.travis.yml b/vendor/github.com/gorilla/context/.travis.yml new file mode 100644 index 0000000..6f440f1 --- /dev/null +++ b/vendor/github.com/gorilla/context/.travis.yml @@ -0,0 +1,19 @@ +language: go +sudo: false + +matrix: + include: + - go: 1.3 + - go: 1.4 + - go: 1.5 + - go: 1.6 + - go: 1.7 + - go: tip + allow_failures: + - go: tip + +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d .) + - go vet $(go list ./... | grep -v /vendor/) + - go test -v -race ./... diff --git a/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/LICENSE b/vendor/github.com/gorilla/context/LICENSE similarity index 83% rename from vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/LICENSE rename to vendor/github.com/gorilla/context/LICENSE index f1f6cfc..0e5fb87 100644 --- a/vendor/github.com/armpelionedge/edge-go-logger/vendor/github.com/op/go-logging/LICENSE +++ b/vendor/github.com/gorilla/context/LICENSE @@ -1,16 +1,16 @@ -Copyright (c) 2013 Örjan Persson. All rights reserved. +Copyright (c) 2012 Rodrigo Moraes. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright + * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above + * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. 
nor the names of its + * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/github.com/gorilla/context/README.md b/vendor/github.com/gorilla/context/README.md new file mode 100644 index 0000000..08f8669 --- /dev/null +++ b/vendor/github.com/gorilla/context/README.md @@ -0,0 +1,10 @@ +context +======= +[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context) + +gorilla/context is a general purpose registry for global request variables. + +> Note: gorilla/context, having been born well before `context.Context` existed, does not play well +> with the shallow copying of the request that [`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext) (added to net/http Go 1.7 onwards) performs. You should either use *just* gorilla/context, or moving forward, the new `http.Request.Context()`. + +Read the full documentation here: http://www.gorillatoolkit.org/pkg/context diff --git a/vendor/github.com/gorilla/context/context.go b/vendor/github.com/gorilla/context/context.go new file mode 100644 index 0000000..81cb128 --- /dev/null +++ b/vendor/github.com/gorilla/context/context.go @@ -0,0 +1,143 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package context + +import ( + "net/http" + "sync" + "time" +) + +var ( + mutex sync.RWMutex + data = make(map[*http.Request]map[interface{}]interface{}) + datat = make(map[*http.Request]int64) +) + +// Set stores a value for a given key in a given request. +func Set(r *http.Request, key, val interface{}) { + mutex.Lock() + if data[r] == nil { + data[r] = make(map[interface{}]interface{}) + datat[r] = time.Now().Unix() + } + data[r][key] = val + mutex.Unlock() +} + +// Get returns a value stored for a given key in a given request. +func Get(r *http.Request, key interface{}) interface{} { + mutex.RLock() + if ctx := data[r]; ctx != nil { + value := ctx[key] + mutex.RUnlock() + return value + } + mutex.RUnlock() + return nil +} + +// GetOk returns stored value and presence state like multi-value return of map access. +func GetOk(r *http.Request, key interface{}) (interface{}, bool) { + mutex.RLock() + if _, ok := data[r]; ok { + value, ok := data[r][key] + mutex.RUnlock() + return value, ok + } + mutex.RUnlock() + return nil, false +} + +// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests. +func GetAll(r *http.Request) map[interface{}]interface{} { + mutex.RLock() + if context, ok := data[r]; ok { + result := make(map[interface{}]interface{}, len(context)) + for k, v := range context { + result[k] = v + } + mutex.RUnlock() + return result + } + mutex.RUnlock() + return nil +} + +// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if +// the request was registered. +func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) { + mutex.RLock() + context, ok := data[r] + result := make(map[interface{}]interface{}, len(context)) + for k, v := range context { + result[k] = v + } + mutex.RUnlock() + return result, ok +} + +// Delete removes a value stored for a given key in a given request. 
+func Delete(r *http.Request, key interface{}) {
+	mutex.Lock()
+	if data[r] != nil {
+		delete(data[r], key)
+	}
+	mutex.Unlock()
+}
+
+// Clear removes all values stored for a given request.
+//
+// This is usually called by a handler wrapper to clean up request
+// variables at the end of a request lifetime. See ClearHandler().
+func Clear(r *http.Request) {
+	mutex.Lock()
+	clear(r)
+	mutex.Unlock()
+}
+
+// clear is Clear without the lock.
+func clear(r *http.Request) {
+	delete(data, r)
+	delete(datat, r)
+}
+
+// Purge removes request data stored for longer than maxAge, in seconds.
+// It returns the number of requests removed.
+//
+// If maxAge <= 0, all request data is removed.
+//
+// This is only meant as a sanity check: if context cleaning was not set up
+// properly, some request data can be kept forever, consuming an increasing
+// amount of memory. If this is detected, Purge() must be called
+// periodically until the problem is fixed.
+func Purge(maxAge int) int {
+	mutex.Lock()
+	count := 0
+	if maxAge <= 0 {
+		count = len(data)
+		data = make(map[*http.Request]map[interface{}]interface{})
+		datat = make(map[*http.Request]int64)
+	} else {
+		min := time.Now().Unix() - int64(maxAge)
+		for r := range data {
+			if datat[r] < min {
+				clear(r)
+				count++
+			}
+		}
+	}
+	mutex.Unlock()
+	return count
+}
+
+// ClearHandler wraps an http.Handler and clears request values at the end
+// of a request lifetime.
+func ClearHandler(h http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		defer Clear(r)
+		h.ServeHTTP(w, r)
+	})
+}
diff --git a/vendor/github.com/gorilla/context/doc.go b/vendor/github.com/gorilla/context/doc.go
new file mode 100644
index 0000000..448d1bf
--- /dev/null
+++ b/vendor/github.com/gorilla/context/doc.go
@@ -0,0 +1,88 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package context stores values shared during a request lifetime.
+
+Note: gorilla/context, having been born well before `context.Context` existed,
+does not play well with the shallow copying of the request that
+[`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext)
+(added to net/http Go 1.7 onwards) performs. You should either use *just*
+gorilla/context, or moving forward, the new `http.Request.Context()`.
+
+For example, a router can set variables extracted from the URL and later
+application handlers can access those values, or it can be used to store
+session values to be saved at the end of a request. There are several
+other common uses.
+
+The idea was posted by Brad Fitzpatrick to the go-nuts mailing list:
+
+	http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53
+
+Here's the basic usage: first define the keys that you will need. The key
+type is interface{} so a key can be of any type that supports equality.
+Here we define a key using a custom int type to avoid name collisions:
+
+	package foo
+
+	import (
+		"github.com/gorilla/context"
+	)
+
+	type key int
+
+	const MyKey key = 0
+
+Then set a variable. Variables are bound to an http.Request object, so you
+need a request instance to set a value:
+
+	context.Set(r, MyKey, "bar")
+
+The application can later access the variable using the same key you provided:
+
+	func MyHandler(w http.ResponseWriter, r *http.Request) {
+		// val is "bar".
+		val := context.Get(r, foo.MyKey)
+
+		// returns ("bar", true)
+		val, ok := context.GetOk(r, foo.MyKey)
+		// ...
+	}
+
+And that's all for the basic usage. We discuss some other ideas below.
+
+Any type can be stored in the context. To enforce a given type, make the key
+private and wrap Get() and Set() to accept and return values of a specific
+type:
+
+	type key int
+
+	const mykey key = 0
+
+	// GetMyKey returns a value for this package from the request values.
+	func GetMyKey(r *http.Request) SomeType {
+		if rv := context.Get(r, mykey); rv != nil {
+			return rv.(SomeType)
+		}
+		return nil
+	}
+
+	// SetMyKey sets a value for this package in the request values.
+	func SetMyKey(r *http.Request, val SomeType) {
+		context.Set(r, mykey, val)
+	}
+
+Variables must be cleared at the end of a request, to remove all values
+that were stored. This can be done in an http.Handler, after a request was
+served. Just call Clear() passing the request:
+
+	context.Clear(r)
+
+...or use ClearHandler(), which conveniently wraps an http.Handler to clear
+variables at the end of a request lifetime.
+
+The Routers from the packages gorilla/mux and gorilla/pat call Clear(),
+so if you are using either of them you don't need to clear the context
+manually.
+*/
+package context
diff --git a/vendor/github.com/onsi/ginkgo/before_pr.sh b/vendor/github.com/onsi/ginkgo/before_pr.sh
old mode 100755
new mode 100644
diff --git a/vendor/github.com/prometheus/procfs/ttar b/vendor/github.com/prometheus/procfs/ttar
old mode 100755
new mode 100644
diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go
index 839e3a6..a3c021d 100644
--- a/vendor/golang.org/x/net/context/context.go
+++ b/vendor/golang.org/x/net/context/context.go
@@ -36,7 +36,7 @@
 //
 // See http://blog.golang.org/context for example code for a server that uses
 // Contexts.
-package context
+package context // import "golang.org/x/net/context"
 
 // Background returns a non-nil, empty Context. It is never canceled, has no
 // values, and has no deadline. It is typically used by the main function,
diff --git a/vendor/golang.org/x/net/html/atom/atom.go b/vendor/golang.org/x/net/html/atom/atom.go
index 227404b..cd0a8ac 100644
--- a/vendor/golang.org/x/net/html/atom/atom.go
+++ b/vendor/golang.org/x/net/html/atom/atom.go
@@ -15,7 +15,7 @@
 // whether atom.H1 < atom.H2 may also change. The codes are not guaranteed to
 // be dense. The only guarantees are that e.g. looking up "div" will yield
 // atom.Div, calling atom.Div.String will return "div", and atom.Div != 0.
-package atom
+package atom // import "golang.org/x/net/html/atom"
 
 // Atom is an integer code for a string. The zero value maps to "".
 type Atom uint32
diff --git a/vendor/golang.org/x/net/html/charset/charset.go b/vendor/golang.org/x/net/html/charset/charset.go
index 4496072..13bed15 100644
--- a/vendor/golang.org/x/net/html/charset/charset.go
+++ b/vendor/golang.org/x/net/html/charset/charset.go
@@ -6,7 +6,7 @@
 //
 // The mapping from encoding labels to encodings is defined at
 // https://encoding.spec.whatwg.org/.
-package charset +package charset // import "golang.org/x/net/html/charset" import ( "bytes" diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go index e3ec457..822ed42 100644 --- a/vendor/golang.org/x/net/html/doc.go +++ b/vendor/golang.org/x/net/html/doc.go @@ -93,7 +93,7 @@ The relevant specifications include: https://html.spec.whatwg.org/multipage/syntax.html and https://html.spec.whatwg.org/multipage/syntax.html#tokenization */ -package html +package html // import "golang.org/x/net/html" // The tokenization algorithm implemented by this package is not a line-by-line // transliteration of the relatively verbose state-machine in the WHATWG diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh old mode 100755 new mode 100644 diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh old mode 100755 new mode 100644 diff --git a/vendor/golang.org/x/sys/unix/mksyscall.pl b/vendor/golang.org/x/sys/unix/mksyscall.pl old mode 100755 new mode 100644 diff --git a/vendor/golang.org/x/sys/unix/mksyscall_solaris.pl b/vendor/golang.org/x/sys/unix/mksyscall_solaris.pl old mode 100755 new mode 100644 diff --git a/vendor/golang.org/x/sys/unix/mksysctl_openbsd.pl b/vendor/golang.org/x/sys/unix/mksysctl_openbsd.pl old mode 100755 new mode 100644 diff --git a/vendor/golang.org/x/sys/unix/mksysnum_darwin.pl b/vendor/golang.org/x/sys/unix/mksysnum_darwin.pl old mode 100755 new mode 100644 diff --git a/vendor/golang.org/x/sys/unix/mksysnum_dragonfly.pl b/vendor/golang.org/x/sys/unix/mksysnum_dragonfly.pl old mode 100755 new mode 100644 diff --git a/vendor/golang.org/x/sys/unix/mksysnum_freebsd.pl b/vendor/golang.org/x/sys/unix/mksysnum_freebsd.pl old mode 100755 new mode 100644 diff --git a/vendor/golang.org/x/sys/unix/mksysnum_netbsd.pl b/vendor/golang.org/x/sys/unix/mksysnum_netbsd.pl old mode 100755 new mode 100644 diff --git a/vendor/golang.org/x/sys/unix/mksysnum_openbsd.pl b/vendor/golang.org/x/sys/unix/mksysnum_openbsd.pl old mode 100755 new mode 100644 diff --git a/vendor/golang.org/x/sys/unix/syscall.go b/vendor/golang.org/x/sys/unix/syscall.go index fb74877..ef35fce 100644 --- a/vendor/golang.org/x/sys/unix/syscall.go +++ b/vendor/golang.org/x/sys/unix/syscall.go @@ -22,7 +22,7 @@ // These calls return err == nil to indicate success; otherwise // err represents an operating system error describing the failure and // holds a value of type syscall.Errno. -package unix +package unix // import "golang.org/x/sys/unix" import "strings" diff --git a/vendor/golang.org/x/text/encoding/charmap/charmap.go b/vendor/golang.org/x/text/encoding/charmap/charmap.go index 48beea8..e89ff07 100644 --- a/vendor/golang.org/x/text/encoding/charmap/charmap.go +++ b/vendor/golang.org/x/text/encoding/charmap/charmap.go @@ -6,7 +6,7 @@ // Package charmap provides simple character encodings such as IBM Code Page 437 // and Windows 1252. -package charmap +package charmap // import "golang.org/x/text/encoding/charmap" import ( "unicode/utf8" diff --git a/vendor/golang.org/x/text/encoding/encoding.go b/vendor/golang.org/x/text/encoding/encoding.go index abc2c9a..221f175 100644 --- a/vendor/golang.org/x/text/encoding/encoding.go +++ b/vendor/golang.org/x/text/encoding/encoding.go @@ -8,7 +8,7 @@ // Encoding implementations are provided in other packages, such as // golang.org/x/text/encoding/charmap and // golang.org/x/text/encoding/japanese. 
-package encoding +package encoding // import "golang.org/x/text/encoding" import ( "errors" diff --git a/vendor/golang.org/x/text/encoding/japanese/tables.go b/vendor/golang.org/x/text/encoding/japanese/tables.go index 1108e83..8717b79 100644 --- a/vendor/golang.org/x/text/encoding/japanese/tables.go +++ b/vendor/golang.org/x/text/encoding/japanese/tables.go @@ -1,7 +1,7 @@ // generated by go run maketables.go; DO NOT EDIT // Package japanese provides Japanese encodings such as EUC-JP and Shift JIS. -package japanese +package japanese // import "golang.org/x/text/encoding/japanese" // jis0208Decode is the decoding table from JIS 0208 code to Unicode. // It is defined at http://encoding.spec.whatwg.org/index-jis0208.txt diff --git a/vendor/golang.org/x/text/encoding/korean/tables.go b/vendor/golang.org/x/text/encoding/korean/tables.go index eb8b451..0480e85 100644 --- a/vendor/golang.org/x/text/encoding/korean/tables.go +++ b/vendor/golang.org/x/text/encoding/korean/tables.go @@ -1,7 +1,7 @@ // generated by go run maketables.go; DO NOT EDIT // Package korean provides Korean encodings such as EUC-KR. -package korean +package korean // import "golang.org/x/text/encoding/korean" // decode is the decoding table from EUC-KR code to Unicode. // It is defined at http://encoding.spec.whatwg.org/index-euc-kr.txt diff --git a/vendor/golang.org/x/text/encoding/simplifiedchinese/tables.go b/vendor/golang.org/x/text/encoding/simplifiedchinese/tables.go index fac299d..415f52a 100644 --- a/vendor/golang.org/x/text/encoding/simplifiedchinese/tables.go +++ b/vendor/golang.org/x/text/encoding/simplifiedchinese/tables.go @@ -1,7 +1,7 @@ // generated by go run maketables.go; DO NOT EDIT // Package simplifiedchinese provides Simplified Chinese encodings such as GBK. -package simplifiedchinese +package simplifiedchinese // import "golang.org/x/text/encoding/simplifiedchinese" // gb18030 is the table from http://encoding.spec.whatwg.org/index-gb18030.txt var gb18030 = [...][2]uint16{ diff --git a/vendor/golang.org/x/text/encoding/traditionalchinese/tables.go b/vendor/golang.org/x/text/encoding/traditionalchinese/tables.go index b0d23c7..d909e38 100644 --- a/vendor/golang.org/x/text/encoding/traditionalchinese/tables.go +++ b/vendor/golang.org/x/text/encoding/traditionalchinese/tables.go @@ -1,7 +1,7 @@ // generated by go run maketables.go; DO NOT EDIT // Package traditionalchinese provides Traditional Chinese encodings such as Big5. -package traditionalchinese +package traditionalchinese // import "golang.org/x/text/encoding/traditionalchinese" // decode is the decoding table from Big5 code to Unicode. // It is defined at http://encoding.spec.whatwg.org/index-big5.txt diff --git a/vendor/golang.org/x/text/encoding/unicode/unicode.go b/vendor/golang.org/x/text/encoding/unicode/unicode.go index f3f2c4e..579cadf 100644 --- a/vendor/golang.org/x/text/encoding/unicode/unicode.go +++ b/vendor/golang.org/x/text/encoding/unicode/unicode.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Package unicode provides Unicode encodings such as UTF-16. -package unicode +package unicode // import "golang.org/x/text/encoding/unicode" import ( "errors" diff --git a/vendor/golang.org/x/text/internal/tag/tag.go b/vendor/golang.org/x/text/internal/tag/tag.go index 2cf4ecd..b5d3488 100644 --- a/vendor/golang.org/x/text/internal/tag/tag.go +++ b/vendor/golang.org/x/text/internal/tag/tag.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. 
// Package tag contains functionality handling tags and related data. -package tag +package tag // import "golang.org/x/text/internal/tag" import "sort" diff --git a/vendor/golang.org/x/text/language/doc.go b/vendor/golang.org/x/text/language/doc.go index 17a1a27..8afecd5 100644 --- a/vendor/golang.org/x/text/language/doc.go +++ b/vendor/golang.org/x/text/language/doc.go @@ -96,7 +96,7 @@ // // BCP 47 - Tags for Identifying Languages http://tools.ietf.org/html/bcp47 // -package language +package language // import "golang.org/x/text/language" // TODO: explanation on how to match languages for your own locale-specific // service. diff --git a/vendor/golang.org/x/text/runes/runes.go b/vendor/golang.org/x/text/runes/runes.go index 6a3195c..7193369 100644 --- a/vendor/golang.org/x/text/runes/runes.go +++ b/vendor/golang.org/x/text/runes/runes.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Package runes provide transforms for UTF-8 encoded text. -package runes +package runes // import "golang.org/x/text/runes" import ( "unicode" diff --git a/vendor/golang.org/x/text/transform/transform.go b/vendor/golang.org/x/text/transform/transform.go index 24dcca6..fe47b9b 100644 --- a/vendor/golang.org/x/text/transform/transform.go +++ b/vendor/golang.org/x/text/transform/transform.go @@ -6,7 +6,7 @@ // bytes passing through as well as various transformations. Example // transformations provided by other packages include normalization and // conversion between character sets. -package transform +package transform // import "golang.org/x/text/transform" import ( "bytes" diff --git a/vendor/modules.txt b/vendor/modules.txt new file mode 100644 index 0000000..94d33c2 --- /dev/null +++ b/vendor/modules.txt @@ -0,0 +1,144 @@ +# github.com/WigWagCo/wigwag-go-logger v0.0.0-20181019204055-004c5464cc0e +github.com/WigWagCo/wigwag-go-logger/logging +# github.com/armPelionEdge/devicedb v0.0.0-20200515102353-d24df289ab24 +github.com/armPelionEdge/devicedb/benchmarks +github.com/armPelionEdge/devicedb/bucket +github.com/armPelionEdge/devicedb/client +github.com/armPelionEdge/devicedb/cluster +github.com/armPelionEdge/devicedb/compatibility +github.com/armPelionEdge/devicedb/data +github.com/armPelionEdge/devicedb/error +github.com/armPelionEdge/devicedb/historian +github.com/armPelionEdge/devicedb/logging +github.com/armPelionEdge/devicedb/merkle +github.com/armPelionEdge/devicedb/node +github.com/armPelionEdge/devicedb/raft +github.com/armPelionEdge/devicedb/routes +github.com/armPelionEdge/devicedb/server +github.com/armPelionEdge/devicedb/shared +github.com/armPelionEdge/devicedb/storage +github.com/armPelionEdge/devicedb/util +github.com/armPelionEdge/devicedb/version +github.com/armPelionEdge/devicedb/sync +github.com/armPelionEdge/devicedb/resolver +github.com/armPelionEdge/devicedb/resolver/strategies +github.com/armPelionEdge/devicedb/rest +github.com/armPelionEdge/devicedb/transport +github.com/armPelionEdge/devicedb/bucket/builtin +github.com/armPelionEdge/devicedb/clusterio +github.com/armPelionEdge/devicedb/node_facade +github.com/armPelionEdge/devicedb/partition +github.com/armPelionEdge/devicedb/site +github.com/armPelionEdge/devicedb/transfer +github.com/armPelionEdge/devicedb/alerts +github.com/armPelionEdge/devicedb/client_relay +# github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 +github.com/beorn7/perks/quantile +# github.com/coreos/etcd v0.0.0-20170725052840-d2654f852232 +github.com/coreos/etcd/raft +github.com/coreos/etcd/raft/raftpb +# github.com/golang/protobuf 
v0.0.0-20170712042213-0a4f71a498b7 +github.com/golang/protobuf/proto +# github.com/golang/snappy v0.0.0-20160529050041-d9eb7a3d35ec +github.com/golang/snappy +# github.com/google/uuid v0.0.0-20171129191014-dec09d789f3d +github.com/google/uuid +# github.com/gorilla/context v1.1.1 +github.com/gorilla/context +# github.com/gorilla/mux v0.0.0-20160902153343-0a192a193177 +github.com/gorilla/mux +# github.com/gorilla/websocket v0.0.0-20181012020131-76e4896901ef +github.com/gorilla/websocket +# github.com/mattn/go-runewidth v0.0.3 +github.com/mattn/go-runewidth +# github.com/matttproud/golang_protobuf_extensions v1.0.1 +github.com/matttproud/golang_protobuf_extensions/pbutil +# github.com/olekukonko/tablewriter v0.0.0-20180506121414-d4647c9c7a84 +github.com/olekukonko/tablewriter +# github.com/onsi/ginkgo v0.0.0-20171214073015-bc14b6691e7a +github.com/onsi/ginkgo +github.com/onsi/ginkgo/config +github.com/onsi/ginkgo/internal/codelocation +github.com/onsi/ginkgo/internal/failer +github.com/onsi/ginkgo/internal/remote +github.com/onsi/ginkgo/internal/suite +github.com/onsi/ginkgo/internal/testingtproxy +github.com/onsi/ginkgo/internal/writer +github.com/onsi/ginkgo/reporters +github.com/onsi/ginkgo/reporters/stenographer +github.com/onsi/ginkgo/types +github.com/onsi/ginkgo/internal/spec_iterator +github.com/onsi/ginkgo/internal/containernode +github.com/onsi/ginkgo/internal/leafnodes +github.com/onsi/ginkgo/internal/spec +github.com/onsi/ginkgo/internal/specrunner +github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable +github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty +# github.com/onsi/gomega v0.0.0-20171211090144-c1fb6682134d +github.com/onsi/gomega +github.com/onsi/gomega/ghttp +github.com/onsi/gomega/internal/assertion +github.com/onsi/gomega/internal/asyncassertion +github.com/onsi/gomega/internal/testingtsupport +github.com/onsi/gomega/matchers +github.com/onsi/gomega/types +github.com/onsi/gomega/internal/oraclematcher +github.com/onsi/gomega/format +github.com/onsi/gomega/matchers/support/goraph/bipartitegraph +github.com/onsi/gomega/matchers/support/goraph/edge +github.com/onsi/gomega/matchers/support/goraph/node +github.com/onsi/gomega/matchers/support/goraph/util +# github.com/op/go-logging v0.0.0-20160315200505-970db520ece7 +github.com/op/go-logging +# github.com/prometheus/client_golang v0.0.0-20180416233856-82f5ff156b29 +github.com/prometheus/client_golang/prometheus +github.com/prometheus/client_golang/prometheus/promhttp +# github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5 +github.com/prometheus/client_model/go +# github.com/prometheus/common v0.0.0-20180426121432-d811d2e9bf89 +github.com/prometheus/common/expfmt +github.com/prometheus/common/model +github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg +# github.com/prometheus/procfs v0.0.0-20180408092902-8b1c2da0d56d +github.com/prometheus/procfs +github.com/prometheus/procfs/nfs +github.com/prometheus/procfs/xfs +github.com/prometheus/procfs/internal/util +# github.com/syndtr/goleveldb v0.0.0-20160629101233-ab8b5dcf1042 +github.com/syndtr/goleveldb/leveldb +github.com/syndtr/goleveldb/leveldb/errors +github.com/syndtr/goleveldb/leveldb/iterator +github.com/syndtr/goleveldb/leveldb/opt +github.com/syndtr/goleveldb/leveldb/util +github.com/syndtr/goleveldb/leveldb/cache +github.com/syndtr/goleveldb/leveldb/comparer +github.com/syndtr/goleveldb/leveldb/filter +github.com/syndtr/goleveldb/leveldb/journal +github.com/syndtr/goleveldb/leveldb/memdb 
+github.com/syndtr/goleveldb/leveldb/storage +github.com/syndtr/goleveldb/leveldb/table +# golang.org/x/net v0.0.0-20171212005608-d866cfc389ce +golang.org/x/net/context +golang.org/x/net/html/charset +golang.org/x/net/html +golang.org/x/net/html/atom +# golang.org/x/sys v0.0.0-20180420145319-79b0c6888797 +golang.org/x/sys/unix +# golang.org/x/text v0.3.0 +golang.org/x/text/encoding +golang.org/x/text/encoding/charmap +golang.org/x/text/encoding/htmlindex +golang.org/x/text/transform +golang.org/x/text/encoding/internal/identifier +golang.org/x/text/encoding/internal +golang.org/x/text/encoding/japanese +golang.org/x/text/encoding/korean +golang.org/x/text/encoding/simplifiedchinese +golang.org/x/text/encoding/traditionalchinese +golang.org/x/text/encoding/unicode +golang.org/x/text/language +golang.org/x/text/internal/utf8internal +golang.org/x/text/runes +golang.org/x/text/internal/tag +# gopkg.in/yaml.v2 v2.0.0-20160715033755-e4d366fc3c79 +gopkg.in/yaml.v2
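
A note on the vendored gorilla/context package above: it keeps request-scoped
values in package-level maps keyed by *http.Request, so every request must be
cleared (normally via ClearHandler) or those maps grow without bound. The
sketch below is a minimal, self-contained illustration of the
Set/GetOk/ClearHandler flow described in the vendored doc.go; the key type,
middleware, handler names, and port are hypothetical and not part of this
patch.

	package main

	import (
		"fmt"
		"log"
		"net/http"

		"github.com/gorilla/context"
	)

	// key is an unexported type so this package's context keys cannot
	// collide with keys set by other packages.
	type key int

	const userKey key = 0

	// withUser is middleware that stores a request-scoped value.
	func withUser(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			context.Set(r, userKey, "alice")
			next.ServeHTTP(w, r)
		})
	}

	func hello(w http.ResponseWriter, r *http.Request) {
		// GetOk reports whether a value was stored for this request/key.
		if user, ok := context.GetOk(r, userKey); ok {
			fmt.Fprintf(w, "hello, %v\n", user)
			return
		}
		http.Error(w, "no user", http.StatusUnauthorized)
	}

	func main() {
		// ClearHandler clears all values after each request, preventing
		// the package-level maps from leaking memory.
		h := context.ClearHandler(withUser(http.HandlerFunc(hello)))
		log.Fatal(http.ListenAndServe(":8080", h))
	}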