diff --git a/hotelReservation/Docker/._.DS_Store b/hotelReservation/Docker/._.DS_Store deleted file mode 100644 index 0750a5f92..000000000 Binary files a/hotelReservation/Docker/._.DS_Store and /dev/null differ diff --git a/hotelReservation/Docker/.gitignore b/hotelReservation/Docker/.gitignore deleted file mode 100644 index 4f382da12..000000000 --- a/hotelReservation/Docker/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -cmd/*/data -cmd/api/api -cmd/auth/auth -cmd/geo/geo -cmd/profile/profile -cmd/rate/rate -cmd/www/www -cmd/search/search diff --git a/hotelReservation/Dockerfile b/hotelReservation/Dockerfile index df813ebad..f4719a9c3 100644 --- a/hotelReservation/Dockerfile +++ b/hotelReservation/Dockerfile @@ -1,11 +1,30 @@ -FROM golang:1.17.3 - -RUN git config --global http.sslverify false -COPY . /go/src/github.com/harlow/go-micro-services -WORKDIR /go/src/github.com/harlow/go-micro-services -RUN go get gopkg.in/mgo.v2 -RUN go get github.com/bradfitz/gomemcache/memcache -RUN go get github.com/google/uuid -RUN go mod init -RUN go mod vendor -RUN go install -ldflags="-s -w" ./cmd/... +FROM golang:1.21 as builder + +WORKDIR /workspace + +COPY go.sum go.sum +COPY go.mod go.mod +COPY vendor/ vendor/ + +COPY cmd/ cmd/ +COPY dialer/ dialer/ +COPY registry/ registry/ +COPY services/ services/ +COPY tls/ tls/ +COPY tracing/ tracing/ +COPY tune/ tune/ + +RUN CGO_ENABLED=0 GOOS=linux GO111MODULE=on go install -ldflags="-s -w" -mod=vendor ./cmd/... + +FROM gcr.io/distroless/static:nonroot + +WORKDIR / + +COPY --from=builder /go/bin/frontend . +COPY --from=builder /go/bin/geo . +COPY --from=builder /go/bin/profile . +COPY --from=builder /go/bin/rate . +COPY --from=builder /go/bin/recommendation . +COPY --from=builder /go/bin/reservation . +COPY --from=builder /go/bin/search . +COPY --from=builder /go/bin/user . diff --git a/hotelReservation/Gopkg.lock b/hotelReservation/Gopkg.lock deleted file mode 100644 index 24a9151ab..000000000 --- a/hotelReservation/Gopkg.lock +++ /dev/null @@ -1,194 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
- - -[[projects]] - name = "github.com/apache/thrift" - packages = ["lib/go/thrift"] - revision = "b2a4d4ae21c789b689dd162deb819665567f481c" - version = "0.10.0" - -[[projects]] - branch = "master" - name = "github.com/codahale/hdrhistogram" - packages = ["."] - revision = "3a0bb77429bd3a61596f5e8a3172445844342120" - -[[projects]] - name = "github.com/golang/protobuf" - packages = [ - "proto", - "ptypes", - "ptypes/any", - "ptypes/duration", - "ptypes/timestamp" - ] - revision = "925541529c1fa6821df4e44ce2723319eb2be768" - version = "v1.5.3" - -[[projects]] - branch = "master" - name = "github.com/grpc-ecosystem/grpc-opentracing" - packages = ["go/otgrpc"] - revision = "0e7658f8ee99ee5aa683e2a032b8880091b7a055" - -[[projects]] - branch = "master" - name = "github.com/hailocab/go-geoindex" - packages = ["."] - revision = "64631bfe9711db743898df0295316784c876d29c" - -[[projects]] - name = "github.com/hashicorp/consul" - packages = ["api"] - revision = "9a494b5fb9c86180a5702e29c485df1507a47198" - version = "v1.0.6" - -[[projects]] - branch = "master" - name = "github.com/hashicorp/go-cleanhttp" - packages = ["."] - revision = "d5fe4b57a186c716b0e00b8c301cbd9b4182694d" - -[[projects]] - branch = "master" - name = "github.com/hashicorp/go-rootcerts" - packages = ["."] - revision = "6bb64b370b90e7ef1fa532be9e591a81c3493e00" - -[[projects]] - name = "github.com/hashicorp/serf" - packages = ["coordinate"] - revision = "d6574a5bb1226678d7010325fb6c985db20ee458" - version = "v0.8.1" - -[[projects]] - branch = "master" - name = "github.com/mitchellh/go-homedir" - packages = ["."] - revision = "b8bc1bf767474819792c23f32d8286a45736f1c6" - -[[projects]] - name = "github.com/olivere/grpc" - packages = ["lb/consul"] - revision = "fec704fef18da7cd0da77c6840bed0b25b07d570" - version = "v1.0.0" - -[[projects]] - branch = "master" - name = "github.com/opentracing-contrib/go-stdlib" - packages = ["nethttp"] - revision = "f6b9967a3c699b16f941872d6115b2860915bd5b" - -[[projects]] - name = "github.com/opentracing/opentracing-go" - packages = [ - ".", - "ext", - "log" - ] - revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38" - version = "v1.0.2" - -[[projects]] - name = "github.com/uber/jaeger-client-go" - packages = [ - ".", - "config", - "internal/baggage", - "internal/baggage/remote", - "internal/spanlog", - "log", - "rpcmetrics", - "thrift-gen/agent", - "thrift-gen/baggage", - "thrift-gen/jaeger", - "thrift-gen/sampling", - "thrift-gen/zipkincore", - "utils" - ] - revision = "3ac96c6e679cb60a74589b0d0aa7c70a906183f7" - version = "v2.11.2" - -[[projects]] - name = "github.com/uber/jaeger-lib" - packages = ["metrics"] - revision = "4267858c0679cd4e47cefed8d7f70fd386cfb567" - version = "v1.4.0" - -[[projects]] - branch = "master" - name = "golang.org/x/net" - packages = [ - "context", - "http2", - "http2/hpack", - "idna", - "internal/timeseries", - "lex/httplex", - "trace" - ] - revision = "07e8617a6db2368fa55d4616f371ee1b1403c817" - -[[projects]] - name = "golang.org/x/text" - packages = [ - "collate", - "collate/build", - "internal/colltab", - "internal/gen", - "internal/tag", - "internal/triegen", - "internal/ucd", - "language", - "secure/bidirule", - "transform", - "unicode/bidi", - "unicode/cldr", - "unicode/norm", - "unicode/rangetable" - ] - revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" - version = "v0.3.0" - -[[projects]] - branch = "master" - name = "google.golang.org/genproto" - packages = ["googleapis/rpc/status"] - revision = "df60624c1e9b9d2973e889c7a1cff73155da81c4" - -[[projects]] - name 
= "google.golang.org/grpc" - packages = [ - ".", - "balancer", - "balancer/base", - "balancer/roundrobin", - "codes", - "connectivity", - "credentials", - "encoding", - "encoding/proto", - "grpclb/grpc_lb_v1/messages", - "grpclog", - "internal", - "keepalive", - "metadata", - "naming", - "peer", - "resolver", - "resolver/dns", - "resolver/passthrough", - "stats", - "status", - "tap", - "transport" - ] - revision = "8e4536a86ab602859c20df5ebfd0bd4228d08655" - version = "v1.10.0" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "d6e37e604116f8edd7366fc8611caf6f8bf54e30de7c5c3efe3a87c232ffa3b3" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/hotelReservation/Gopkg.toml b/hotelReservation/Gopkg.toml deleted file mode 100644 index daccda322..000000000 --- a/hotelReservation/Gopkg.toml +++ /dev/null @@ -1,58 +0,0 @@ -# Gopkg.toml example -# -# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html -# for detailed Gopkg.toml documentation. -# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" -# -# [prune] -# non-go = false -# go-tests = true -# unused-packages = true - - -[[constraint]] - name = "github.com/golang/protobuf" - version = "1.5.3" - -[[constraint]] - branch = "master" - name = "github.com/hailocab/go-geoindex" - -[[constraint]] - name = "github.com/opentracing/opentracing-go" - version = "1.0.2" - -[[constraint]] - name = "github.com/uber/jaeger-client-go" - version = "2.11.2" - -[[constraint]] - name = "github.com/uber/jaeger-lib" - version = "1.4.0" - -[[constraint]] - branch = "master" - name = "golang.org/x/net" - -[[constraint]] - name = "google.golang.org/grpc" - version = "1.10.0" - -[prune] - go-tests = true - unused-packages = true diff --git a/hotelReservation/README.md b/hotelReservation/README.md index bca788e0c..545de1743 100644 --- a/hotelReservation/README.md +++ b/hotelReservation/README.md @@ -17,7 +17,7 @@ Supported actions: - luarocks (apt-get install luarocks) - luasocket (luarocks install luasocket) -## Running the social network application +## Running the hotel reservation application ### Before you start - Install Docker and Docker Compose. - Make sure exposed ports in docker-compose files are available @@ -29,11 +29,24 @@ Supported actions: ### Running the containers ##### Docker-compose -- NOTLS: Start docker containers by running `docker-compose up -d`. All images will be pulled from Docker Hub. -- TLS: Start docker containers by running `TLS=1 docker-compose up -d`. All the gRPC communications will be protected by TLS. -- TLS with spcified ciphersuite: Start docker containers by running `TLS=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 docker-compose up -d`. The available cipher suite can be find at the file [options.go](tls/options.go#L21). +Start docker containers by running `docker-compose up -d`. All images will be pulled from Docker Hub. -Check if TLS is enabled or not: `docker-compose logs | grep TLS`. +The workload itself can be configured using optional enviroment variables. The avaialbe configuration items are: + +- TLS: Environment variable TLS controls the TLS enablement of gRPC and HTTP communications of the microservices in hotelReservation. 
+ - TLS=0 or not set(default): No TLS enabled for gRPC and HTTP communication. + - TLS=1: All the gRPC and HTTP communications will be protected by TLS, e.g. `TLS=1 docker-compose up -d`. + - TLS=: Use specified ciphersuite for TLS, e.g. `TLS=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 docker-compose up -d`. The avaialbe cipher suite can be found at the file [options.go](tls/options.go#L21). + +- GC: Environment variable GC controls the garbage collection target percentage of Golang runtime. The default value is 100. See [golang doc](https://pkg.go.dev/runtime/debug#SetGCPercent) for details. + +- JAEGER_SAMPLE_RATIO: Environment variable JAEGER_SAMPLE_RATIO controls the ratio of requests to be traced Jaeger. Default is 0.01(1%). + +- MEMC_TIMEOUT: Environment variable MEMC_TIMEOUT controls the timeout value in seconds when communicating with memcached. Default is 2 seconds. We may need to increase this value in case of very high work loads. + +- LOG_LEVEL: Environment variable LOG_LEVEL controls the log verbosity. Valid values are: ERROR, WARNING, INFO, TRACE, DEBUG. Default value is INFO. + +Users may run `docker-compose logs ` to check the corresponding configurations. ##### Openshift Read the Readme file in Openshift directory. @@ -43,7 +56,7 @@ Read the Readme file in Kubernetes directory. #### workload generation ```bash -./wrk2/wrk -D exp -t -c -d -L -s ./wrk2/scripts/hotel-reservation/mixed-workload_type_1.lua http://x.x.x.x:5000 -R +../wrk2/wrk -D exp -t -c -d -L -s ./wrk2/scripts/hotel-reservation/mixed-workload_type_1.lua http://x.x.x.x:5000 -R ``` ### Questions and contact diff --git a/hotelReservation/cmd/frontend/main.go b/hotelReservation/cmd/frontend/main.go index 17dc130b0..bd2af9976 100644 --- a/hotelReservation/cmd/frontend/main.go +++ b/hotelReservation/cmd/frontend/main.go @@ -8,10 +8,10 @@ import ( "strconv" "time" - "github.com/harlow/go-micro-services/registry" - "github.com/harlow/go-micro-services/services/frontend" - "github.com/harlow/go-micro-services/tracing" - "github.com/harlow/go-micro-services/tune" + "github.com/delimitrou/DeathStarBench/hotelreservation/registry" + "github.com/delimitrou/DeathStarBench/hotelreservation/services/frontend" + "github.com/delimitrou/DeathStarBench/hotelreservation/tracing" + "github.com/delimitrou/DeathStarBench/hotelreservation/tune" "github.com/rs/zerolog" "github.com/rs/zerolog/log" ) @@ -32,37 +32,35 @@ func main() { var result map[string]string json.Unmarshal([]byte(byteValue), &result) - serv_port, _ := strconv.Atoi(result["FrontendPort"]) - serv_ip := result["FrontendIP"] + servPort, _ := strconv.Atoi(result["FrontendPort"]) + servIP := result["FrontendIP"] + knativeDNS := result["KnativeDomainName"] - log.Info().Msgf("Read target port: %v", serv_port) - log.Info().Msgf("Read consul address: %v", result["consulAddress"]) - log.Info().Msgf("Read jaeger address: %v", result["jaegerAddress"]) var ( - // port = flag.Int("port", 5000, "The server port") - jaegeraddr = flag.String("jaegeraddr", result["jaegerAddress"], "Jaeger address") - consuladdr = flag.String("consuladdr", result["consulAddress"], "Consul address") + jaegerAddr = flag.String("jaegeraddr", result["jaegerAddress"], "Jaeger address") + consulAddr = flag.String("consuladdr", result["consulAddress"], "Consul address") ) - - log.Info().Msgf("Initializing jaeger agent [service name: %v | host: %v]...", "frontend", *jaegeraddr) - tracer, err := tracing.Init("frontend", *jaegeraddr) + flag.Parse() + log.Info().Msgf("Initializing jaeger agent [service name: %v | host: 
%v]...", "frontend", *jaegerAddr) + tracer, err := tracing.Init("frontend", *jaegerAddr) if err != nil { log.Panic().Msgf("Got error while initializing jaeger agent: %v", err) } log.Info().Msg("Jaeger agent initialized") - log.Info().Msgf("Initializing consul agent [host: %v]...", *consuladdr) - registry, err := registry.NewClient(*consuladdr) + log.Info().Msgf("Initializing consul agent [host: %v]...", *consulAddr) + registry, err := registry.NewClient(*consulAddr) if err != nil { log.Panic().Msgf("Got error while initializing consul agent: %v", err) } log.Info().Msg("Consul agent initialized") srv := &frontend.Server{ - Registry: registry, - Tracer: tracer, - IpAddr: serv_ip, - Port: serv_port, + KnativeDns: knativeDNS, + Registry: registry, + Tracer: tracer, + IpAddr: servIP, + Port: servPort, } log.Info().Msg("Starting server...") diff --git a/hotelReservation/cmd/geo/db.go b/hotelReservation/cmd/geo/db.go index a803584ce..982208294 100644 --- a/hotelReservation/cmd/geo/db.go +++ b/hotelReservation/cmd/geo/db.go @@ -1,11 +1,13 @@ package main import ( + "context" + "fmt" "strconv" "github.com/rs/zerolog/log" - "gopkg.in/mgo.v2" - "gopkg.in/mgo.v2/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" ) type point struct { @@ -14,103 +16,46 @@ type point struct { Plon float64 `bson:"lon"` } -func initializeDatabase(url string) *mgo.Session { - session, err := mgo.Dial(url) - if err != nil { - log.Panic().Msg(err.Error()) - } - // defer session.Close() - log.Info().Msg("New session successfull...") - +func initializeDatabase(url string) (*mongo.Client, func()) { log.Info().Msg("Generating test data...") - c := session.DB("geo-db").C("geo") - count, err := c.Find(&bson.M{"hotelId": "1"}).Count() - if err != nil { - log.Fatal().Msg(err.Error()) - } - if count == 0 { - err = c.Insert(&point{"1", 37.7867, -122.4112}) - if err != nil { - log.Fatal().Msg(err.Error()) - } - } - count, err = c.Find(&bson.M{"hotelId": "2"}).Count() - if err != nil { - log.Fatal().Msg(err.Error()) - } - if count == 0 { - err = c.Insert(&point{"2", 37.7854, -122.4005}) - if err != nil { - log.Fatal().Msg(err.Error()) - } + newPoints := []interface{}{ + point{"1", 37.7867, -122.4112}, + point{"2", 37.7854, -122.4005}, + point{"3", 37.7854, -122.4071}, + point{"4", 37.7936, -122.3930}, + point{"5", 37.7831, -122.4181}, + point{"6", 37.7863, -122.4015}, } - count, err = c.Find(&bson.M{"hotelId": "3"}).Count() - if err != nil { - log.Fatal().Msg(err.Error()) - } - if count == 0 { - err = c.Insert(&point{"3", 37.7854, -122.4071}) - if err != nil { - log.Fatal().Msg(err.Error()) - } - } + for i := 7; i <= 80; i++ { + hotelID := strconv.Itoa(i) + lat := 37.7835 + float64(i)/500.0*3 + lon := -122.41 + float64(i)/500.0*4 - count, err = c.Find(&bson.M{"hotelId": "4"}).Count() - if err != nil { - log.Fatal().Msg(err.Error()) - } - if count == 0 { - err = c.Insert(&point{"4", 37.7936, -122.3930}) - if err != nil { - log.Fatal().Msg(err.Error()) - } + newPoints = append(newPoints, point{hotelID, lat, lon}) } - count, err = c.Find(&bson.M{"hotelId": "5"}).Count() + uri := fmt.Sprintf("mongodb://%s", url) + log.Info().Msgf("Attempting connection to %v", uri) + + opts := options.Client().ApplyURI(uri) + client, err := mongo.Connect(context.TODO(), opts) if err != nil { - log.Fatal().Msg(err.Error()) - } - if count == 0 { - err = c.Insert(&point{"5", 37.7831, -122.4181}) - if err != nil { - log.Fatal().Msg(err.Error()) - } + log.Panic().Msg(err.Error()) } + log.Info().Msg("Successfully connected 
to MongoDB") - count, err = c.Find(&bson.M{"hotelId": "6"}).Count() + collection := client.Database("geo-db").Collection("geo") + _, err = collection.InsertMany(context.TODO(), newPoints) if err != nil { log.Fatal().Msg(err.Error()) } - if count == 0 { - err = c.Insert(&point{"6", 37.7863, -122.4015}) - if err != nil { - log.Fatal().Msg(err.Error()) - } - } + log.Info().Msg("Successfully inserted test data into geo DB") - // add up to 80 hotels - for i := 7; i <= 80; i++ { - hotel_id := strconv.Itoa(i) - count, err = c.Find(&bson.M{"hotelId": hotel_id}).Count() - if err != nil { + return client, func() { + if err := client.Disconnect(context.TODO()); err != nil { log.Fatal().Msg(err.Error()) } - lat := 37.7835 + float64(i)/500.0*3 - lon := -122.41 + float64(i)/500.0*4 - if count == 0 { - err = c.Insert(&point{hotel_id, lat, lon}) - if err != nil { - log.Fatal().Msg(err.Error()) - } - } } - - err = c.EnsureIndexKey("hotelId") - if err != nil { - log.Fatal().Msg(err.Error()) - } - - return session } diff --git a/hotelReservation/cmd/geo/main.go b/hotelReservation/cmd/geo/main.go index 13f94e85c..24407368a 100644 --- a/hotelReservation/cmd/geo/main.go +++ b/hotelReservation/cmd/geo/main.go @@ -8,10 +8,10 @@ import ( "strconv" "time" - "github.com/harlow/go-micro-services/registry" - "github.com/harlow/go-micro-services/services/geo" - "github.com/harlow/go-micro-services/tracing" - "github.com/harlow/go-micro-services/tune" + "github.com/delimitrou/DeathStarBench/hotelreservation/registry" + "github.com/delimitrou/DeathStarBench/hotelreservation/services/geo" + "github.com/delimitrou/DeathStarBench/hotelreservation/tracing" + "github.com/delimitrou/DeathStarBench/hotelreservation/tune" "github.com/rs/zerolog" "github.com/rs/zerolog/log" ) @@ -33,47 +33,39 @@ func main() { var result map[string]string json.Unmarshal([]byte(byteValue), &result) - log.Info().Msgf("Read database URL: %v", result["GeoMongoAddress"]) log.Info().Msg("Initializing DB connection...") - mongo_session := initializeDatabase(result["GeoMongoAddress"]) - defer mongo_session.Close() - log.Info().Msg("Successfull") + mongoClient, mongoClose := initializeDatabase(result["GeoMongoAddress"]) + defer mongoClose() - serv_port, _ := strconv.Atoi(result["GeoPort"]) - serv_ip := result["GeoIP"] + servPort, _ := strconv.Atoi(result["GeoPort"]) + servIP := result["GeoIP"] - log.Info().Msgf("Read target port: %v", serv_port) - log.Info().Msgf("Read consul address: %v", result["consulAddress"]) - log.Info().Msgf("Read jaeger address: %v", result["jaegerAddress"]) var ( - // port = flag.Int("port", 8083, "Server port") - jaegeraddr = flag.String("jaegeraddr", result["jaegerAddress"], "Jaeger address") - consuladdr = flag.String("consuladdr", result["consulAddress"], "Consul address") + jaegerAddr = flag.String("jaegeraddr", result["jaegerAddress"], "Jaeger address") + consulAddr = flag.String("consuladdr", result["consulAddress"], "Consul address") ) flag.Parse() - log.Info().Msgf("Initializing jaeger agent [service name: %v | host: %v]...", "geo", *jaegeraddr) - - tracer, err := tracing.Init("geo", *jaegeraddr) + log.Info().Msgf("Initializing jaeger agent [service name: %v | host: %v]...", "geo", *jaegerAddr) + tracer, err := tracing.Init("geo", *jaegerAddr) if err != nil { log.Panic().Msgf("Got error while initializing jaeger agent: %v", err) } log.Info().Msg("Jaeger agent initialized") - log.Info().Msgf("Initializing consul agent [host: %v]...", *consuladdr) - registry, err := registry.NewClient(*consuladdr) + 
log.Info().Msgf("Initializing consul agent [host: %v]...", *consulAddr) + registry, err := registry.NewClient(*consulAddr) if err != nil { log.Panic().Msgf("Got error while initializing consul agent: %v", err) } log.Info().Msg("Consul agent initialized") srv := &geo.Server{ - // Port: *port, - Port: serv_port, - IpAddr: serv_ip, - Tracer: tracer, - Registry: registry, - MongoSession: mongo_session, + Port: servPort, + IpAddr: servIP, + Tracer: tracer, + Registry: registry, + MongoClient: mongoClient, } log.Info().Msg("Starting server...") diff --git a/hotelReservation/cmd/profile/db.go b/hotelReservation/cmd/profile/db.go index b90ad1d8c..1dee70ab7 100644 --- a/hotelReservation/cmd/profile/db.go +++ b/hotelReservation/cmd/profile/db.go @@ -1,11 +1,13 @@ package main import ( + "context" + "fmt" "strconv" "github.com/rs/zerolog/log" - "gopkg.in/mgo.v2" - "gopkg.in/mgo.v2/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" ) type Hotel struct { @@ -27,22 +29,11 @@ type Address struct { Lon float32 `bson:"lon"` } -func initializeDatabase(url string) *mgo.Session { - session, err := mgo.Dial(url) - if err != nil { - log.Panic().Msg(err.Error()) - } - // defer session.Close() - log.Info().Msg("New session successfull...") - +func initializeDatabase(url string) (*mongo.Client, func()) { log.Info().Msg("Generating test data...") - c := session.DB("profile-db").C("hotels") - count, err := c.Find(&bson.M{"id": "1"}).Count() - if err != nil { - log.Fatal().Msg(err.Error()) - } - if count == 0 { - err = c.Insert(&Hotel{ + + newProfiles := []interface{}{ + Hotel{ "1", "Clift Hotel", "(415) 775-4700", @@ -55,18 +46,10 @@ func initializeDatabase(url string) *mgo.Session { "United States", "94102", 37.7867, - -122.4112}}) - if err != nil { - log.Fatal().Msg(err.Error()) - } - } - - count, err = c.Find(&bson.M{"id": "2"}).Count() - if err != nil { - log.Fatal().Msg(err.Error()) - } - if count == 0 { - err = c.Insert(&Hotel{ + -122.4112, + }, + }, + Hotel{ "2", "W San Francisco", "(415) 777-5300", @@ -79,18 +62,10 @@ func initializeDatabase(url string) *mgo.Session { "United States", "94103", 37.7854, - -122.4005}}) - if err != nil { - log.Fatal().Msg(err.Error()) - } - } - - count, err = c.Find(&bson.M{"id": "3"}).Count() - if err != nil { - log.Fatal().Msg(err.Error()) - } - if count == 0 { - err = c.Insert(&Hotel{ + -122.4005, + }, + }, + Hotel{ "3", "Hotel Zetta", "(415) 543-8555", @@ -103,18 +78,10 @@ func initializeDatabase(url string) *mgo.Session { "United States", "94103", 37.7834, - -122.4071}}) - if err != nil { - log.Fatal().Msg(err.Error()) - } - } - - count, err = c.Find(&bson.M{"id": "4"}).Count() - if err != nil { - log.Fatal().Msg(err.Error()) - } - if count == 0 { - err = c.Insert(&Hotel{ + -122.4071, + }, + }, + Hotel{ "4", "Hotel Vitale", "(415) 278-3700", @@ -127,18 +94,10 @@ func initializeDatabase(url string) *mgo.Session { "United States", "94105", 37.7936, - -122.3930}}) - if err != nil { - log.Fatal().Msg(err.Error()) - } - } - - count, err = c.Find(&bson.M{"id": "5"}).Count() - if err != nil { - log.Fatal().Msg(err.Error()) - } - if count == 0 { - err = c.Insert(&Hotel{ + -122.3930, + }, + }, + Hotel{ "5", "Phoenix Hotel", "(415) 776-1380", @@ -151,18 +110,10 @@ func initializeDatabase(url string) *mgo.Session { "United States", "94109", 37.7831, - -122.4181}}) - if err != nil { - log.Fatal().Msg(err.Error()) - } - } - - count, err = c.Find(&bson.M{"id": "6"}).Count() - if err != nil { - log.Fatal().Msg(err.Error()) - } - if count == 0 { - 
err = c.Insert(&Hotel{ + -122.4181, + }, + }, + Hotel{ "6", "St. Regis San Francisco", "(415) 284-4000", @@ -175,27 +126,24 @@ func initializeDatabase(url string) *mgo.Session { "United States", "94109", 37.7863, - -122.4015}}) - if err != nil { - log.Fatal().Msg(err.Error()) - } + -122.4015, + }, + }, } - // add up to 80 hotels for i := 7; i <= 80; i++ { - hotel_id := strconv.Itoa(i) - count, err = c.Find(&bson.M{"id": hotel_id}).Count() - if err != nil { - log.Fatal().Msg(err.Error()) - } - phone_num := "(415) 284-40" + hotel_id + hotelID := strconv.Itoa(i) + phoneNumber := fmt.Sprintf("(415) 284-40%s", hotelID) + lat := 37.7835 + float32(i)/500.0*3 lon := -122.41 + float32(i)/500.0*4 - if count == 0 { - err = c.Insert(&Hotel{ - hotel_id, + + newProfiles = append( + newProfiles, + Hotel{ + hotelID, "St. Regis San Francisco", - phone_num, + phoneNumber, "St. Regis Museum Tower is a 42-story, 484 ft skyscraper in the South of Market district of San Francisco, California, adjacent to Yerba Buena Gardens, Moscone Center, PacBell Building and the San Francisco Museum of Modern Art.", &Address{ "125", @@ -205,17 +153,32 @@ func initializeDatabase(url string) *mgo.Session { "United States", "94109", lat, - lon}}) - if err != nil { - log.Fatal().Msg(err.Error()) - } - } + lon, + }, + }, + ) + } + + uri := fmt.Sprintf("mongodb://%s", url) + log.Info().Msgf("Attempting connection to %v", uri) + + opts := options.Client().ApplyURI(uri) + client, err := mongo.Connect(context.TODO(), opts) + if err != nil { + log.Panic().Msg(err.Error()) } + log.Info().Msg("Successfully connected to MongoDB") - err = c.EnsureIndexKey("id") + collection := client.Database("profile-db").Collection("hotels") + _, err = collection.InsertMany(context.TODO(), newProfiles) if err != nil { log.Fatal().Msg(err.Error()) } + log.Info().Msg("Successfully inserted test data into profile DB") - return session + return client, func() { + if err := client.Disconnect(context.TODO()); err != nil { + log.Fatal().Msg(err.Error()) + } + } } diff --git a/hotelReservation/cmd/profile/main.go b/hotelReservation/cmd/profile/main.go index 0bfdaa1b3..f31c0b911 100644 --- a/hotelReservation/cmd/profile/main.go +++ b/hotelReservation/cmd/profile/main.go @@ -6,17 +6,14 @@ import ( "io/ioutil" "os" "strconv" + "time" - "github.com/harlow/go-micro-services/registry" - "github.com/harlow/go-micro-services/services/profile" - "github.com/harlow/go-micro-services/tracing" - "github.com/harlow/go-micro-services/tune" + "github.com/delimitrou/DeathStarBench/hotelreservation/registry" + "github.com/delimitrou/DeathStarBench/hotelreservation/services/profile" + "github.com/delimitrou/DeathStarBench/hotelreservation/tracing" + "github.com/delimitrou/DeathStarBench/hotelreservation/tune" "github.com/rs/zerolog" "github.com/rs/zerolog/log" - - "time" - - "github.com/bradfitz/gomemcache/memcache" ) func main() { @@ -36,54 +33,45 @@ func main() { var result map[string]string json.Unmarshal([]byte(byteValue), &result) - log.Info().Msgf("Read database URL: %v", result["ProfileMongoAddress"]) log.Info().Msg("Initializing DB connection...") - mongo_session := initializeDatabase(result["ProfileMongoAddress"]) - defer mongo_session.Close() - log.Info().Msg("Successfull") + mongoClient, mongoClose := initializeDatabase(result["ProfileMongoAddress"]) + defer mongoClose() log.Info().Msgf("Read profile memcashed address: %v", result["ProfileMemcAddress"]) log.Info().Msg("Initializing Memcashed client...") - memc_client := memcache.New(result["ProfileMemcAddress"]) - 
memc_client.Timeout = time.Second * 2 - memc_client.MaxIdleConns = 512 - log.Info().Msg("Successfull") + memcClient := tune.NewMemCClient2(result["ProfileMemcAddress"]) + log.Info().Msg("Success") - serv_port, _ := strconv.Atoi(result["ProfilePort"]) - serv_ip := result["ProfileIP"] - log.Info().Msgf("Read target port: %v", serv_port) - log.Info().Msgf("Read consul address: %v", result["consulAddress"]) - log.Info().Msgf("Read jaeger address: %v", result["jaegerAddress"]) + servPort, _ := strconv.Atoi(result["ProfilePort"]) + servIP := result["ProfileIP"] var ( - // port = flag.Int("port", 8081, "The server port") - jaegeraddr = flag.String("jaegeraddr", result["jaegerAddress"], "Jaeger server addr") - consuladdr = flag.String("consuladdr", result["consulAddress"], "Consul address") + jaegerAddr = flag.String("jaegeraddr", result["jaegerAddress"], "Jaeger address") + consulAddr = flag.String("consuladdr", result["consulAddress"], "Consul address") ) flag.Parse() - log.Info().Msgf("Initializing jaeger agent [service name: %v | host: %v]...", "profile", *jaegeraddr) - tracer, err := tracing.Init("profile", *jaegeraddr) + log.Info().Msgf("Initializing jaeger agent [service name: %v | host: %v]...", "profile", *jaegerAddr) + tracer, err := tracing.Init("profile", *jaegerAddr) if err != nil { log.Panic().Msgf("Got error while initializing jaeger agent: %v", err) } log.Info().Msg("Jaeger agent initialized") - log.Info().Msgf("Initializing consul agent [host: %v]...", *consuladdr) - registry, err := registry.NewClient(*consuladdr) + log.Info().Msgf("Initializing consul agent [host: %v]...", *consulAddr) + registry, err := registry.NewClient(*consulAddr) if err != nil { log.Panic().Msgf("Got error while initializing consul agent: %v", err) } log.Info().Msg("Consul agent initialized") - srv := profile.Server{ - Tracer: tracer, - // Port: *port, - Registry: registry, - Port: serv_port, - IpAddr: serv_ip, - MongoSession: mongo_session, - MemcClient: memc_client, + srv := &profile.Server{ + Port: servPort, + IpAddr: servIP, + Tracer: tracer, + Registry: registry, + MongoClient: mongoClient, + MemcClient: memcClient, } log.Info().Msg("Starting server...") diff --git a/hotelReservation/cmd/rate/db.go b/hotelReservation/cmd/rate/db.go index 7a33eee65..0b5060392 100644 --- a/hotelReservation/cmd/rate/db.go +++ b/hotelReservation/cmd/rate/db.go @@ -1,11 +1,13 @@ package main import ( + "context" + "fmt" "strconv" "github.com/rs/zerolog/log" - "gopkg.in/mgo.v2" - "gopkg.in/mgo.v2/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" ) type RoomType struct { @@ -24,22 +26,11 @@ type RatePlan struct { RoomType *RoomType `bson:"roomType"` } -func initializeDatabase(url string) *mgo.Session { - session, err := mgo.Dial(url) - if err != nil { - log.Panic().Msg(err.Error()) - } - // defer session.Close() - log.Info().Msg("New session successfull...") - +func initializeDatabase(url string) (*mongo.Client, func()) { log.Info().Msg("Generating test data...") - c := session.DB("rate-db").C("inventory") - count, err := c.Find(&bson.M{"hotelId": "1"}).Count() - if err != nil { - log.Fatal().Msg(err.Error()) - } - if count == 0 { - err = c.Insert(&RatePlan{ + + newRatePlans := []interface{}{ + RatePlan{ "1", "RACK", "2015-04-09", @@ -49,18 +40,10 @@ func initializeDatabase(url string) *mgo.Session { "KNG", "King sized bed", 109.00, - 123.17}}) - if err != nil { - log.Fatal().Msg(err.Error()) - } - } - - count, err = c.Find(&bson.M{"hotelId": "2"}).Count() - if err != nil { - 
log.Fatal().Msg(err.Error()) - } - if count == 0 { - err = c.Insert(&RatePlan{ + 123.17, + }, + }, + RatePlan{ "2", "RACK", "2015-04-09", @@ -70,18 +53,10 @@ func initializeDatabase(url string) *mgo.Session { "QN", "Queen sized bed", 139.00, - 153.09}}) - if err != nil { - log.Fatal().Msg(err.Error()) - } - } - - count, err = c.Find(&bson.M{"hotelId": "3"}).Count() - if err != nil { - log.Fatal().Msg(err.Error()) - } - if count == 0 { - err = c.Insert(&RatePlan{ + 153.09, + }, + }, + RatePlan{ "3", "RACK", "2015-04-09", @@ -91,66 +66,79 @@ func initializeDatabase(url string) *mgo.Session { "KNG", "King sized bed", 109.00, - 123.17}}) - if err != nil { - log.Fatal().Msg(err.Error()) - } + 123.17, + }, + }, } - // add up to 80 hotels for i := 7; i <= 80; i++ { - if i%3 == 0 { - hotel_id := strconv.Itoa(i) - count, err = c.Find(&bson.M{"hotelId": hotel_id}).Count() - if err != nil { - log.Fatal().Msg(err.Error()) - } - end_date := "2015-04-" - rate := 109.00 - rate_inc := 123.17 - if i%2 == 0 { - end_date = end_date + "17" - } else { - end_date = end_date + "24" - } + if i%3 != 0 { + continue + } - if i%5 == 1 { - rate = 120.00 - rate_inc = 140.00 - } else if i%5 == 2 { - rate = 124.00 - rate_inc = 144.00 - } else if i%5 == 3 { - rate = 132.00 - rate_inc = 158.00 - } else if i%5 == 4 { - rate = 232.00 - rate_inc = 258.00 - } + hotelID := strconv.Itoa(i) + + endDate := "2015-04-" + if i%2 == 0 { + endDate = fmt.Sprintf("%s17", endDate) + } else { + endDate = fmt.Sprintf("%s24", endDate) + } - if count == 0 { - err = c.Insert(&RatePlan{ - hotel_id, - "RACK", - "2015-04-09", - end_date, - &RoomType{ - rate, - "KNG", - "King sized bed", - rate, - rate_inc}}) - if err != nil { - log.Fatal().Msg(err.Error()) - } - } + rate := 109.00 + rateInc := 123.17 + if i%5 == 1 { + rate = 120.00 + rateInc = 140.00 + } else if i%5 == 2 { + rate = 124.00 + rateInc = 144.00 + } else if i%5 == 3 { + rate = 132.00 + rateInc = 158.00 + } else if i%5 == 4 { + rate = 232.00 + rateInc = 258.00 } + + newRatePlans = append( + newRatePlans, + RatePlan{ + hotelID, + "RACK", + "2015-04-09", + endDate, + &RoomType{ + rate, + "KNG", + "King sized bed", + rate, + rateInc, + }, + }, + ) } - err = c.EnsureIndexKey("hotelId") + uri := fmt.Sprintf("mongodb://%s", url) + log.Info().Msgf("Attempting connection to %v", uri) + + opts := options.Client().ApplyURI(uri) + client, err := mongo.Connect(context.TODO(), opts) + if err != nil { + log.Panic().Msg(err.Error()) + } + log.Info().Msg("Successfully connected to MongoDB") + + collection := client.Database("rate-db").Collection("inventory") + _, err = collection.InsertMany(context.TODO(), newRatePlans) if err != nil { log.Fatal().Msg(err.Error()) } + log.Info().Msg("Successfully inserted test data into rate DB") - return session + return client, func() { + if err := client.Disconnect(context.TODO()); err != nil { + log.Fatal().Msg(err.Error()) + } + } } diff --git a/hotelReservation/cmd/rate/main.go b/hotelReservation/cmd/rate/main.go index 33b2c9ed2..936fe2e6c 100644 --- a/hotelReservation/cmd/rate/main.go +++ b/hotelReservation/cmd/rate/main.go @@ -7,16 +7,14 @@ import ( "os" "strconv" - "github.com/harlow/go-micro-services/registry" - "github.com/harlow/go-micro-services/services/rate" - "github.com/harlow/go-micro-services/tracing" - "github.com/harlow/go-micro-services/tune" + "github.com/delimitrou/DeathStarBench/hotelreservation/registry" + "github.com/delimitrou/DeathStarBench/hotelreservation/services/rate" + "github.com/delimitrou/DeathStarBench/hotelreservation/tracing" + 
"github.com/delimitrou/DeathStarBench/hotelreservation/tune" "github.com/rs/zerolog" "github.com/rs/zerolog/log" "time" - - "github.com/bradfitz/gomemcache/memcache" ) func main() { @@ -36,54 +34,45 @@ func main() { var result map[string]string json.Unmarshal([]byte(byteValue), &result) - log.Info().Msgf("Read database URL: %v", result["RateMongoAddress"]) log.Info().Msg("Initializing DB connection...") - mongo_session := initializeDatabase(result["RateMongoAddress"]) - defer mongo_session.Close() - log.Info().Msg("Successfull") + mongoClient, mongoClose := initializeDatabase(result["RateMongoAddress"]) + defer mongoClose() log.Info().Msgf("Read profile memcashed address: %v", result["RateMemcAddress"]) log.Info().Msg("Initializing Memcashed client...") - memc_client := memcache.New(result["RateMemcAddress"]) - memc_client.Timeout = time.Second * 2 - memc_client.MaxIdleConns = 512 - log.Info().Msg("Successfull") + memcClient := tune.NewMemCClient2(result["RateMemcAddress"]) + log.Info().Msg("Success") - serv_port, _ := strconv.Atoi(result["RatePort"]) - serv_ip := result["RateIP"] + servPort, _ := strconv.Atoi(result["RatePort"]) + servIP := result["RateIP"] - log.Info().Msgf("Read target port: %v", serv_port) - log.Info().Msgf("Read consul address: %v", result["consulAddress"]) - log.Info().Msgf("Read jaeger address: %v", result["jaegerAddress"]) var ( - // port = flag.Int("port", 8084, "The server port") - jaegeraddr = flag.String("jaegeraddr", result["jaegerAddress"], "Jaeger server addr") - consuladdr = flag.String("consuladdr", result["consulAddress"], "Consul address") + jaegerAddr = flag.String("jaegeraddr", result["jaegerAddress"], "Jaeger address") + consulAddr = flag.String("consuladdr", result["consulAddress"], "Consul address") ) flag.Parse() - log.Info().Msgf("Initializing jaeger agent [service name: %v | host: %v]...", "rate", *jaegeraddr) - tracer, err := tracing.Init("rate", *jaegeraddr) + log.Info().Msgf("Initializing jaeger agent [service name: %v | host: %v]...", "rate", *jaegerAddr) + tracer, err := tracing.Init("rate", *jaegerAddr) if err != nil { log.Panic().Msgf("Got error while initializing jaeger agent: %v", err) } log.Info().Msg("Jaeger agent initialized") - log.Info().Msgf("Initializing consul agent [host: %v]...", *consuladdr) - registry, err := registry.NewClient(*consuladdr) + log.Info().Msgf("Initializing consul agent [host: %v]...", *consulAddr) + registry, err := registry.NewClient(*consulAddr) if err != nil { log.Panic().Msgf("Got error while initializing consul agent: %v", err) } log.Info().Msg("Consul agent initialized") srv := &rate.Server{ - Tracer: tracer, - // Port: *port, - Registry: registry, - Port: serv_port, - IpAddr: serv_ip, - MongoSession: mongo_session, - MemcClient: memc_client, + Tracer: tracer, + Registry: registry, + Port: servPort, + IpAddr: servIP, + MongoClient: mongoClient, + MemcClient: memcClient, } log.Info().Msg("Starting server...") diff --git a/hotelReservation/cmd/recommendation/db.go b/hotelReservation/cmd/recommendation/db.go index 94ce129f8..fbd3425e8 100644 --- a/hotelReservation/cmd/recommendation/db.go +++ b/hotelReservation/cmd/recommendation/db.go @@ -1,11 +1,13 @@ package main import ( + "context" + "fmt" "strconv" "github.com/rs/zerolog/log" - "gopkg.in/mgo.v2" - "gopkg.in/mgo.v2/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" ) type Hotel struct { @@ -16,131 +18,71 @@ type Hotel struct { HPrice float64 `bson:"price"` } -func initializeDatabase(url string) *mgo.Session { - 
session, err := mgo.Dial(url) - if err != nil { - log.Panic().Msg(err.Error()) - } - // defer session.Close() - log.Info().Msg("New session successfull...") - +func initializeDatabase(url string) (*mongo.Client, func()) { log.Info().Msg("Generating test data...") - c := session.DB("recommendation-db").C("recommendation") - count, err := c.Find(&bson.M{"hotelId": "1"}).Count() - if err != nil { - log.Fatal().Msg(err.Error()) - } - if count == 0 { - err = c.Insert(&Hotel{"1", 37.7867, -122.4112, 109.00, 150.00}) - if err != nil { - log.Fatal().Msg(err.Error()) - } - } - count, err = c.Find(&bson.M{"hotelId": "2"}).Count() - if err != nil { - log.Fatal().Msg(err.Error()) - } - if count == 0 { - err = c.Insert(&Hotel{"2", 37.7854, -122.4005, 139.00, 120.00}) - if err != nil { - log.Fatal().Msg(err.Error()) - } + newHotels := []interface{}{ + Hotel{"1", 37.7867, -122.4112, 109.00, 150.00}, + Hotel{"2", 37.7854, -122.4005, 139.00, 120.00}, + Hotel{"3", 37.7834, -122.4071, 109.00, 190.00}, + Hotel{"4", 37.7936, -122.3930, 129.00, 160.00}, + Hotel{"5", 37.7831, -122.4181, 119.00, 140.00}, + Hotel{"6", 37.7863, -122.4015, 149.00, 200.00}, } - count, err = c.Find(&bson.M{"hotelId": "3"}).Count() - if err != nil { - log.Fatal().Msg(err.Error()) - } - if count == 0 { - err = c.Insert(&Hotel{"3", 37.7834, -122.4071, 109.00, 190.00}) - if err != nil { - log.Fatal().Msg(err.Error()) - } - } - - count, err = c.Find(&bson.M{"hotelId": "4"}).Count() - if err != nil { - log.Fatal().Msg(err.Error()) - } - if count == 0 { - err = c.Insert(&Hotel{"4", 37.7936, -122.3930, 129.00, 160.00}) - if err != nil { - log.Fatal().Msg(err.Error()) - } - } - - count, err = c.Find(&bson.M{"hotelId": "5"}).Count() - if err != nil { - log.Fatal().Msg(err.Error()) - } - if count == 0 { - err = c.Insert(&Hotel{"5", 37.7831, -122.4181, 119.00, 140.00}) - if err != nil { - log.Fatal().Msg(err.Error()) - } - } - - count, err = c.Find(&bson.M{"hotelId": "6"}).Count() - if err != nil { - log.Fatal().Msg(err.Error()) - } - if count == 0 { - err = c.Insert(&Hotel{"6", 37.7863, -122.4015, 149.00, 200.00}) - if err != nil { - log.Fatal().Msg(err.Error()) - } - } - - // add up to 80 hotels for i := 7; i <= 80; i++ { - hotel_id := strconv.Itoa(i) - count, err = c.Find(&bson.M{"hotelId": hotel_id}).Count() - if err != nil { - log.Fatal().Msg(err.Error()) - } + rate := 135.00 + rateInc := 179.00 + hotelID := strconv.Itoa(i) lat := 37.7835 + float64(i)/500.0*3 lon := -122.41 + float64(i)/500.0*4 - count, err = c.Find(&bson.M{"hotelId": hotel_id}).Count() - if err != nil { - log.Fatal().Msg(err.Error()) - } - - rate := 135.00 - rate_inc := 179.00 if i%3 == 0 { - if i%5 == 0 { - rate = 109.00 - rate_inc = 123.17 - } else if i%5 == 1 { + switch i % 5 { + case 1: rate = 120.00 - rate_inc = 140.00 - } else if i%5 == 2 { + rateInc = 140.00 + case 2: rate = 124.00 - rate_inc = 144.00 - } else if i%5 == 3 { + rateInc = 144.00 + case 3: rate = 132.00 - rate_inc = 158.00 - } else if i%5 == 4 { + rateInc = 158.00 + case 4: rate = 232.00 - rate_inc = 258.00 + rateInc = 258.00 + default: + rate = 109.00 + rateInc = 123.17 } } - if count == 0 { - err = c.Insert(&Hotel{hotel_id, lat, lon, rate, rate_inc}) - if err != nil { - log.Fatal().Msg(err.Error()) - } - } + newHotels = append( + newHotels, + Hotel{hotelID, lat, lon, rate, rateInc}, + ) + } + + uri := fmt.Sprintf("mongodb://%s", url) + log.Info().Msgf("Attempting connection to %v", uri) + opts := options.Client().ApplyURI(uri) + client, err := mongo.Connect(context.TODO(), opts) + if err != nil { + 
log.Panic().Msg(err.Error()) } + log.Info().Msg("Successfully connected to MongoDB") - err = c.EnsureIndexKey("hotelId") + collection := client.Database("recommendation-db").Collection("recommendation") + _, err = collection.InsertMany(context.TODO(), newHotels) if err != nil { log.Fatal().Msg(err.Error()) } + log.Info().Msg("Successfully inserted test data into recommendation DB") - return session + return client, func() { + if err := client.Disconnect(context.TODO()); err != nil { + log.Fatal().Msg(err.Error()) + } + } } diff --git a/hotelReservation/cmd/recommendation/main.go b/hotelReservation/cmd/recommendation/main.go index d734eff3a..e96aeaf9d 100644 --- a/hotelReservation/cmd/recommendation/main.go +++ b/hotelReservation/cmd/recommendation/main.go @@ -9,13 +9,12 @@ import ( "strconv" - "github.com/harlow/go-micro-services/registry" - "github.com/harlow/go-micro-services/services/recommendation" - "github.com/harlow/go-micro-services/tracing" - "github.com/harlow/go-micro-services/tune" + "github.com/delimitrou/DeathStarBench/hotelreservation/registry" + "github.com/delimitrou/DeathStarBench/hotelreservation/services/recommendation" + "github.com/delimitrou/DeathStarBench/hotelreservation/tracing" + "github.com/delimitrou/DeathStarBench/hotelreservation/tune" "github.com/rs/zerolog" "github.com/rs/zerolog/log" - // "github.com/bradfitz/gomemcache/memcache" ) func main() { @@ -35,47 +34,39 @@ func main() { var result map[string]string json.Unmarshal([]byte(byteValue), &result) - log.Info().Msgf("Read database URL: %v", result["RecommendMongoAddress"]) log.Info().Msg("Initializing DB connection...") - mongo_session := initializeDatabase(result["RecommendMongoAddress"]) - defer mongo_session.Close() - log.Info().Msg("Successfull") + mongoClient, mongoClose := initializeDatabase(result["RecommendMongoAddress"]) + defer mongoClose() - serv_port, _ := strconv.Atoi(result["RecommendPort"]) - serv_ip := result["RecommendIP"] - - log.Info().Msgf("Read target port: %v", serv_port) - log.Info().Msgf("Read consul address: %v", result["consulAddress"]) - log.Info().Msgf("Read jaeger address: %v", result["jaegerAddress"]) + servPort, _ := strconv.Atoi(result["RecommendPort"]) + servIP := result["RecommendIP"] var ( - // port = flag.Int("port", 8085, "The server port") - jaegeraddr = flag.String("jaegeraddr", result["jaegerAddress"], "Jaeger server addr") - consuladdr = flag.String("consuladdr", result["consulAddress"], "Consul address") + jaegerAddr = flag.String("jaegeraddr", result["jaegerAddress"], "Jaeger address") + consulAddr = flag.String("consuladdr", result["consulAddress"], "Consul address") ) flag.Parse() - log.Info().Msgf("Initializing jaeger agent [service name: %v | host: %v]...", "recommendation", *jaegeraddr) - tracer, err := tracing.Init("recommendation", *jaegeraddr) + log.Info().Msgf("Initializing jaeger agent [service name: %v | host: %v]...", "recommendation", *jaegerAddr) + tracer, err := tracing.Init("recommendation", *jaegerAddr) if err != nil { log.Panic().Msgf("Got error while initializing jaeger agent: %v", err) } log.Info().Msg("Jaeger agent initialized") - log.Info().Msgf("Initializing consul agent [host: %v]...", *consuladdr) - registry, err := registry.NewClient(*consuladdr) + log.Info().Msgf("Initializing consul agent [host: %v]...", *consulAddr) + registry, err := registry.NewClient(*consulAddr) if err != nil { log.Panic().Msgf("Got error while initializing consul agent: %v", err) } log.Info().Msg("Consul agent initialized") srv := &recommendation.Server{ - 
Tracer: tracer, - // Port: *port, - Registry: registry, - Port: serv_port, - IpAddr: serv_ip, - MongoSession: mongo_session, + Port: servPort, + IpAddr: servIP, + Tracer: tracer, + Registry: registry, + MongoClient: mongoClient, } log.Info().Msg("Starting server...") diff --git a/hotelReservation/cmd/reservation/db.go b/hotelReservation/cmd/reservation/db.go index 463a5c193..9483e8450 100644 --- a/hotelReservation/cmd/reservation/db.go +++ b/hotelReservation/cmd/reservation/db.go @@ -1,11 +1,13 @@ package main import ( + "context" + "fmt" "strconv" "github.com/rs/zerolog/log" - "gopkg.in/mgo.v2" - "gopkg.in/mgo.v2/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" ) type Reservation struct { @@ -21,117 +23,63 @@ type Number struct { Number int `bson:"numberOfRoom"` } -func initializeDatabase(url string) *mgo.Session { - session, err := mgo.Dial(url) - if err != nil { - log.Panic().Msg(err.Error()) - } - // defer session.Close() - log.Info().Msg("New session successfull...") +func initializeDatabase(url string) (*mongo.Client, func()) { + log.Info().Msg("Generating test data...") - c := session.DB("reservation-db").C("reservation") - count, err := c.Find(&bson.M{"hotelId": "4"}).Count() - if err != nil { - log.Fatal().Msg(err.Error()) - } - if count == 0 { - err = c.Insert(&Reservation{"4", "Alice", "2015-04-09", "2015-04-10", 1}) - if err != nil { - log.Fatal().Msg(err.Error()) - } + newReservations := []interface{}{ + Reservation{"4", "Alice", "2015-04-09", "2015-04-10", 1}, } - c = session.DB("reservation-db").C("number") - count, err = c.Find(&bson.M{"hotelId": "1"}).Count() - if err != nil { - log.Fatal().Msg(err.Error()) - } - if count == 0 { - err = c.Insert(&Number{"1", 200}) - if err != nil { - log.Fatal().Msg(err.Error()) - } + newNumbers := []interface{}{ + Number{"1", 200}, + Number{"2", 200}, + Number{"3", 200}, + Number{"4", 200}, + Number{"5", 200}, + Number{"6", 200}, } - count, err = c.Find(&bson.M{"hotelId": "2"}).Count() - if err != nil { - log.Fatal().Msg(err.Error()) - } - if count == 0 { - err = c.Insert(&Number{"2", 200}) - if err != nil { - log.Fatal().Msg(err.Error()) - } - } + for i := 7; i <= 80; i++ { + hotelID := strconv.Itoa(i) - count, err = c.Find(&bson.M{"hotelId": "3"}).Count() - if err != nil { - log.Fatal().Msg(err.Error()) - } - if count == 0 { - err = c.Insert(&Number{"3", 200}) - if err != nil { - log.Fatal().Msg(err.Error()) + roomNumber := 200 + if i%3 == 1 { + roomNumber = 300 + } else if i%3 == 2 { + roomNumber = 250 } + + newNumbers = append(newNumbers, Number{hotelID, roomNumber}) } - count, err = c.Find(&bson.M{"hotelId": "4"}).Count() + uri := fmt.Sprintf("mongodb://%s", url) + log.Info().Msgf("Attempting connection to %v", uri) + + opts := options.Client().ApplyURI(uri) + client, err := mongo.Connect(context.TODO(), opts) if err != nil { - log.Fatal().Msg(err.Error()) - } - if count == 0 { - err = c.Insert(&Number{"4", 200}) - if err != nil { - log.Fatal().Msg(err.Error()) - } + log.Panic().Msg(err.Error()) } + log.Info().Msg("Successfully connected to MongoDB") + + database := client.Database("reservation-db") + resCollection := database.Collection("reservation") + numCollection := database.Collection("number") - count, err = c.Find(&bson.M{"hotelId": "5"}).Count() + _, err = resCollection.InsertMany(context.TODO(), newReservations) if err != nil { log.Fatal().Msg(err.Error()) } - if count == 0 { - err = c.Insert(&Number{"5", 200}) - if err != nil { - log.Fatal().Msg(err.Error()) - } - } - count, err = 
c.Find(&bson.M{"hotelId": "6"}).Count() + _, err = numCollection.InsertMany(context.TODO(), newNumbers) if err != nil { log.Fatal().Msg(err.Error()) } - if count == 0 { - err = c.Insert(&Number{"6", 200}) - if err != nil { - log.Fatal().Msg(err.Error()) - } - } + log.Info().Msg("Successfully inserted test data into reservation DB") - for i := 7; i <= 80; i++ { - hotel_id := strconv.Itoa(i) - count, err = c.Find(&bson.M{"hotelId": hotel_id}).Count() - if err != nil { + return client, func() { + if err := client.Disconnect(context.TODO()); err != nil { log.Fatal().Msg(err.Error()) } - room_num := 200 - if i%3 == 1 { - room_num = 300 - } else if i%3 == 2 { - room_num = 250 - } - if count == 0 { - err = c.Insert(&Number{hotel_id, room_num}) - if err != nil { - log.Fatal().Msg(err.Error()) - } - } - } - - err = c.EnsureIndexKey("hotelId") - if err != nil { - log.Fatal().Msg(err.Error()) } - - return session } diff --git a/hotelReservation/cmd/reservation/main.go b/hotelReservation/cmd/reservation/main.go index de8c76bb1..a4a424f4d 100644 --- a/hotelReservation/cmd/reservation/main.go +++ b/hotelReservation/cmd/reservation/main.go @@ -5,19 +5,15 @@ import ( "flag" "io/ioutil" "os" - "strconv" + "time" - "github.com/harlow/go-micro-services/registry" - "github.com/harlow/go-micro-services/services/reservation" - "github.com/harlow/go-micro-services/tracing" - "github.com/harlow/go-micro-services/tune" + "github.com/delimitrou/DeathStarBench/hotelreservation/registry" + "github.com/delimitrou/DeathStarBench/hotelreservation/services/reservation" + "github.com/delimitrou/DeathStarBench/hotelreservation/tracing" + "github.com/delimitrou/DeathStarBench/hotelreservation/tune" "github.com/rs/zerolog" "github.com/rs/zerolog/log" - - "time" - - "github.com/bradfitz/gomemcache/memcache" ) func main() { @@ -37,54 +33,45 @@ func main() { var result map[string]string json.Unmarshal([]byte(byteValue), &result) - log.Info().Msgf("Read database URL: %v", result["ReserveMongoAddress"]) log.Info().Msg("Initializing DB connection...") - mongo_session := initializeDatabase(result["ReserveMongoAddress"]) - defer mongo_session.Close() - log.Info().Msg("Successfull") + mongoClient, mongoClose := initializeDatabase(result["ReserveMongoAddress"]) + defer mongoClose() log.Info().Msgf("Read profile memcashed address: %v", result["ReserveMemcAddress"]) log.Info().Msg("Initializing Memcashed client...") - memc_client := memcache.New(result["ReserveMemcAddress"]) - memc_client.Timeout = time.Second * 2 - memc_client.MaxIdleConns = 512 - log.Info().Msg("Successfull") + memcClient := tune.NewMemCClient2(result["ReserveMemcAddress"]) + log.Info().Msg("Success") - serv_port, _ := strconv.Atoi(result["ReservePort"]) - serv_ip := result["ReserveIP"] - log.Info().Msgf("Read target port: %v", serv_port) - log.Info().Msgf("Read consul address: %v", result["consulAddress"]) - log.Info().Msgf("Read jaeger address: %v", result["jaegerAddress"]) + servPort, _ := strconv.Atoi(result["ReservePort"]) + servIP := result["ReserveIP"] var ( - // port = flag.Int("port", 8087, "The server port") - jaegeraddr = flag.String("jaegeraddr", result["jaegerAddress"], "Jaeger server addr") - consuladdr = flag.String("consuladdr", result["consulAddress"], "Consul address") + jaegerAddr = flag.String("jaegeraddr", result["jaegerAddress"], "Jaeger address") + consulAddr = flag.String("consuladdr", result["consulAddress"], "Consul address") ) flag.Parse() - log.Info().Msgf("Initializing jaeger agent [service name: %v | host: %v]...", "reservation", 
*jaegeraddr) - tracer, err := tracing.Init("reservation", *jaegeraddr) + log.Info().Msgf("Initializing jaeger agent [service name: %v | host: %v]...", "reservation", *jaegerAddr) + tracer, err := tracing.Init("reservation", *jaegerAddr) if err != nil { log.Panic().Msgf("Got error while initializing jaeger agent: %v", err) } log.Info().Msg("Jaeger agent initialized") - log.Info().Msgf("Initializing consul agent [host: %v]...", *consuladdr) - registry, err := registry.NewClient(*consuladdr) + log.Info().Msgf("Initializing consul agent [host: %v]...", *consulAddr) + registry, err := registry.NewClient(*consulAddr) if err != nil { log.Panic().Msgf("Got error while initializing consul agent: %v", err) } log.Info().Msg("Consul agent initialized") srv := &reservation.Server{ - Tracer: tracer, - // Port: *port, - Registry: registry, - Port: serv_port, - IpAddr: serv_ip, - MongoSession: mongo_session, - MemcClient: memc_client, + Tracer: tracer, + Registry: registry, + Port: servPort, + IpAddr: servIP, + MongoClient: mongoClient, + MemcClient: memcClient, } log.Info().Msg("Starting server...") diff --git a/hotelReservation/cmd/review/db.go b/hotelReservation/cmd/review/db.go deleted file mode 100644 index 8411e9ae3..000000000 --- a/hotelReservation/cmd/review/db.go +++ /dev/null @@ -1,163 +0,0 @@ -package main - -import ( - "github.com/rs/zerolog/log" - "gopkg.in/mgo.v2" - "gopkg.in/mgo.v2/bson" - "fmt" -) - -type Review struct { - ReviewId string `bson:"reviewId"` - HotelId string `bson:"hotelId"` - Name string `bson:"name"` - Rating float32 `bson:"rating"` - Description string `bson:"description"` - Image *Image `bson:"images"` -} - -type Image struct { - Url string `bson:"url"` - Default bool `bson:"default"` -} - -func initializeDatabase(url string) *mgo.Session { - session, err := mgo.Dial(url) - fmt.Println("Initialize Database ", url) - if err != nil { - log.Panic().Msg(err.Error()) - } - // defer session.Close() - log.Info().Msg("New session successfull...") - - log.Info().Msg("Generating test data...") - c := session.DB("review-db").C("reviews") - count, err := c.Find(&bson.M{"reviewId": "1"}).Count() - fmt.Println("Count = ", count) - if err != nil { - log.Fatal().Msg(err.Error()) - } - if count == 0 { - fmt.Println("Insert 1") - err = c.Insert(&Review{ - "1", - "1", - "Person 1", - 3.4, - "A 6-minute walk from Union Square and 4 minutes from a Muni Metro station, this luxury hotel designed by Philippe Starck features an artsy furniture collection in the lobby, including work by Salvador Dali.", - &Image{ - "some url", - false}}) - if err != nil { - log.Fatal().Msg(err.Error()) - } - } - - count, err = c.Find(&bson.M{"reviewId": "2"}).Count() - if err != nil { - log.Fatal().Msg(err.Error()) - } - if count == 0 { - fmt.Println("Insert 2") - err = c.Insert(&Review{ - "2", - "1", - "Person 2", - 4.4, - "A 6-minute walk from Union Square and 4 minutes from a Muni Metro station, this luxury hotel designed by Philippe Starck features an artsy furniture collection in the lobby, including work by Salvador Dali.", - &Image{ - "some url", - false}}) - if err != nil { - log.Fatal().Msg(err.Error()) - } - } - - count, err = c.Find(&bson.M{"reviewId": "3"}).Count() - if err != nil { - log.Fatal().Msg(err.Error()) - } - if count == 0 { - fmt.Println("Insert 3") - err = c.Insert(&Review{ - "3", - "1", - "Person 3", - 4.2, - "A 6-minute walk from Union Square and 4 minutes from a Muni Metro station, this luxury hotel designed by Philippe Starck features an artsy furniture collection in the lobby, 
including work by Salvador Dali.", - &Image{ - "some url", - false}}) - if err != nil { - log.Fatal().Msg(err.Error()) - } - } - - count, err = c.Find(&bson.M{"reviewId": "4"}).Count() - if err != nil { - log.Fatal().Msg(err.Error()) - } - if count == 0 { - fmt.Println("Insert 4") - err = c.Insert(&Review{ - "4", - "1", - "Person 4", - 3.9, - "A 6-minute walk from Union Square and 4 minutes from a Muni Metro station, this luxury hotel designed by Philippe Starck features an artsy furniture collection in the lobby, including work by Salvador Dali.", - &Image{ - "some url", - false}}) - if err != nil { - log.Fatal().Msg(err.Error()) - } - } - - count, err = c.Find(&bson.M{"reviewId": "5"}).Count() - if err != nil { - log.Fatal().Msg(err.Error()) - } - if count == 0 { - fmt.Println("Insert 5") - err = c.Insert(&Review{ - "5", - "2", - "Person 5", - 4.2, - "A 6-minute walk from Union Square and 4 minutes from a Muni Metro station, this luxury hotel designed by Philippe Starck features an artsy furniture collection in the lobby, including work by Salvador Dali.", - &Image{ - "some url", - false}}) - if err != nil { - log.Fatal().Msg(err.Error()) - } - } - - count, err = c.Find(&bson.M{"reviewId": "6"}).Count() - if err != nil { - log.Fatal().Msg(err.Error()) - } - if count == 0 { - fmt.Println("Insert 6") - err = c.Insert(&Review{ - "6", - "2", - "Person 6", - 3.7, - "A 6-minute walk from Union Square and 4 minutes from a Muni Metro station, this luxury hotel designed by Philippe Starck features an artsy furniture collection in the lobby, including work by Salvador Dali.", - &Image{ - "some url", - false}}) - if err != nil { - log.Fatal().Msg(err.Error()) - } - } - - err = c.EnsureIndexKey("reviewId") - err = c.EnsureIndexKey("hotelId") - if err != nil { - log.Fatal().Msg(err.Error()) - } - - return session -} diff --git a/hotelReservation/cmd/review/main.go b/hotelReservation/cmd/review/main.go deleted file mode 100644 index 55aa92849..000000000 --- a/hotelReservation/cmd/review/main.go +++ /dev/null @@ -1,91 +0,0 @@ -package main - -import ( - "encoding/json" - "flag" - "io/ioutil" - "os" - "strconv" - - "github.com/harlow/go-micro-services/registry" - "github.com/harlow/go-micro-services/services/review" - "github.com/harlow/go-micro-services/tracing" - "github.com/harlow/go-micro-services/tune" - "github.com/rs/zerolog" - "github.com/rs/zerolog/log" - - "time" - - "github.com/bradfitz/gomemcache/memcache" -) - -func main() { - tune.Init() - log.Logger = zerolog.New(zerolog.ConsoleWriter{Out: os.Stdout, TimeFormat: time.RFC3339}).With().Timestamp().Caller().Logger() - - log.Info().Msg("Reading config...") - jsonFile, err := os.Open("config.json") - if err != nil { - log.Error().Msgf("Got error while reading config: %v", err) - } - - defer jsonFile.Close() - - byteValue, _ := ioutil.ReadAll(jsonFile) - - var result map[string]string - json.Unmarshal([]byte(byteValue), &result) - - log.Info().Msgf("Read database URL: %v", result["ReviewMongoAddress"]) - log.Info().Msg("Initializing DB connection...") - mongo_session := initializeDatabase(result["ReviewMongoAddress"]) - defer mongo_session.Close() - log.Info().Msg("Successfull") - - log.Info().Msgf("Read review memcashed address: %v", result["ReviewMemcAddress"]) - log.Info().Msg("Initializing Memcashed client...") - memc_client := memcache.New(result["ReviewMemcAddress"]) - memc_client.Timeout = time.Second * 2 - memc_client.MaxIdleConns = 512 - log.Info().Msg("Successfull") - - serv_port, _ := strconv.Atoi(result["ReviewPort"]) - serv_ip := 
result["ReviewIP"] - log.Info().Msgf("Read target port: %v", serv_port) - log.Info().Msgf("Read consul address: %v", result["consulAddress"]) - log.Info().Msgf("Read jaeger address: %v", result["jaegerAddress"]) - - var ( - // port = flag.Int("port", 8081, "The server port") - jaegeraddr = flag.String("jaegeraddr", result["jaegerAddress"], "Jaeger server addr") - consuladdr = flag.String("consuladdr", result["consulAddress"], "Consul address") - ) - flag.Parse() - - log.Info().Msgf("Initializing jaeger agent [service name: %v | host: %v]...", "review", *jaegeraddr) - tracer, err := tracing.Init("review", *jaegeraddr) - if err != nil { - log.Panic().Msgf("Got error while initializing jaeger agent: %v", err) - } - log.Info().Msg("Jaeger agent initialized") - - log.Info().Msgf("Initializing consul agent [host: %v]...", *consuladdr) - registry, err := registry.NewClient(*consuladdr) - if err != nil { - log.Panic().Msgf("Got error while initializing consul agent: %v", err) - } - log.Info().Msg("Consul agent initialized") - - srv := review.Server{ - Tracer: tracer, - // Port: *port, - Registry: registry, - Port: serv_port, - IpAddr: serv_ip, - MongoSession: mongo_session, - MemcClient: memc_client, - } - - log.Info().Msg("Starting server...") - log.Fatal().Msg(srv.Run().Error()) -} diff --git a/hotelReservation/cmd/search/main.go b/hotelReservation/cmd/search/main.go index c01b81d7c..63f26d578 100644 --- a/hotelReservation/cmd/search/main.go +++ b/hotelReservation/cmd/search/main.go @@ -9,10 +9,10 @@ import ( "strconv" - "github.com/harlow/go-micro-services/registry" - "github.com/harlow/go-micro-services/services/search" - "github.com/harlow/go-micro-services/tracing" - "github.com/harlow/go-micro-services/tune" + "github.com/delimitrou/DeathStarBench/hotelreservation/registry" + "github.com/delimitrou/DeathStarBench/hotelreservation/services/search" + "github.com/delimitrou/DeathStarBench/hotelreservation/tracing" + "github.com/delimitrou/DeathStarBench/hotelreservation/tune" "github.com/rs/zerolog" "github.com/rs/zerolog/log" ) @@ -34,39 +34,36 @@ func main() { var result map[string]string json.Unmarshal([]byte(byteValue), &result) - serv_port, _ := strconv.Atoi(result["SearchPort"]) - serv_ip := result["SearchIP"] - log.Info().Msgf("Read target port: %v", serv_port) - log.Info().Msgf("Read consul address: %v", result["consulAddress"]) - log.Info().Msgf("Read jaeger address: %v", result["jaegerAddress"]) + servPort, _ := strconv.Atoi(result["SearchPort"]) + servIP := result["SearchIP"] + knativeDNS := result["KnativeDomainName"] var ( - // port = flag.Int("port", 8082, "The server port") - jaegeraddr = flag.String("jaegeraddr", result["jaegerAddress"], "Jaeger address") - consuladdr = flag.String("consuladdr", result["consulAddress"], "Consul address") + jaegerAddr = flag.String("jaegerAddr", result["jaegerAddress"], "Jaeger address") + consulAddr = flag.String("consulAddr", result["consulAddress"], "Consul address") ) flag.Parse() - log.Info().Msgf("Initializing jaeger agent [service name: %v | host: %v]...", "search", *jaegeraddr) - tracer, err := tracing.Init("search", *jaegeraddr) + log.Info().Msgf("Initializing jaeger agent [service name: %v | host: %v]...", "search", *jaegerAddr) + tracer, err := tracing.Init("search", *jaegerAddr) if err != nil { log.Panic().Msgf("Got error while initializing jaeger agent: %v", err) } log.Info().Msg("Jaeger agent initialized") - log.Info().Msgf("Initializing consul agent [host: %v]...", *consuladdr) - registry, err := registry.NewClient(*consuladdr) + 
log.Info().Msgf("Initializing consul agent [host: %v]...", *consulAddr) + registry, err := registry.NewClient(*consulAddr) if err != nil { log.Panic().Msgf("Got error while initializing consul agent: %v", err) } log.Info().Msg("Consul agent initialized") srv := &search.Server{ - Tracer: tracer, - // Port: *port, - Port: serv_port, - IpAddr: serv_ip, - Registry: registry, + Tracer: tracer, + Port: servPort, + IpAddr: servIP, + KnativeDns: knativeDNS, + Registry: registry, } log.Info().Msg("Starting server...") diff --git a/hotelReservation/cmd/user/db.go b/hotelReservation/cmd/user/db.go index af824932a..6840d54e7 100644 --- a/hotelReservation/cmd/user/db.go +++ b/hotelReservation/cmd/user/db.go @@ -1,13 +1,14 @@ package main import ( + "context" "crypto/sha256" "fmt" "strconv" "github.com/rs/zerolog/log" - "gopkg.in/mgo.v2" - "gopkg.in/mgo.v2/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" ) type User struct { @@ -15,65 +16,46 @@ type User struct { Password string `bson:"password"` } -func initializeDatabase(url string) *mgo.Session { - session, err := mgo.Dial(url) - if err != nil { - log.Panic().Msg(err.Error()) - } - // defer session.Close() - log.Info().Msg("New session successfull...") - +func initializeDatabase(url string) (*mongo.Client, func()) { log.Info().Msg("Generating test data...") - c := session.DB("user-db").C("user") + + newUsers := []interface{}{} + for i := 0; i <= 500; i++ { suffix := strconv.Itoa(i) - user_name := "Cornell_" + suffix + password := "" for j := 0; j < 10; j++ { password += suffix } + sum := sha256.Sum256([]byte(password)) - count, err := c.Find(&bson.M{"username": user_name}).Count() - if err != nil { - log.Fatal().Msg(err.Error()) - } - if count == 0 { - sum := sha256.Sum256([]byte(password)) - pass := fmt.Sprintf("%x", sum) - err = c.Insert(&User{user_name, pass}) - if err != nil { - log.Fatal().Msg(err.Error()) - } - } + newUsers = append(newUsers, User{ + fmt.Sprintf("Cornell_%x", suffix), + fmt.Sprintf("%x", sum), + }) + } + + uri := fmt.Sprintf("mongodb://%s", url) + log.Info().Msgf("Attempting connection to %v", uri) + opts := options.Client().ApplyURI(uri) + client, err := mongo.Connect(context.TODO(), opts) + if err != nil { + log.Panic().Msg(err.Error()) } + log.Info().Msg("Successfully connected to MongoDB") - err = c.EnsureIndexKey("username") + collection := client.Database("user-db").Collection("user") + _, err = collection.InsertMany(context.TODO(), newUsers) if err != nil { log.Fatal().Msg(err.Error()) } + log.Info().Msg("Successfully inserted test data into user DB") - return session - - // count, err := c.Find(&bson.M{"username": "Cornell"}).Count() - // if err != nil { - // log.Fatal(err) - // } - // if count == 0{ - // err = c.Insert(&User{"Cornell", "302eacf716390b1ebb39012b130302efec8a32ac4b8ad0a911112c53b60382b0"}) - // if err != nil { - // log.Fatal(err) - // } - // } - - // count, err = c.Find(&bson.M{"username": "ECE"}).Count() - // if err != nil { - // log.Fatal(err) - // } - // if count == 0{ - // err = c.Insert(&User{"ECE", "a0a44ed8cfc32b7e61befeb99bbff7706808c3fe4dcdf4750a8addb3ffcd4008"}) - // if err != nil { - // log.Fatal(err) - // } - // } + return client, func() { + if err := client.Disconnect(context.TODO()); err != nil { + log.Fatal().Msg(err.Error()) + } + } } diff --git a/hotelReservation/cmd/user/main.go b/hotelReservation/cmd/user/main.go index 0cddaf06f..f1891df23 100644 --- a/hotelReservation/cmd/user/main.go +++ b/hotelReservation/cmd/user/main.go @@ -8,10 +8,10 @@ 
import ( "strconv" "time" - "github.com/harlow/go-micro-services/registry" - "github.com/harlow/go-micro-services/services/user" - "github.com/harlow/go-micro-services/tracing" - "github.com/harlow/go-micro-services/tune" + "github.com/delimitrou/DeathStarBench/hotelreservation/registry" + "github.com/delimitrou/DeathStarBench/hotelreservation/services/user" + "github.com/delimitrou/DeathStarBench/hotelreservation/tracing" + "github.com/delimitrou/DeathStarBench/hotelreservation/tune" "github.com/rs/zerolog" "github.com/rs/zerolog/log" ) @@ -34,46 +34,39 @@ func main() { var result map[string]string json.Unmarshal([]byte(byteValue), &result) - log.Info().Msgf("Read database URL: %v", result["UserMongoAddress"]) log.Info().Msg("Initializing DB connection...") - mongo_session := initializeDatabase(result["UserMongoAddress"]) - defer mongo_session.Close() - log.Info().Msg("Successfull") + mongoClient, mongoClose := initializeDatabase(result["UserMongoAddress"]) + defer mongoClose() - serv_port, _ := strconv.Atoi(result["UserPort"]) - serv_ip := result["UserIP"] - log.Info().Msgf("Read target port: %v", serv_port) - log.Info().Msgf("Read consul address: %v", result["consulAddress"]) - log.Info().Msgf("Read jaeger address: %v", result["jaegerAddress"]) + servPort, _ := strconv.Atoi(result["UserPort"]) + servIP := result["UserIP"] var ( - // port = flag.Int("port", 8086, "The server port") - jaegeraddr = flag.String("jaegeraddr", result["jaegerAddress"], "Jaeger server addr") - consuladdr = flag.String("consuladdr", result["consulAddress"], "Consul address") + jaegerAddr = flag.String("jaegeraddr", result["jaegerAddress"], "Jaeger address") + consulAddr = flag.String("consuladdr", result["consulAddress"], "Consul address") ) flag.Parse() - log.Info().Msgf("Initializing jaeger agent [service name: %v | host: %v]...", "user", *jaegeraddr) - tracer, err := tracing.Init("user", *jaegeraddr) + log.Info().Msgf("Initializing jaeger agent [service name: %v | host: %v]...", "user", *jaegerAddr) + tracer, err := tracing.Init("user", *jaegerAddr) if err != nil { log.Panic().Msgf("Got error while initializing jaeger agent: %v", err) } log.Info().Msg("Jaeger agent initialized") - log.Info().Msgf("Initializing consul agent [host: %v]...", *consuladdr) - registry, err := registry.NewClient(*consuladdr) + log.Info().Msgf("Initializing consul agent [host: %v]...", *consulAddr) + registry, err := registry.NewClient(*consulAddr) if err != nil { log.Panic().Msgf("Got error while initializing consul agent: %v", err) } log.Info().Msg("Consul agent initialized") srv := &user.Server{ - Tracer: tracer, - // Port: *port, - Registry: registry, - Port: serv_port, - IpAddr: serv_ip, - MongoSession: mongo_session, + Port: servPort, + IpAddr: servIP, + Tracer: tracer, + Registry: registry, + MongoClient: mongoClient, } log.Info().Msg("Starting server...") diff --git a/hotelReservation/config.json b/hotelReservation/config.json index f14312aeb..2404c4578 100644 --- a/hotelReservation/config.json +++ b/hotelReservation/config.json @@ -4,12 +4,9 @@ "FrontendPort": "5000", "GeoPort": "8083", "GeoMongoAddress": "mongodb-geo:27017", - "ProfilePort": "8088", + "ProfilePort": "8081", "ProfileMongoAddress": "mongodb-profile:27017", "ProfileMemcAddress": "memcached-profile:11211", - "ReviewPort": "8081", - "ReviewMongoAddress": "mongodb-review:27017", - "ReviewMemcAddress": "memcached-review:11211", "RatePort": "8084", "RateMongoAddress": "mongodb-rate:27017", "RateMemcAddress": "memcached-rate:11211", @@ -20,5 +17,6 @@ 
"ReserveMemcAddress": "memcached-reserve:11211", "SearchPort": "8082", "UserPort": "8086", - "UserMongoAddress": "mongodb-user:27017" + "UserMongoAddress": "mongodb-user:27017", + "KnativeDomainName": "" } diff --git a/hotelReservation/dialer/dialer.go b/hotelReservation/dialer/dialer.go index c4112b613..3a27c628b 100644 --- a/hotelReservation/dialer/dialer.go +++ b/hotelReservation/dialer/dialer.go @@ -4,8 +4,8 @@ import ( "fmt" "time" + "github.com/delimitrou/DeathStarBench/hotelreservation/tls" "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" - "github.com/harlow/go-micro-services/tls" consul "github.com/hashicorp/consul/api" lb "github.com/olivere/grpc/lb/consul" opentracing "github.com/opentracing/opentracing-go" diff --git a/hotelReservation/docker-compose-swarm.yml b/hotelReservation/docker-compose-swarm.yml index e89e773a2..27eb2a0dc 100644 --- a/hotelReservation/docker-compose-swarm.yml +++ b/hotelReservation/docker-compose-swarm.yml @@ -1,7 +1,7 @@ version: "3" services: consul: - image: consul:latest + image: hashicorp/consul:latest ports: - "8300:8300" - "8400:8400" @@ -244,4 +244,3 @@ volumes: recommendation: reservation: user: - diff --git a/hotelReservation/docker-compose.yml b/hotelReservation/docker-compose.yml index 7c035a91f..a06df23cf 100644 --- a/hotelReservation/docker-compose.yml +++ b/hotelReservation/docker-compose.yml @@ -12,10 +12,11 @@ services: frontend: environment: - TLS + - GC - JAEGER_SAMPLE_RATIO - LOG_LEVEL build: . - image: nevenas/dsbhotel + image: hotel_reserv_frontend_single_node entrypoint: frontend container_name: 'hotel_reserv_frontend' ports: @@ -27,7 +28,9 @@ services: profile: environment: - TLS + - GC - JAEGER_SAMPLE_RATIO + - MEMC_TIMEOUT - LOG_LEVEL build: . image: hotel_reserv_profile_single_node @@ -39,21 +42,6 @@ services: - consul restart: always - review: - environment: - - TLS - - JAEGER_SAMPLE_RATIO - - LOG_LEVEL - build: . - image: nevenas/dsbhotel - entrypoint: review - container_name: 'hotel_reserv_review' - depends_on: - - mongodb-review - - memcached-review - - consul - restart: always - search: build: . image: hotel_reserv_search_single_node @@ -64,6 +52,7 @@ services: restart: always environment: - TLS + - GC - JAEGER_SAMPLE_RATIO - LOG_LEVEL # - GRPC_GO_LOG_VERBOSITY_LEVEL=2 @@ -72,6 +61,7 @@ services: geo: environment: - TLS + - GC - JAEGER_SAMPLE_RATIO - LOG_LEVEL build: . @@ -86,7 +76,9 @@ services: rate: environment: - TLS + - GC - JAEGER_SAMPLE_RATIO + - MEMC_TIMEOUT - LOG_LEVEL build: . image: hotel_reserv_rate_single_node @@ -101,6 +93,7 @@ services: recommendation: environment: - TLS + - GC - JAEGER_SAMPLE_RATIO - LOG_LEVEL build: . @@ -115,6 +108,7 @@ services: user: environment: - TLS + - GC - JAEGER_SAMPLE_RATIO - LOG_LEVEL build: . @@ -129,7 +123,9 @@ services: reservation: environment: - TLS + - GC - JAEGER_SAMPLE_RATIO + - MEMC_TIMEOUT - LOG_LEVEL build: . 
image: hotel_reserv_rsv_single_node @@ -179,18 +175,6 @@ services: options: max-size: 50m - memcached-review: - image: memcached - # hostname: user-memcached - container_name: 'hotel_reserv_review_mmc' - restart: always - environment: - - MEMCACHED_CACHE_SIZE=128 - - MEMCACHED_THREADS=2 - logging: - options: - max-size: 50m - memcached-reserve: image: memcached # hostname: user-memcached @@ -219,14 +203,6 @@ services: volumes: - profile:/data/db - mongodb-review: - image: mongo:4.4.6 - container_name: 'hotel_reserv_review_mongo' - hostname: review-db - restart: always - volumes: - - review:/data/db - mongodb-rate: image: mongo:4.4.6 container_name: 'hotel_reserv_rate_mongo' @@ -266,4 +242,3 @@ volumes: recommendation: reservation: user: - review: diff --git a/hotelReservation/go.mod b/hotelReservation/go.mod new file mode 100644 index 000000000..a12e04fcb --- /dev/null +++ b/hotelReservation/go.mod @@ -0,0 +1,53 @@ +module github.com/delimitrou/DeathStarBench/hotelreservation + +go 1.17 + +require ( + github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874 + github.com/golang/protobuf v1.0.0 + github.com/google/uuid v1.4.0 + github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20171214222146-0e7658f8ee99 + github.com/hailocab/go-geoindex v0.0.0-20160127134810-64631bfe9711 + github.com/hashicorp/consul v1.0.6 + github.com/olivere/grpc v1.0.0 + github.com/opentracing-contrib/go-stdlib v0.0.0-20180308002341-f6b9967a3c69 + github.com/opentracing/opentracing-go v1.2.0 + github.com/rs/zerolog v1.31.0 + github.com/uber/jaeger-client-go v2.30.0+incompatible + go.mongodb.org/mongo-driver v1.12.2 + golang.org/x/net v0.17.0 + google.golang.org/grpc v1.11.2 +) + +require ( + github.com/HdrHistogram/hdrhistogram-go v1.1.2 // indirect + github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da // indirect + github.com/golang/glog v1.1.2 // indirect + github.com/golang/snappy v0.0.1 // indirect + github.com/google/go-cmp v0.5.4 // indirect + github.com/hashicorp/go-cleanhttp v0.0.0-20171218145408-d5fe4b57a186 // indirect + github.com/hashicorp/go-immutable-radix v1.0.0 // indirect + github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 // indirect + github.com/hashicorp/golang-lru v0.5.0 // indirect + github.com/hashicorp/serf v0.8.2 // indirect + github.com/klauspost/compress v1.13.6 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.19 // indirect + github.com/mitchellh/go-homedir v0.0.0-20161203194507-b8bc1bf76747 // indirect + github.com/mitchellh/go-testing-interface v1.14.1 // indirect + github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/stretchr/testify v1.7.0 // indirect + github.com/uber/jaeger-lib v2.4.1+incompatible // indirect + github.com/xdg-go/pbkdf2 v1.0.0 // indirect + github.com/xdg-go/scram v1.1.2 // indirect + github.com/xdg-go/stringprep v1.0.4 // indirect + github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect + go.uber.org/atomic v1.11.0 // indirect + golang.org/x/crypto v0.14.0 // indirect + golang.org/x/sync v0.4.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect + google.golang.org/genproto v0.0.0-20180306020942-df60624c1e9b // indirect +) diff --git a/hotelReservation/go.sum b/hotelReservation/go.sum new file mode 100644 index 000000000..cb14f7ba4 --- /dev/null +++ b/hotelReservation/go.sum @@ 
-0,0 +1,227 @@ +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= +github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874 h1:N7oVaKyGp8bttX0bfZGmcGkjz7DLQXhAn3DNd3T0ous= +github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874/go.mod h1:r5xuitiExdLAJ09PR7vBVENGvp4ZuTBeWTGtxuX3K+c= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= +github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= +github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= +github.com/golang/protobuf v1.0.0 h1:lsek0oXi8iFE9L+EXARyHIjU5rlWIhhTkjDz3vHhWWQ= +github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-opentracing 
v0.0.0-20171214222146-0e7658f8ee99 h1:bTRV2bQrg85E7ZeeyQfHX3GyfidLrNzVoyq7epx0bTw= +github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20171214222146-0e7658f8ee99/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= +github.com/hailocab/go-geoindex v0.0.0-20160127134810-64631bfe9711 h1:Oi8hPOZX0gaM2sPVXse2bMpfOjP47a7O61YuB6Z4sGk= +github.com/hailocab/go-geoindex v0.0.0-20160127134810-64631bfe9711/go.mod h1:+v2qJ3UZe4q2GfgZO4od004F/cMgJbmPSs7dD/ZMUkY= +github.com/hashicorp/consul v1.0.6 h1:N5444NVNdT/FZddKtLqlYVyY47RmoSrBMxaAdtMgvRc= +github.com/hashicorp/consul v1.0.6/go.mod h1:mFrjN1mfidgJfYP1xrJCF+AfRhr6Eaqhb2+sfyn/OOI= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.0.0-20171218145408-d5fe4b57a186 h1:URgjUo+bs1KwatoNbwG0uCO4dHN4r1jsp4a5AGgHRjo= +github.com/hashicorp/go-cleanhttp v0.0.0-20171218145408-d5fe4b57a186/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 h1:VBj0QYQ0u2MCJzBfeYXGexnAl17GsH1yidnoxCqqD9E= +github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90/go.mod h1:o4zcYY1e0GEZI6eSEr+43QDYmuGglw1qSO6qdHUHCgg= +github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3 h1:EmmoJme1matNzb+hMpDuR/0sbJSUisxyqBGG676r31M= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2 h1:YZ7UKsJv+hKjqGVUUbtE3HNj79Eln2oQ75tniF6iPt0= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/kr/pty 
v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v0.0.0-20161203194507-b8bc1bf76747 h1:eQox4Rh4ewJF+mqYPxCkmBAirRnPaHEB26UkNuPyjlk= +github.com/mitchellh/go-homedir v0.0.0-20161203194507-b8bc1bf76747/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= +github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe h1:iruDEfMl2E6fbMZ9s0scYfZQ84/6SPL6zC8ACM2oIL0= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/olivere/grpc v1.0.0 h1:5BRWyJD2IEJpXtkWBQa+8xoZB62IxIs8N5IRDC7Z4GU= +github.com/olivere/grpc v1.0.0/go.mod h1:7QAZ2LTuxstIppsx92iigF/F4baKmlyldhW4C0S4Qlk= +github.com/opentracing-contrib/go-stdlib v0.0.0-20180308002341-f6b9967a3c69 h1:xwxJZjgtaEhkTCXKdcV9+ZSIUox2tcv0a5oYzy6p4zE= +github.com/opentracing-contrib/go-stdlib v0.0.0-20180308002341-f6b9967a3c69/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/rs/xid v1.5.0/go.mod 
h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.31.0 h1:FcTR3NnLWW+NnTwwhFWiJSZr4ECLpqCm6QsEnyvbV4A= +github.com/rs/zerolog v1.31.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= +github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= +github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= +github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= +github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= +github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.mongodb.org/mongo-driver v1.12.2 h1:gbWY1bJkkmUB9jjZzcdhOL8O85N9H+Vvsf2yFN0RDws= +go.mongodb.org/mongo-driver v1.12.2/go.mod h1:/rGBTebI3XYboVmgz+Wv3Bcbl3aD0QF9zl6kDDw18rQ= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod 
h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +google.golang.org/genproto v0.0.0-20180306020942-df60624c1e9b h1:XeiFoG4FHSBJUL3qKCkMrkwBFRXB+hyQiTPg82JUssI= +google.golang.org/genproto v0.0.0-20180306020942-df60624c1e9b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.11.2 h1:Xr3u0vazLhwRo8dtrHl3O9S9IJluoOiXN0L+RoBZVfo= +google.golang.org/grpc v1.11.2/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/hotelReservation/helm-chart/README.md b/hotelReservation/helm-chart/README.md new file mode 100644 index 000000000..13011128a --- /dev/null +++ b/hotelReservation/helm-chart/README.md @@ -0,0 +1,126 @@ +# Hotel Reservation Microservices Helm Chart # + +## What is a Helm Chart? ## +Helm charts are packages containing Kubernetes yaml files. Their main goal is to automate the deployment of an application on a Kubernetes cluster: a chart defines the application's behaviour and offers an easy way of manipulating the application's parameters. Packages are easily portable across platforms. + +## Purpose of this project ## +The main goal of this project is to automate the process of deploying the Hotel Reservation microservices on a Kubernetes cluster natively, using a helm chart. + +## Structure of helm chart ## +Every microservice is packaged into its own isolated helm chart. All these packages are assembled under one main helm chart. The microservices share the same deployment, service and configmap templates, which are parameterized using values from the `values.yaml` file in each microservice package. The helm charts also share the same config files. The main helm chart contains global values which are shared among microservices but can be individually overridden. + +## Service config file ## +All Hotel Reservation services (that use the all-purpose hotel reservation microservices image) will use the config file found under `templates/configs` in the main helm chart. +``` +templates/ + configs/ + service-config.tpl +``` + +## Deployment ## +In order to deploy the services using the helm chart, Helm needs to be installed (https://helm.sh/). +The following line shows a default deployment of the Hotel Reservation microservices using the helm chart: + +``` +helm install RELEASE_NAME HELM_CHART_REPO_PATH +``` + +### Setting namespace ### + +``` +helm install RELEASE_NAME HELM_CHART_REPO_PATH -n NAMESPACE +``` + +### Overriding default values ### + +#### Default replicas #### +``` +helm install RELEASE_NAME HELM_CHART_REPO_PATH --set global.replicas=2 +``` + +#### Default image pull policy #### +``` +helm install RELEASE_NAME HELM_CHART_REPO_PATH --set-string global.imagePullPolicy=Always +``` + +#### frontend pod replica count #### +``` +helm install RELEASE_NAME HELM_CHART_REPO_PATH --set frontend.replicas=3 +``` + +### Setting topology spread constraints ### +Kubernetes allows for controlling how pods are spread across the cluster. We can specify the same spread constraints for all the pods, or different constraints for a given pod, as sketched below. + +(https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/)
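+ +The strings passed in the examples below are injected into each service's pod template. Rendered, the manifest would contain a block like the following sketch (a hypothetical rendering: it assumes worker nodes are labelled with a `node` topology key and uses the frontend service as the example): +``` +spec: + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: node + whenUnsatisfiable: DoNotSchedule + labelSelector: + matchLabels: + service: frontend +``` +With `maxSkew: 1`, the number of matching pods may differ by at most one across `node` domains, and `whenUnsatisfiable: DoNotSchedule` makes this a hard scheduling constraint rather than a best-effort one.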
+ +#### Same spread constraints for all the pods #### +``` +helm install RELEASE_NAME HELM_CHART_REPO_PATH \ + --set-string global.topologySpreadConstraints="- maxSkew: 1 + topologyKey: node + whenUnsatisfiable: DoNotSchedule + labelSelector: + matchLabels: + service: {{ .Values.name }}" +``` + +#### Unique spread constraints for frontend #### +``` +helm install RELEASE_NAME HELM_CHART_REPO_PATH \ + --set-string frontend.topologySpreadConstraints="- maxSkew: 1 + topologyKey: node + whenUnsatisfiable: DoNotSchedule + labelSelector: + matchLabels: + service: {{ .Values.name }}" +``` +Note: indentation is significant in strings passed as parameters. + + +### Setting resources ### +Kubernetes allows resources (CPU, memory) to be set and assigned to a container. By default, none of the containers in any service in the helm chart has resource constraints. We can specify the same resource constraints for all the containers or for a given one. +Example: + +#### Same resources for all the containers #### +``` +helm install RELEASE_NAME HELM_CHART_REPO_PATH \ + --set-string global.resources="requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "2"" +``` + +#### Setting resources for the frontend container #### + +``` +helm install RELEASE_NAME HELM_CHART_REPO_PATH \ + --set-string frontend.container.resources="requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "2"" +``` + +#### Same resources for all the containers and unique for frontend #### +``` +helm install RELEASE_NAME HELM_CHART_REPO_PATH \ + --set-string global.resources="requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "2"" \ + --set-string frontend.container.resources="requests: + memory: "64Mi" + cpu: "500m" + limits: + memory: "128Mi" + cpu: "4"" +``` +Note: indentation is significant in strings passed as parameters. + +If an entry under resources is not specified, the value is retrieved from the global values (which can be overridden during deployment). +(Documentation on Kubernetes container resource management: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/)
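+ +### Example: an end-to-end install ### +As a hypothetical end-to-end invocation (the release name `hotel-res`, the namespace, and the chart path are placeholders; `--create-namespace` is a standard Helm 3 flag), the overrides above can be combined: +``` +helm install hotel-res ./hotelreservation -n hotel-res --create-namespace \ + --set global.replicas=2 \ + --set-string global.imagePullPolicy=Always +``` +Helm merges these `--set` values over the defaults in each `values.yaml`, so anything not overridden keeps its chart default.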
diff --git a/hotelReservation/helm-chart/hotelreservation/Chart.yaml b/hotelReservation/helm-chart/hotelreservation/Chart.yaml new file mode 100644 index 000000000..d2c8ec358 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/Chart.yaml @@ -0,0 +1,46 @@ +apiVersion: v2 +name: hotel-reservation +description: Helm chart for HotelReservation service in DeathStarBench +type: application +version: 0.1.0 +appVersion: "1.16.0" + +dependencies: + - name: consul + version: 0.1.0 + - name: frontend + version: 0.1.0 + - name: geo + version: 0.1.0 + - name: jaeger + version: 0.1.0 + - name: memcached-profile + version: 0.1.0 + - name: memcached-rate + version: 0.1.0 + - name: memcached-reserve + version: 0.1.0 + - name: mongodb-geo + version: 0.1.0 + - name: mongodb-profile + version: 0.1.0 + - name: mongodb-rate + version: 0.1.0 + - name: mongodb-recommendation + version: 0.1.0 + - name: mongodb-reservation + version: 0.1.0 + - name: mongodb-user + version: 0.1.0 + - name: profile + version: 0.1.0 + - name: rate + version: 0.1.0 + - name: recommendation + version: 0.1.0 + - name: reservation + version: 0.1.0 + - name: search + version: 0.1.0 + - name: user + version: 0.1.0 diff --git a/hotelReservation/helm-chart/hotelreservation/charts/consul/Chart.yaml b/hotelReservation/helm-chart/hotelreservation/charts/consul/Chart.yaml new file mode 100644 index 000000000..7c8f8271d --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/consul/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: consul +description: consul microservice in HotelReservation +type: application +version: 0.1.0 +appVersion: 0.1.0 diff --git a/hotelReservation/helm-chart/hotelreservation/charts/consul/templates/deployment.yaml b/hotelReservation/helm-chart/hotelreservation/charts/consul/templates/deployment.yaml new file mode 100644 index 000000000..0121ff273 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/consul/templates/deployment.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseDeployment" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/consul/templates/service.yaml b/hotelReservation/helm-chart/hotelreservation/charts/consul/templates/service.yaml new file mode 100644 index 000000000..2c87dcdd0 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/consul/templates/service.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseService" .
}} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/consul/values.yaml b/hotelReservation/helm-chart/hotelreservation/charts/consul/values.yaml new file mode 100644 index 000000000..704248293 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/consul/values.yaml @@ -0,0 +1,25 @@ +name: consul + +replicas: 1 + +ports: + - port: 8300 + targetPort: 8300 + - port: 8400 + targetPort: 8400 + - port: 8500 + targetPort: 8500 + - port: 8600 + protocol: UDP + targetPort: 53 + +container: + image: hashicorp/consul + imageVersion: 1.13.2 + name: consul + ports: + - containerPort: 8300 + - containerPort: 8400 + - containerPort: 8500 + - containerPort: 53 + protocol: UDP diff --git a/hotelReservation/helm-chart/hotelreservation/charts/frontend/Chart.yaml b/hotelReservation/helm-chart/hotelreservation/charts/frontend/Chart.yaml new file mode 100644 index 000000000..aeb847bea --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/frontend/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: frontend +description: frontend microservice in HotelReservation +type: application +version: 0.1.0 +appVersion: 0.1.0 diff --git a/hotelReservation/helm-chart/hotelreservation/charts/frontend/configs/service-config b/hotelReservation/helm-chart/hotelreservation/charts/frontend/configs/service-config new file mode 100644 index 000000000..59acbee3a --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/frontend/configs/service-config @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.service-config.json" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/frontend/templates/configmap.yaml b/hotelReservation/helm-chart/hotelreservation/charts/frontend/templates/configmap.yaml new file mode 100644 index 000000000..3334e5fda --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/frontend/templates/configmap.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseConfigMap" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/frontend/templates/deployment.yaml b/hotelReservation/helm-chart/hotelreservation/charts/frontend/templates/deployment.yaml new file mode 100644 index 000000000..6f06bf63b --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/frontend/templates/deployment.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseDeploymentServices" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/frontend/templates/service.yaml b/hotelReservation/helm-chart/hotelreservation/charts/frontend/templates/service.yaml new file mode 100644 index 000000000..2c87dcdd0 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/frontend/templates/service.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseService" . 
}} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/frontend/values.yaml b/hotelReservation/helm-chart/hotelreservation/charts/frontend/values.yaml new file mode 100644 index 000000000..ec1171208 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/frontend/values.yaml @@ -0,0 +1,17 @@ +name: frontend + +ports: + - port: 5000 + targetPort: 5000 + +container: + command: ./frontend + image: deathstarbench/hotel-reservation + name: hotel-reserv-frontend + ports: + - containerPort: 5000 + +configMaps: + - name: service-config.json + mountPath: config.json + value: service-config diff --git a/hotelReservation/helm-chart/hotelreservation/charts/geo/Chart.yaml b/hotelReservation/helm-chart/hotelreservation/charts/geo/Chart.yaml new file mode 100644 index 000000000..8ab192402 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/geo/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: geo +description: geo microservice in HotelReservation +type: application +version: 0.1.0 +appVersion: 0.1.0 diff --git a/hotelReservation/helm-chart/hotelreservation/charts/geo/configs/service-config b/hotelReservation/helm-chart/hotelreservation/charts/geo/configs/service-config new file mode 100644 index 000000000..59acbee3a --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/geo/configs/service-config @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.service-config.json" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/geo/templates/configmap.yaml b/hotelReservation/helm-chart/hotelreservation/charts/geo/templates/configmap.yaml new file mode 100644 index 000000000..3334e5fda --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/geo/templates/configmap.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseConfigMap" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/geo/templates/deployment.yaml b/hotelReservation/helm-chart/hotelreservation/charts/geo/templates/deployment.yaml new file mode 100644 index 000000000..6f06bf63b --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/geo/templates/deployment.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseDeploymentServices" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/geo/templates/service.yaml b/hotelReservation/helm-chart/hotelreservation/charts/geo/templates/service.yaml new file mode 100644 index 000000000..2c87dcdd0 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/geo/templates/service.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseService" . 
}} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/geo/values.yaml b/hotelReservation/helm-chart/hotelreservation/charts/geo/values.yaml new file mode 100644 index 000000000..80c7aabb8 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/geo/values.yaml @@ -0,0 +1,17 @@ +name: geo + +ports: + - port: 8083 + targetPort: 8083 + +container: + command: ./geo + image: deathstarbench/hotel-reservation + name: hotel-reserv-geo + ports: + - containerPort: 8083 + +configMaps: + - name: service-config.json + mountPath: config.json + value: service-config diff --git a/hotelReservation/helm-chart/hotelreservation/charts/jaeger/Chart.yaml b/hotelReservation/helm-chart/hotelreservation/charts/jaeger/Chart.yaml new file mode 100644 index 000000000..1a753341a --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/jaeger/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: jaeger +description: jaeger microservice in HotelReservation +type: application +version: 0.1.0 +appVersion: 0.1.0 diff --git a/hotelReservation/helm-chart/hotelreservation/charts/jaeger/templates/deployment.yaml b/hotelReservation/helm-chart/hotelreservation/charts/jaeger/templates/deployment.yaml new file mode 100644 index 000000000..0121ff273 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/jaeger/templates/deployment.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseDeployment" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/jaeger/templates/service.yaml b/hotelReservation/helm-chart/hotelreservation/charts/jaeger/templates/service.yaml new file mode 100644 index 000000000..2c87dcdd0 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/jaeger/templates/service.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseService" . 
}} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/jaeger/values.yaml b/hotelReservation/helm-chart/hotelreservation/charts/jaeger/values.yaml new file mode 100644 index 000000000..59fc7e8f4 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/jaeger/values.yaml @@ -0,0 +1,41 @@ +name: jaeger + +replicas: 1 + +ports: + - port: 14269 + targetPort: 14269 + - port: 5778 + targetPort: 5778 + - port: 14268 + targetPort: 14268 + - port: 14267 + targetPort: 14267 + - port: 16686 + targetPort: 16686 + - port: 5775 + protocol: UDP + targetPort: 5775 + - port: 6831 + protocol: UDP + targetPort: 6831 + - port: 6832 + protocol: UDP + targetPort: 6832 + +container: + image: jaegertracing/all-in-one + imageVersion: 1.21.0 + name: hotel-reserv-jaeger + ports: + - containerPort: 14269 + - containerPort: 5778 + - containerPort: 14268 + - containerPort: 14267 + - containerPort: 16686 + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP diff --git a/hotelReservation/helm-chart/hotelreservation/charts/memcached-profile/Chart.yaml b/hotelReservation/helm-chart/hotelreservation/charts/memcached-profile/Chart.yaml new file mode 100644 index 000000000..977bddc82 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/memcached-profile/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: memcached-profile +description: memcached-profile microservice in HotelReservation +type: application +version: 0.1.0 +appVersion: 0.1.0 diff --git a/hotelReservation/helm-chart/hotelreservation/charts/memcached-profile/templates/deployment.yaml b/hotelReservation/helm-chart/hotelreservation/charts/memcached-profile/templates/deployment.yaml new file mode 100644 index 000000000..381bc2683 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/memcached-profile/templates/deployment.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseDeploymentMemcached" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/memcached-profile/templates/service.yaml b/hotelReservation/helm-chart/hotelreservation/charts/memcached-profile/templates/service.yaml new file mode 100644 index 000000000..1638569c2 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/memcached-profile/templates/service.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseServiceMemcached" . 
}} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/memcached-profile/values.yaml b/hotelReservation/helm-chart/hotelreservation/charts/memcached-profile/values.yaml new file mode 100644 index 000000000..8dd2db90a --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/memcached-profile/values.yaml @@ -0,0 +1,12 @@ +name: memcached-profile + +ports: + - port: 11213 + targetPort: 11211 + +container: + image: memcached + imageVersion: 1.6.17 + name: hotel-reserv-profile-mmc + ports: + - containerPort: 11211 diff --git a/hotelReservation/helm-chart/hotelreservation/charts/memcached-rate/Chart.yaml b/hotelReservation/helm-chart/hotelreservation/charts/memcached-rate/Chart.yaml new file mode 100644 index 000000000..6a8551bb0 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/memcached-rate/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: memcached-rate +description: memcached-rate microservice in HotelReservation +type: application +version: 0.1.0 +appVersion: 0.1.0 diff --git a/hotelReservation/helm-chart/hotelreservation/charts/memcached-rate/templates/deployment.yaml b/hotelReservation/helm-chart/hotelreservation/charts/memcached-rate/templates/deployment.yaml new file mode 100644 index 000000000..381bc2683 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/memcached-rate/templates/deployment.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseDeploymentMemcached" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/memcached-rate/templates/service.yaml b/hotelReservation/helm-chart/hotelreservation/charts/memcached-rate/templates/service.yaml new file mode 100644 index 000000000..1638569c2 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/memcached-rate/templates/service.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseServiceMemcached" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/memcached-rate/values.yaml b/hotelReservation/helm-chart/hotelreservation/charts/memcached-rate/values.yaml new file mode 100644 index 000000000..82ddf30b7 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/memcached-rate/values.yaml @@ -0,0 +1,12 @@ +name: memcached-rate + +ports: + - port: 11212 + targetPort: 11211 + +container: + image: memcached + imageVersion: 1.6.17 + name: hotel-reserv-rate-mmc + ports: + - containerPort: 11211 diff --git a/hotelReservation/helm-chart/hotelreservation/charts/memcached-reserve/Chart.yaml b/hotelReservation/helm-chart/hotelreservation/charts/memcached-reserve/Chart.yaml new file mode 100644 index 000000000..6d10ad118 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/memcached-reserve/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: memcached-reserve +description: memcached-reserve microservice in HotelReservation +type: application +version: 0.1.0 +appVersion: 0.1.0 diff --git a/hotelReservation/helm-chart/hotelreservation/charts/memcached-reserve/templates/deployment.yaml b/hotelReservation/helm-chart/hotelreservation/charts/memcached-reserve/templates/deployment.yaml new file mode 100644 index 000000000..381bc2683 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/memcached-reserve/templates/deployment.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseDeploymentMemcached" . 
}} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/memcached-reserve/templates/service.yaml b/hotelReservation/helm-chart/hotelreservation/charts/memcached-reserve/templates/service.yaml new file mode 100644 index 000000000..1638569c2 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/memcached-reserve/templates/service.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseServiceMemcached" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/memcached-reserve/values.yaml b/hotelReservation/helm-chart/hotelreservation/charts/memcached-reserve/values.yaml new file mode 100644 index 000000000..8b0554493 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/memcached-reserve/values.yaml @@ -0,0 +1,12 @@ +name: memcached-reserve + +ports: + - port: 11214 + targetPort: 11211 + +container: + image: memcached + imageVersion: 1.6.17 + name: hotel-reserv-reservation-mmc + ports: + - containerPort: 11211 diff --git a/hotelReservation/helm-chart/hotelreservation/charts/mongodb-geo/Chart.yaml b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-geo/Chart.yaml new file mode 100644 index 000000000..495fd2f34 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-geo/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: mongodb-geo +description: mongodb-geo microservice in HotelReservation +type: application +version: 0.1.0 +appVersion: 0.1.0 diff --git a/hotelReservation/helm-chart/hotelreservation/charts/mongodb-geo/templates/deployment.yaml b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-geo/templates/deployment.yaml new file mode 100644 index 000000000..6e415793d --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-geo/templates/deployment.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseDeploymentMongoDB" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/mongodb-geo/templates/persistentVolume.yaml b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-geo/templates/persistentVolume.yaml new file mode 100644 index 000000000..1c946dd4b --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-geo/templates/persistentVolume.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.basePersistentVolume" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/mongodb-geo/templates/persistentVolumeClaim.yaml b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-geo/templates/persistentVolumeClaim.yaml new file mode 100644 index 000000000..29ebe5a22 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-geo/templates/persistentVolumeClaim.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.basePersistentVolumeClaim" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/mongodb-geo/templates/service.yaml b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-geo/templates/service.yaml new file mode 100644 index 000000000..2c87dcdd0 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-geo/templates/service.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseService" . 
}} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/mongodb-geo/values.yaml b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-geo/values.yaml new file mode 100644 index 000000000..44bb15a1d --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-geo/values.yaml @@ -0,0 +1,14 @@ +name: mongodb-geo + +replicas: 1 + +ports: + - port: 27018 + targetPort: 27017 + +container: + image: mongo + imageVersion: 5.0 + name: hotel-reserv-geo-mongo + ports: + - containerPort: 27017 diff --git a/hotelReservation/helm-chart/hotelreservation/charts/mongodb-profile/Chart.yaml b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-profile/Chart.yaml new file mode 100644 index 000000000..66ce2bddb --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-profile/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: mongodb-profile +description: mongodb-profile microservice in HotelReservation +type: application +version: 0.1.0 +appVersion: 0.1.0 diff --git a/hotelReservation/helm-chart/hotelreservation/charts/mongodb-profile/templates/deployment.yaml b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-profile/templates/deployment.yaml new file mode 100644 index 000000000..6e415793d --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-profile/templates/deployment.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseDeploymentMongoDB" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/mongodb-profile/templates/persistentVolume.yaml b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-profile/templates/persistentVolume.yaml new file mode 100644 index 000000000..1c946dd4b --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-profile/templates/persistentVolume.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.basePersistentVolume" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/mongodb-profile/templates/persistentVolumeClaim.yaml b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-profile/templates/persistentVolumeClaim.yaml new file mode 100644 index 000000000..29ebe5a22 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-profile/templates/persistentVolumeClaim.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.basePersistentVolumeClaim" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/mongodb-profile/templates/service.yaml b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-profile/templates/service.yaml new file mode 100644 index 000000000..2c87dcdd0 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-profile/templates/service.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseService" . 
}} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/mongodb-profile/values.yaml b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-profile/values.yaml new file mode 100644 index 000000000..ddb4d1a2c --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-profile/values.yaml @@ -0,0 +1,14 @@ +name: mongodb-profile + +replicas: 1 + +ports: + - port: 27019 + targetPort: 27017 + +container: + image: mongo + imageVersion: 5.0 + name: hotel-reserv-profile-mongo + ports: + - containerPort: 27017 diff --git a/hotelReservation/helm-chart/hotelreservation/charts/mongodb-rate/Chart.yaml b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-rate/Chart.yaml new file mode 100644 index 000000000..2f2237573 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-rate/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: mongodb-rate +description: mongodb-rate microservice in HotelReservation +type: application +version: 0.1.0 +appVersion: 0.1.0 diff --git a/hotelReservation/helm-chart/hotelreservation/charts/mongodb-rate/templates/deployment.yaml b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-rate/templates/deployment.yaml new file mode 100644 index 000000000..6e415793d --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-rate/templates/deployment.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseDeploymentMongoDB" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/mongodb-rate/templates/persistentVolume.yaml b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-rate/templates/persistentVolume.yaml new file mode 100644 index 000000000..1c946dd4b --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-rate/templates/persistentVolume.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.basePersistentVolume" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/mongodb-rate/templates/persistentVolumeClaim.yaml b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-rate/templates/persistentVolumeClaim.yaml new file mode 100644 index 000000000..29ebe5a22 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-rate/templates/persistentVolumeClaim.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.basePersistentVolumeClaim" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/mongodb-rate/templates/service.yaml b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-rate/templates/service.yaml new file mode 100644 index 000000000..2c87dcdd0 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-rate/templates/service.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseService" . 
}} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/mongodb-rate/values.yaml b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-rate/values.yaml new file mode 100644 index 000000000..9fc2fc374 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-rate/values.yaml @@ -0,0 +1,14 @@ +name: mongodb-rate + +replicas: 1 + +ports: + - port: 27020 + targetPort: 27017 + +container: + image: mongo + imageVersion: 5.0 + name: hotel-reserv-rate-mongo + ports: + - containerPort: 27017 diff --git a/hotelReservation/helm-chart/hotelreservation/charts/mongodb-recommendation/Chart.yaml b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-recommendation/Chart.yaml new file mode 100644 index 000000000..f1b67ca7e --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-recommendation/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: mongodb-recommendation +description: mongodb-recommendation microservice in HotelReservation +type: application +version: 0.1.0 +appVersion: 0.1.0 diff --git a/hotelReservation/helm-chart/hotelreservation/charts/mongodb-recommendation/templates/deployment.yaml b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-recommendation/templates/deployment.yaml new file mode 100644 index 000000000..6e415793d --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-recommendation/templates/deployment.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseDeploymentMongoDB" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/mongodb-recommendation/templates/persistentVolume.yaml b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-recommendation/templates/persistentVolume.yaml new file mode 100644 index 000000000..1c946dd4b --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-recommendation/templates/persistentVolume.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.basePersistentVolume" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/mongodb-recommendation/templates/persistentVolumeClaim.yaml b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-recommendation/templates/persistentVolumeClaim.yaml new file mode 100644 index 000000000..29ebe5a22 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-recommendation/templates/persistentVolumeClaim.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.basePersistentVolumeClaim" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/mongodb-recommendation/templates/service.yaml b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-recommendation/templates/service.yaml new file mode 100644 index 000000000..2c87dcdd0 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-recommendation/templates/service.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseService" . 
}} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/mongodb-recommendation/values.yaml b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-recommendation/values.yaml new file mode 100644 index 000000000..d563f2a5d --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-recommendation/values.yaml @@ -0,0 +1,14 @@ +name: mongodb-recommendation + +replicas: 1 + +ports: + - port: 27021 + targetPort: 27017 + +container: + image: mongo + imageVersion: 5.0 + name: hotel-reserv-recommendation-mongo + ports: + - containerPort: 27017 diff --git a/hotelReservation/helm-chart/hotelreservation/charts/mongodb-reservation/Chart.yaml b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-reservation/Chart.yaml new file mode 100644 index 000000000..e0a7c2451 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-reservation/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: mongodb-reservation +description: mongodb-reservation microservice in HotelReservation +type: application +version: 0.1.0 +appVersion: 0.1.0 diff --git a/hotelReservation/helm-chart/hotelreservation/charts/mongodb-reservation/templates/deployment.yaml b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-reservation/templates/deployment.yaml new file mode 100644 index 000000000..6e415793d --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-reservation/templates/deployment.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseDeploymentMongoDB" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/mongodb-reservation/templates/persistentVolume.yaml b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-reservation/templates/persistentVolume.yaml new file mode 100644 index 000000000..1c946dd4b --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-reservation/templates/persistentVolume.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.basePersistentVolume" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/mongodb-reservation/templates/persistentVolumeClaim.yaml b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-reservation/templates/persistentVolumeClaim.yaml new file mode 100644 index 000000000..29ebe5a22 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-reservation/templates/persistentVolumeClaim.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.basePersistentVolumeClaim" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/mongodb-reservation/templates/service.yaml b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-reservation/templates/service.yaml new file mode 100644 index 000000000..2c87dcdd0 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-reservation/templates/service.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseService" . 
}} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/mongodb-reservation/values.yaml b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-reservation/values.yaml new file mode 100644 index 000000000..f1c0863ff --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-reservation/values.yaml @@ -0,0 +1,14 @@ +name: mongodb-reservation + +replicas: 1 + +ports: + - port: 27022 + targetPort: 27017 + +container: + image: mongo + imageVersion: 5.0 + name: hotel-reserv-reservation-mongo + ports: + - containerPort: 27017 diff --git a/hotelReservation/helm-chart/hotelreservation/charts/mongodb-user/Chart.yaml b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-user/Chart.yaml new file mode 100644 index 000000000..ebd97b851 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-user/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: mongodb-user +description: mongodb-user microservice in HotelReservation +type: application +version: 0.1.0 +appVersion: 0.1.0 diff --git a/hotelReservation/helm-chart/hotelreservation/charts/mongodb-user/templates/deployment.yaml b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-user/templates/deployment.yaml new file mode 100644 index 000000000..6e415793d --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-user/templates/deployment.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseDeploymentMongoDB" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/mongodb-user/templates/persistentVolume.yaml b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-user/templates/persistentVolume.yaml new file mode 100644 index 000000000..1c946dd4b --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-user/templates/persistentVolume.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.basePersistentVolume" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/mongodb-user/templates/persistentVolumeClaim.yaml b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-user/templates/persistentVolumeClaim.yaml new file mode 100644 index 000000000..29ebe5a22 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-user/templates/persistentVolumeClaim.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.basePersistentVolumeClaim" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/mongodb-user/templates/service.yaml b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-user/templates/service.yaml new file mode 100644 index 000000000..2c87dcdd0 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-user/templates/service.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseService" . 
}} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/mongodb-user/values.yaml b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-user/values.yaml new file mode 100644 index 000000000..c94f85064 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/mongodb-user/values.yaml @@ -0,0 +1,14 @@ +name: mongodb-user + +replicas: 1 + +ports: + - port: 27023 + targetPort: 27017 + +container: + image: mongo + imageVersion: 5.0 + name: hotel-reserv-user-mongo + ports: + - containerPort: 27017 diff --git a/hotelReservation/helm-chart/hotelreservation/charts/profile/Chart.yaml b/hotelReservation/helm-chart/hotelreservation/charts/profile/Chart.yaml new file mode 100644 index 000000000..71bea6a05 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/profile/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: profile +description: profile microservice in HotelReservation +type: application +version: 0.1.0 +appVersion: 0.1.0 diff --git a/hotelReservation/helm-chart/hotelreservation/charts/profile/configs/service-config b/hotelReservation/helm-chart/hotelreservation/charts/profile/configs/service-config new file mode 100644 index 000000000..59acbee3a --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/profile/configs/service-config @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.service-config.json" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/profile/templates/configmap.yaml b/hotelReservation/helm-chart/hotelreservation/charts/profile/templates/configmap.yaml new file mode 100644 index 000000000..3334e5fda --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/profile/templates/configmap.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseConfigMap" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/profile/templates/deployment.yaml b/hotelReservation/helm-chart/hotelreservation/charts/profile/templates/deployment.yaml new file mode 100644 index 000000000..6f06bf63b --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/profile/templates/deployment.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseDeploymentServices" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/profile/templates/service.yaml b/hotelReservation/helm-chart/hotelreservation/charts/profile/templates/service.yaml new file mode 100644 index 000000000..2c87dcdd0 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/profile/templates/service.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseService" . 
}} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/profile/values.yaml b/hotelReservation/helm-chart/hotelreservation/charts/profile/values.yaml new file mode 100644 index 000000000..57f90059e --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/profile/values.yaml @@ -0,0 +1,17 @@ +name: profile + +ports: + - port: 8081 + targetPort: 8081 + +container: + command: ./profile + image: deathstarbench/hotel-reservation + name: hotel-reserv-profile + ports: + - containerPort: 8081 + +configMaps: + - name: service-config.json + mountPath: config.json + value: service-config diff --git a/hotelReservation/helm-chart/hotelreservation/charts/rate/Chart.yaml b/hotelReservation/helm-chart/hotelreservation/charts/rate/Chart.yaml new file mode 100644 index 000000000..642550aa9 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/rate/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: rate +description: rate microservice in HotelReservation +type: application +version: 0.1.0 +appVersion: 0.1.0 diff --git a/hotelReservation/helm-chart/hotelreservation/charts/rate/configs/service-config b/hotelReservation/helm-chart/hotelreservation/charts/rate/configs/service-config new file mode 100644 index 000000000..59acbee3a --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/rate/configs/service-config @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.service-config.json" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/rate/templates/configmap.yaml b/hotelReservation/helm-chart/hotelreservation/charts/rate/templates/configmap.yaml new file mode 100644 index 000000000..3334e5fda --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/rate/templates/configmap.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseConfigMap" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/rate/templates/deployment.yaml b/hotelReservation/helm-chart/hotelreservation/charts/rate/templates/deployment.yaml new file mode 100644 index 000000000..6f06bf63b --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/rate/templates/deployment.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseDeploymentServices" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/rate/templates/service.yaml b/hotelReservation/helm-chart/hotelreservation/charts/rate/templates/service.yaml new file mode 100644 index 000000000..2c87dcdd0 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/rate/templates/service.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseService" . 
}} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/rate/values.yaml b/hotelReservation/helm-chart/hotelreservation/charts/rate/values.yaml new file mode 100644 index 000000000..f5b50ce4f --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/rate/values.yaml @@ -0,0 +1,17 @@ +name: rate + +ports: + - port: 8084 + targetPort: 8084 + +container: + command: ./rate + image: deathstarbench/hotel-reservation + name: hotel-reserv-rate + ports: + - containerPort: 8084 + +configMaps: + - name: service-config.json + mountPath: config.json + value: service-config diff --git a/hotelReservation/helm-chart/hotelreservation/charts/recommendation/Chart.yaml b/hotelReservation/helm-chart/hotelreservation/charts/recommendation/Chart.yaml new file mode 100644 index 000000000..7351d968c --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/recommendation/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: recommendation +description: recommendation microservice in HotelReservation +type: application +version: 0.1.0 +appVersion: 0.1.0 diff --git a/hotelReservation/helm-chart/hotelreservation/charts/recommendation/configs/service-config b/hotelReservation/helm-chart/hotelreservation/charts/recommendation/configs/service-config new file mode 100644 index 000000000..59acbee3a --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/recommendation/configs/service-config @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.service-config.json" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/recommendation/templates/configmap.yaml b/hotelReservation/helm-chart/hotelreservation/charts/recommendation/templates/configmap.yaml new file mode 100644 index 000000000..3334e5fda --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/recommendation/templates/configmap.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseConfigMap" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/recommendation/templates/deployment.yaml b/hotelReservation/helm-chart/hotelreservation/charts/recommendation/templates/deployment.yaml new file mode 100644 index 000000000..6f06bf63b --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/recommendation/templates/deployment.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseDeploymentServices" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/recommendation/templates/service.yaml b/hotelReservation/helm-chart/hotelreservation/charts/recommendation/templates/service.yaml new file mode 100644 index 000000000..2c87dcdd0 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/recommendation/templates/service.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseService" . 
}} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/recommendation/values.yaml b/hotelReservation/helm-chart/hotelreservation/charts/recommendation/values.yaml new file mode 100644 index 000000000..4ca768000 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/recommendation/values.yaml @@ -0,0 +1,17 @@ +name: recommendation + +ports: + - port: 8085 + targetPort: 8085 + +container: + command: ./recommendation + image: deathstarbench/hotel-reservation + name: hotel-reserv-recommendation + ports: + - containerPort: 8085 + +configMaps: + - name: service-config.json + mountPath: config.json + value: service-config diff --git a/hotelReservation/helm-chart/hotelreservation/charts/reservation/Chart.yaml b/hotelReservation/helm-chart/hotelreservation/charts/reservation/Chart.yaml new file mode 100644 index 000000000..7c26b5cfd --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/reservation/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: reservation +description: reservation microservice in HotelReservation +type: application +version: 0.1.0 +appVersion: 0.1.0 diff --git a/hotelReservation/helm-chart/hotelreservation/charts/reservation/configs/service-config b/hotelReservation/helm-chart/hotelreservation/charts/reservation/configs/service-config new file mode 100644 index 000000000..59acbee3a --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/reservation/configs/service-config @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.service-config.json" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/reservation/templates/configmap.yaml b/hotelReservation/helm-chart/hotelreservation/charts/reservation/templates/configmap.yaml new file mode 100644 index 000000000..3334e5fda --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/reservation/templates/configmap.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseConfigMap" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/reservation/templates/deployment.yaml b/hotelReservation/helm-chart/hotelreservation/charts/reservation/templates/deployment.yaml new file mode 100644 index 000000000..6f06bf63b --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/reservation/templates/deployment.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseDeploymentServices" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/reservation/templates/service.yaml b/hotelReservation/helm-chart/hotelreservation/charts/reservation/templates/service.yaml new file mode 100644 index 000000000..2c87dcdd0 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/reservation/templates/service.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseService" . 
}} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/reservation/values.yaml b/hotelReservation/helm-chart/hotelreservation/charts/reservation/values.yaml new file mode 100644 index 000000000..9c1e20fe1 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/reservation/values.yaml @@ -0,0 +1,17 @@ +name: reservation + +ports: + - port: 8087 + targetPort: 8087 + +container: + command: ./reservation + image: deathstarbench/hotel-reservation + name: hotel-reserv-reservation + ports: + - containerPort: 8087 + +configMaps: + - name: service-config.json + mountPath: config.json + value: service-config diff --git a/hotelReservation/helm-chart/hotelreservation/charts/search/Chart.yaml b/hotelReservation/helm-chart/hotelreservation/charts/search/Chart.yaml new file mode 100644 index 000000000..dce5b4e8f --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/search/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: search +description: search microservice in HotelReservation +type: application +version: 0.1.0 +appVersion: 0.1.0 diff --git a/hotelReservation/helm-chart/hotelreservation/charts/search/configs/service-config b/hotelReservation/helm-chart/hotelreservation/charts/search/configs/service-config new file mode 100644 index 000000000..59acbee3a --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/search/configs/service-config @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.service-config.json" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/search/templates/configmap.yaml b/hotelReservation/helm-chart/hotelreservation/charts/search/templates/configmap.yaml new file mode 100644 index 000000000..3334e5fda --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/search/templates/configmap.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseConfigMap" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/search/templates/deployment.yaml b/hotelReservation/helm-chart/hotelreservation/charts/search/templates/deployment.yaml new file mode 100644 index 000000000..6f06bf63b --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/search/templates/deployment.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseDeploymentServices" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/search/templates/service.yaml b/hotelReservation/helm-chart/hotelreservation/charts/search/templates/service.yaml new file mode 100644 index 000000000..2c87dcdd0 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/search/templates/service.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseService" . 
}} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/search/values.yaml b/hotelReservation/helm-chart/hotelreservation/charts/search/values.yaml new file mode 100644 index 000000000..f041cd25a --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/search/values.yaml @@ -0,0 +1,17 @@ +name: search + +ports: + - port: 8082 + targetPort: 8082 + +container: + command: ./search + image: deathstarbench/hotel-reservation + name: hotel-reserv-search + ports: + - containerPort: 8082 + +configMaps: + - name: service-config.json + mountPath: config.json + value: service-config diff --git a/hotelReservation/helm-chart/hotelreservation/charts/user/Chart.yaml b/hotelReservation/helm-chart/hotelreservation/charts/user/Chart.yaml new file mode 100644 index 000000000..af4f908cc --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/user/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: user +description: user microservice in HotelReservation +type: application +version: 0.1.0 +appVersion: 0.1.0 diff --git a/hotelReservation/helm-chart/hotelreservation/charts/user/configs/service-config b/hotelReservation/helm-chart/hotelreservation/charts/user/configs/service-config new file mode 100644 index 000000000..59acbee3a --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/user/configs/service-config @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.service-config.json" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/user/templates/configmap.yaml b/hotelReservation/helm-chart/hotelreservation/charts/user/templates/configmap.yaml new file mode 100644 index 000000000..3334e5fda --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/user/templates/configmap.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseConfigMap" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/user/templates/deployment.yaml b/hotelReservation/helm-chart/hotelreservation/charts/user/templates/deployment.yaml new file mode 100644 index 000000000..6f06bf63b --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/user/templates/deployment.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseDeploymentServices" . }} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/user/templates/service.yaml b/hotelReservation/helm-chart/hotelreservation/charts/user/templates/service.yaml new file mode 100644 index 000000000..2c87dcdd0 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/user/templates/service.yaml @@ -0,0 +1 @@ +{{ include "hotelreservation.templates.baseService" . 
}} diff --git a/hotelReservation/helm-chart/hotelreservation/charts/user/values.yaml b/hotelReservation/helm-chart/hotelreservation/charts/user/values.yaml new file mode 100644 index 000000000..413cf521a --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/charts/user/values.yaml @@ -0,0 +1,17 @@ +name: user + +ports: + - port: 8086 + targetPort: 8086 + +container: + command: ./user + image: deathstarbench/hotel-reservation + name: hotel-reserv-user + ports: + - containerPort: 8086 + +configMaps: + - name: service-config.json + mountPath: config.json + value: service-config diff --git a/hotelReservation/helm-chart/hotelreservation/templates/_baseConfigMap.tpl b/hotelReservation/helm-chart/hotelreservation/templates/_baseConfigMap.tpl new file mode 100644 index 000000000..2b513f2ff --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/templates/_baseConfigMap.tpl @@ -0,0 +1,15 @@ +{{- define "hotelreservation.templates.baseConfigMap" }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Values.name }}-{{ include "hotel-reservation.fullname" . }} + labels: + hotelreservation/service: {{ .Values.name }}--{{ include "hotel-reservation.fullname" . }} +data: + {{- range $configMap := .Values.configMaps }} + {{- $filePath := printf "configs/%s" $configMap.value }} + {{ $configMap.name -}}: | +{{- tpl ($.Files.Get $filePath) $ | indent 4 -}} + {{- end }} + +{{- end }} diff --git a/hotelReservation/helm-chart/hotelreservation/templates/_baseDeployment.tpl b/hotelReservation/helm-chart/hotelreservation/templates/_baseDeployment.tpl new file mode 100644 index 000000000..b2c0d6a96 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/templates/_baseDeployment.tpl @@ -0,0 +1,81 @@ +{{- define "hotelreservation.templates.baseDeployment" }} +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + {{- include "hotel-reservation.labels" . | nindent 4 }} + service: {{ .Values.name }}-{{ include "hotel-reservation.fullname" . }} + name: {{ .Values.name }}-{{ include "hotel-reservation.fullname" . }} +spec: + replicas: {{ .Values.replicas | default .Values.global.replicas }} + selector: + matchLabels: + {{- include "hotel-reservation.selectorLabels" . | nindent 6 }} + service: {{ .Values.name }}-{{ include "hotel-reservation.fullname" . }} + app: {{ .Values.name }}-{{ include "hotel-reservation.fullname" . }} + template: + metadata: + labels: + {{- include "hotel-reservation.labels" . | nindent 8 }} + service: {{ .Values.name }}-{{ include "hotel-reservation.fullname" . }} + app: {{ .Values.name }}-{{ include "hotel-reservation.fullname" . 
}} + spec: + containers: + {{- with .Values.container }} + - name: "{{ .name }}" + image: {{ .dockerRegistry | default $.Values.global.dockerRegistry }}/{{ .image }}:{{ .imageVersion | default $.Values.global.defaultImageVersion }} + imagePullPolicy: {{ .imagePullPolicy | default $.Values.global.imagePullPolicy }} + ports: + {{- range $cport := .ports }} + - containerPort: {{ $cport.containerPort -}} + {{ end }} + {{- if .environments }} + env: + {{- range $e := .environments }} + - name: {{ $e.name }} + value: "{{ (tpl ($e.value | toString) $) }}" + {{ end -}} + {{ end -}} + {{- if .command}} + command: + - {{ .command }} + {{- end -}} + {{- if .args}} + args: + {{- range $arg := .args}} + - {{ $arg }} + {{- end -}} + {{- end }} + {{- if .resources }} + resources: + {{ tpl .resources $ | nindent 10 | trim }} + {{- else if hasKey $.Values.global "resources" }} + resources: + {{ tpl $.Values.global.resources $ | nindent 10 | trim }} + {{- end }} + {{- end -}} + {{- if hasKey .Values "topologySpreadConstraints" }} + topologySpreadConstraints: + {{ tpl .Values.topologySpreadConstraints . | nindent 6 | trim }} + {{- else if hasKey $.Values.global "topologySpreadConstraints" }} + topologySpreadConstraints: + {{ tpl $.Values.global.topologySpreadConstraints . | nindent 6 | trim }} + {{- end }} + hostname: {{ .Values.name }}-{{ include "hotel-reservation.fullname" . }} + restartPolicy: {{ .Values.restartPolicy | default .Values.global.restartPolicy}} + {{- if .Values.affinity }} + affinity: {{- toYaml .Values.affinity | nindent 8 }} + {{- else if hasKey $.Values.global "affinity" }} + affinity: {{- toYaml .Values.global.affinity | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- toYaml .Values.tolerations | nindent 8 }} + {{- else if hasKey $.Values.global "tolerations" }} + tolerations: {{- toYaml .Values.global.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- toYaml .Values.nodeSelector | nindent 8 }} + {{- else if hasKey $.Values.global "nodeSelector" }} + nodeSelector: {{- toYaml .Values.global.nodeSelector | nindent 8 }} + {{- end }} +{{- end}} \ No newline at end of file diff --git a/hotelReservation/helm-chart/hotelreservation/templates/_baseDeploymentMemcached.tpl b/hotelReservation/helm-chart/hotelreservation/templates/_baseDeploymentMemcached.tpl new file mode 100644 index 000000000..1222d7052 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/templates/_baseDeploymentMemcached.tpl @@ -0,0 +1,100 @@ +{{- define "hotelreservation.templates.baseDeploymentMemcached" }} +{{- $count := add .Values.global.memcached.HACount 1 | int }} +{{- range $key, $item := untilStep 1 $count 1 }} +{{- $rangeItem := $item -}} +{{- with $ }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + {{- include "hotel-reservation.labels" . | nindent 4 }} + {{- include "hotel-reservation.backendLabels" . | nindent 4 }} + service: {{ .Values.name }}-{{ $rangeItem }}-{{ include "hotel-reservation.fullname" . }} + name: {{ .Values.name }}-{{ $rangeItem }}-{{ include "hotel-reservation.fullname" . }} +spec: + replicas: {{ .Values.replicas | default .Values.global.replicas }} + selector: + matchLabels: + {{- include "hotel-reservation.selectorLabels" . | nindent 6 }} + {{- include "hotel-reservation.backendLabels" . | nindent 6 }} + service: {{ .Values.name }}-{{ $rangeItem }}-{{ include "hotel-reservation.fullname" . }} + app: {{ .Values.name }}-{{ $rangeItem }}-{{ include "hotel-reservation.fullname" . 
}} + template: + metadata: + labels: + {{- include "hotel-reservation.labels" . | nindent 8 }} + {{- include "hotel-reservation.backendLabels" . | nindent 8 }} + service: {{ .Values.name }}-{{ $rangeItem }}-{{ include "hotel-reservation.fullname" . }} + app: {{ .Values.name }}-{{ $rangeItem }}-{{ include "hotel-reservation.fullname" . }} + {{- if hasKey $.Values "annotations" }} + annotations: + {{ tpl $.Values.annotations . | nindent 8 | trim }} + {{- else if hasKey $.Values.global "annotations" }} + annotations: + {{ tpl $.Values.global.annotations . | nindent 8 | trim }} + {{- end }} + spec: + containers: + {{- with .Values.container }} + - name: "{{ .name }}" + image: {{ .dockerRegistry | default $.Values.global.dockerRegistry }}/{{ .image }}:{{ .imageVersion | default $.Values.global.defaultImageVersion }} + imagePullPolicy: {{ .imagePullPolicy | default $.Values.global.imagePullPolicy }} + ports: + {{- range $cport := .ports }} + - containerPort: {{ $cport.containerPort -}} + {{ end }} + {{- if hasKey . "environments" }} + env: + {{- range $variable, $value := .environments }} + - name: {{ $variable }} + value: {{ $value | quote }} + {{- end }} + {{- else if hasKey $.Values.global.memcached "environments" }} + env: + {{- range $variable, $value := $.Values.global.memcached.environments }} + - name: {{ $variable }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + {{- if .args}} + args: + {{- range $arg := .args}} + - {{ $arg | quote }} + {{- end -}} + {{- end }} + {{- if .resources }} + resources: + {{ tpl .resources $ | nindent 10 | trim }} + {{- else if hasKey $.Values.global "resources" }} + resources: + {{ tpl $.Values.global.resources $ | nindent 10 | trim }} + {{- end }} + {{- end -}} + {{- if hasKey .Values "topologySpreadConstraints" }} + topologySpreadConstraints: + {{ tpl .Values.topologySpreadConstraints . | nindent 6 | trim }} + {{- else if hasKey $.Values.global.memcached "topologySpreadConstraints" }} + topologySpreadConstraints: + {{ tpl $.Values.global.memcached.topologySpreadConstraints . | nindent 6 | trim }} + {{- end }} + hostname: {{ .Values.name }}-{{ include "hotel-reservation.fullname" . }} + restartPolicy: {{ .Values.restartPolicy | default .Values.global.restartPolicy}} + {{- if .Values.affinity }} + affinity: {{- toYaml .Values.affinity | nindent 8 }} + {{- else if hasKey $.Values.global "affinity" }} + affinity: {{- toYaml .Values.global.affinity | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- toYaml .Values.tolerations | nindent 8 }} + {{- else if hasKey $.Values.global "tolerations" }} + tolerations: {{- toYaml .Values.global.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- toYaml .Values.nodeSelector | nindent 8 }} + {{- else if hasKey $.Values.global "nodeSelector" }} + nodeSelector: {{- toYaml .Values.global.nodeSelector | nindent 8 }} + {{- end }} +{{- end}} +{{- end}} +{{- end}} diff --git a/hotelReservation/helm-chart/hotelreservation/templates/_baseDeploymentMongoDB.tpl b/hotelReservation/helm-chart/hotelreservation/templates/_baseDeploymentMongoDB.tpl new file mode 100644 index 000000000..539304cb5 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/templates/_baseDeploymentMongoDB.tpl @@ -0,0 +1,95 @@ +{{- define "hotelreservation.templates.baseDeploymentMongoDB" }} +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + {{- include "hotel-reservation.labels" . | nindent 4 }} + {{- include "hotel-reservation.backendLabels" . 
| nindent 4 }} + service: {{ .Values.name }}-{{ include "hotel-reservation.fullname" . }} + name: {{ .Values.name }}-{{ include "hotel-reservation.fullname" . }} +spec: + replicas: {{ .Values.replicas | default .Values.global.replicas }} + selector: + matchLabels: + {{- include "hotel-reservation.selectorLabels" . | nindent 6 }} + {{- include "hotel-reservation.backendLabels" . | nindent 6 }} + service: {{ .Values.name }}-{{ include "hotel-reservation.fullname" . }} + app: {{ .Values.name }}-{{ include "hotel-reservation.fullname" . }} + template: + metadata: + labels: + {{- include "hotel-reservation.labels" . | nindent 8 }} + {{- include "hotel-reservation.backendLabels" . | nindent 8 }} + service: {{ .Values.name }}-{{ include "hotel-reservation.fullname" . }} + app: {{ .Values.name }}-{{ include "hotel-reservation.fullname" . }} + {{- if hasKey $.Values "annotations" }} + annotations: + {{ tpl $.Values.annotations . | nindent 8 | trim }} + {{- else if hasKey $.Values.global "annotations" }} + annotations: + {{ tpl $.Values.global.annotations . | nindent 8 | trim }} + {{- end }} + spec: + containers: + {{- with .Values.container }} + - name: "{{ .name }}" + image: {{ .dockerRegistry | default $.Values.global.dockerRegistry }}/{{ .image }}:{{ .imageVersion | default $.Values.global.defaultImageVersion }} + imagePullPolicy: {{ .imagePullPolicy | default $.Values.global.imagePullPolicy }} + ports: + {{- range $cport := .ports }} + - containerPort: {{ $cport.containerPort -}} + {{ end }} + {{- if .command}} + command: + - {{ .command }} + {{- end -}} + {{- if .args}} + args: + {{- range $arg := .args}} + - {{ $arg }} + {{- end -}} + {{- end }} + {{- if .resources }} + resources: + {{ tpl .resources $ | nindent 10 | trim }} + {{- else if hasKey $.Values.global "resources" }} + resources: + {{ tpl $.Values.global.resources $ | nindent 10 | trim }} + {{- end }} + volumeMounts: + - mountPath: /data/db + name: {{ $.Values.name }}-{{ include "hotel-reservation.fullname" $ }}-path + {{- end }} + volumes: + - name: {{ .Values.name }}-{{ include "hotel-reservation.fullname" . }}-path + {{- if $.Values.global.mongodb.persistentVolume.enabled }} + persistentVolumeClaim: + claimName: {{ .Values.name }}-{{ include "hotel-reservation.fullname" . }}-pvc + {{- else }} + emptyDir: {} + {{- end }} + {{- if hasKey .Values "topologySpreadConstraints" }} + topologySpreadConstraints: + {{ tpl .Values.topologySpreadConstraints . | nindent 6 | trim }} + {{- else if hasKey $.Values.global.mongodb "topologySpreadConstraints" }} + topologySpreadConstraints: + {{ tpl $.Values.global.mongodb.topologySpreadConstraints . | nindent 6 | trim }} + {{- end }} + hostname: {{ .Values.name }}-{{ include "hotel-reservation.fullname" . 
}} + restartPolicy: {{ .Values.restartPolicy | default .Values.global.restartPolicy}} + {{- if .Values.affinity }} + affinity: {{- toYaml .Values.affinity | nindent 8 }} + {{- else if hasKey $.Values.global "affinity" }} + affinity: {{- toYaml .Values.global.affinity | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- toYaml .Values.tolerations | nindent 8 }} + {{- else if hasKey $.Values.global "tolerations" }} + tolerations: {{- toYaml .Values.global.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- toYaml .Values.nodeSelector | nindent 8 }} + {{- else if hasKey $.Values.global "nodeSelector" }} + nodeSelector: {{- toYaml .Values.global.nodeSelector | nindent 8 }} + {{- end }} +{{- end}} diff --git a/hotelReservation/helm-chart/hotelreservation/templates/_baseDeploymentServices.tpl b/hotelReservation/helm-chart/hotelreservation/templates/_baseDeploymentServices.tpl new file mode 100644 index 000000000..7f9e923d1 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/templates/_baseDeploymentServices.tpl @@ -0,0 +1,108 @@ +{{- define "hotelreservation.templates.baseDeploymentServices" }} +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + {{- include "hotel-reservation.labels" . | nindent 4 }} + service: {{ .Values.name }}-{{ include "hotel-reservation.fullname" . }} + name: {{ .Values.name }}-{{ include "hotel-reservation.fullname" . }} +spec: + replicas: {{ .Values.replicas | default .Values.global.replicas }} + selector: + matchLabels: + {{- include "hotel-reservation.selectorLabels" . | nindent 6 }} + service: {{ .Values.name }}-{{ include "hotel-reservation.fullname" . }} + app: {{ .Values.name }}-{{ include "hotel-reservation.fullname" . }} + template: + metadata: + labels: + {{- include "hotel-reservation.labels" . | nindent 8 }} + service: {{ .Values.name }}-{{ include "hotel-reservation.fullname" . }} + app: {{ .Values.name }}-{{ include "hotel-reservation.fullname" . }} + {{- if hasKey $.Values "annotations" }} + annotations: + {{ tpl $.Values.annotations . | nindent 8 | trim }} + {{- else if hasKey $.Values.global "annotations" }} + annotations: + {{ tpl $.Values.global.annotations . | nindent 8 | trim }} + {{- end }} + spec: + containers: + {{- with .Values.container }} + - name: "{{ .name }}" + image: {{ .dockerRegistry | default $.Values.global.dockerRegistry }}/{{ .image }}:{{ .imageVersion | default $.Values.global.defaultImageVersion }} + imagePullPolicy: {{ .imagePullPolicy | default $.Values.global.imagePullPolicy }} + ports: + {{- range $cport := .ports }} + - containerPort: {{ $cport.containerPort -}} + {{ end }} + {{- if hasKey . 
"environments" }} + env: + {{- range $variable, $value := .environments }} + - name: {{ $variable }} + value: {{ $value | quote }} + {{- end }} + {{- else if hasKey $.Values.global.services "environments" }} + env: + {{- range $variable, $value := $.Values.global.services.environments }} + - name: {{ $variable }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + {{- if .command}} + command: + - {{ .command }} + {{- end -}} + {{- if .args}} + args: + {{- range $arg := .args}} + - {{ $arg }} + {{- end -}} + {{- end }} + {{- if .resources }} + resources: + {{ tpl .resources $ | nindent 10 | trim }} + {{- else if hasKey $.Values.global "resources" }} + resources: + {{ tpl $.Values.global.resources $ | nindent 10 | trim }} + {{- end }} + {{- if $.Values.configMaps }} + volumeMounts: + {{- range $configMap := $.Values.configMaps }} + - name: {{ $.Values.name }}-{{ include "hotel-reservation.fullname" $ }}-config + mountPath: {{ $configMap.mountPath }} + subPath: {{ $configMap.name }} + {{- end }} + {{- end }} + {{- end -}} + {{- if $.Values.configMaps }} + volumes: + - name: {{ $.Values.name }}-{{ include "hotel-reservation.fullname" $ }}-config + configMap: + name: {{ $.Values.name }}-{{ include "hotel-reservation.fullname" $ }} + {{- end }} + {{- if hasKey .Values "topologySpreadConstraints" }} + topologySpreadConstraints: + {{ tpl .Values.topologySpreadConstraints . | nindent 6 | trim }} + {{- else if hasKey $.Values.global "topologySpreadConstraints" }} + topologySpreadConstraints: + {{ tpl $.Values.global.topologySpreadConstraints . | nindent 6 | trim }} + {{- end }} + hostname: {{ .Values.name }}-{{ include "hotel-reservation.fullname" . }} + restartPolicy: {{ .Values.restartPolicy | default .Values.global.restartPolicy}} + {{- if .Values.affinity }} + affinity: {{- toYaml .Values.affinity | nindent 8 }} + {{- else if hasKey $.Values.global "affinity" }} + affinity: {{- toYaml .Values.global.affinity | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- toYaml .Values.tolerations | nindent 8 }} + {{- else if hasKey $.Values.global "tolerations" }} + tolerations: {{- toYaml .Values.global.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- toYaml .Values.nodeSelector | nindent 8 }} + {{- else if hasKey $.Values.global "nodeSelector" }} + nodeSelector: {{- toYaml .Values.global.nodeSelector | nindent 8 }} + {{- end }} +{{- end}} diff --git a/hotelReservation/helm-chart/hotelreservation/templates/_basePersistentVolume.tpl b/hotelReservation/helm-chart/hotelreservation/templates/_basePersistentVolume.tpl new file mode 100644 index 000000000..aa0366df8 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/templates/_basePersistentVolume.tpl @@ -0,0 +1,21 @@ +{{- define "hotelreservation.templates.basePersistentVolume" }} +{{- if .Values.global.mongodb.persistentVolume.enabled }} +{{- if .Values.global.mongodb.persistentVolume.hostPath.enabled }} +apiVersion: v1 +kind: PersistentVolume +metadata: + name: {{ .Values.name }}-{{ include "hotel-reservation.fullname" . }}-pv + labels: + app-name: {{ .Values.name }}-{{ include "hotel-reservation.fullname" . 
}} +spec: + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + capacity: + storage: {{ .Values.global.mongodb.persistentVolume.size }} + hostPath: + path: {{ .Values.global.mongodb.persistentVolume.hostPath.path }}/{{ .Values.name }}-pv + type: DirectoryOrCreate +{{- end }} +{{- end }} +{{- end }} diff --git a/hotelReservation/helm-chart/hotelreservation/templates/_basePersistentVolumeClaim.tpl b/hotelReservation/helm-chart/hotelreservation/templates/_basePersistentVolumeClaim.tpl new file mode 100644 index 000000000..85e3fa7aa --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/templates/_basePersistentVolumeClaim.tpl @@ -0,0 +1,17 @@ +{{- define "hotelreservation.templates.basePersistentVolumeClaim" }} +{{- if .Values.global.mongodb.persistentVolume.pvprovisioner.enabled }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ .Values.name }}-{{ include "hotel-reservation.fullname" . }}-pvc +spec: + {{- if .Values.global.mongodb.persistentVolume.pvprovisioner.storageClassName }} + storageClassName: {{ .Values.global.mongodb.persistentVolume.pvprovisioner.storageClassName }} + {{- end }} + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ .Values.global.mongodb.persistentVolume.size }} +{{- end }} +{{- end }} diff --git a/hotelReservation/helm-chart/hotelreservation/templates/_baseService.tpl b/hotelReservation/helm-chart/hotelreservation/templates/_baseService.tpl new file mode 100644 index 000000000..0dcc05848 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/templates/_baseService.tpl @@ -0,0 +1,20 @@ +{{- define "hotelreservation.templates.baseService" }} +apiVersion: v1 +kind: Service +metadata: + name: {{ .Values.name }}-{{ include "hotel-reservation.fullname" . }} +spec: + type: {{ .Values.serviceType | default .Values.global.serviceType }} + ports: + {{- range .Values.ports }} + - name: "{{ .port }}" + port: {{ .port }} + {{- if .protocol}} + protocol: {{ .protocol }} + {{- end }} + targetPort: {{ .targetPort }} + {{- end }} + selector: + {{- include "hotel-reservation.selectorLabels" . | nindent 4 }} + service: {{ .Values.name }}-{{ include "hotel-reservation.fullname" . }} +{{- end }} diff --git a/hotelReservation/helm-chart/hotelreservation/templates/_baseServiceMemcached.tpl b/hotelReservation/helm-chart/hotelreservation/templates/_baseServiceMemcached.tpl new file mode 100644 index 000000000..8c119990e --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/templates/_baseServiceMemcached.tpl @@ -0,0 +1,27 @@ +{{- define "hotelreservation.templates.baseServiceMemcached" }} +{{- $count := add .Values.global.memcached.HACount 1 | int }} +{{- range $key, $item := untilStep 1 $count 1 }} +{{- $rangeItem := $item -}} +{{- with $ }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ .Values.name }}-{{ $rangeItem }}-{{ include "hotel-reservation.fullname" . }} +spec: + type: {{ .Values.serviceType | default .Values.global.serviceType }} + ports: + {{- range .Values.ports }} + - name: "{{ .port }}" + port: {{ .port }} + {{- if .protocol}} + protocol: {{ .protocol }} + {{- end }} + targetPort: {{ .targetPort }} + {{- end }} + selector: + {{- include "hotel-reservation.selectorLabels" . | nindent 4 }} + service: {{ .Values.name }}-{{ $rangeItem }}-{{ include "hotel-reservation.fullname" . 
}} +{{- end }} +{{- end }} +{{- end }} diff --git a/hotelReservation/helm-chart/hotelreservation/templates/_helpers.tpl b/hotelReservation/helm-chart/hotelreservation/templates/_helpers.tpl new file mode 100644 index 000000000..2f49e33c6 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/templates/_helpers.tpl @@ -0,0 +1,80 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "hotel-reservation.name" -}} +{{- default .Chart.Name .Values.global.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "hotel-reservation.fullname" -}} +{{- if .Values.global.fullnameOverride }} +{{- .Values.global.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Values.global.mainChart .Values.global.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "hotel-reservation.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "hotel-reservation.labels" -}} +helm.sh/chart: {{ include "hotel-reservation.chart" . }} +{{ include "hotel-reservation.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "hotel-reservation.selectorLabels" -}} +app.kubernetes.io/name: {{ include "hotel-reservation.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Generate the list of memcached service addresses +Usage: + include "hotel-reservation.generateMemcAddr" (list context count name port) + e.g. + "ProfileMemcAddress": include "hotel-reservation.generateMemcAddr" (list . 2 "memcached-profile" 11211) +*/}} +{{- define "hotel-reservation.generateMemcAddr" -}} + {{- $mapToCheck := index . 0 }} + {{- $count := add (index . 1) 1 | int }} + {{- $name := index . 2 }} + {{- $port := index . 3 | int }} + {{- $fullname := include "hotel-reservation.fullname" $mapToCheck }} + {{- $appendix := printf "%s.%s.svc.%s:%d" $fullname $mapToCheck.Release.Namespace $mapToCheck.Values.global.serviceDnsDomain $port }} + {{- $addrlist := list }} + {{- range $key, $item := untilStep 1 $count 1 }} + {{- $addr := printf "%s-%d-%s" $name $item $appendix }} + {{- $addrlist = append $addrlist $addr }} + {{- end }} + {{- join "," $addrlist | toJson }} +{{- end }} + +{{/* +Backend labels +*/}} +{{- define "hotel-reservation.backendLabels" -}} +backend: {{ .Chart.Name | regexFind "[^-]*" }} +{{- end }} diff --git a/hotelReservation/helm-chart/hotelreservation/templates/configs/service-config.tpl b/hotelReservation/helm-chart/hotelreservation/templates/configs/service-config.tpl new file mode 100644 index 000000000..0285fc432 --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/templates/configs/service-config.tpl @@ -0,0 +1,24 @@ +{{- define "hotelreservation.templates.service-config.json" }} +{ + "consulAddress": "consul-{{ include "hotel-reservation.fullname" . 
}}.{{ .Release.Namespace }}.svc.{{ .Values.global.serviceDnsDomain }}:8500", + "jaegerAddress": "jaeger-{{ include "hotel-reservation.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.serviceDnsDomain }}:6831", + "FrontendPort": "5000", + "GeoPort": "8083", + "GeoMongoAddress": "mongodb-geo-{{ include "hotel-reservation.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.serviceDnsDomain }}:27018", + "ProfilePort": "8081", + "ProfileMongoAddress": "mongodb-profile-{{ include "hotel-reservation.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.serviceDnsDomain }}:27019", + "ProfileMemcAddress": {{ include "hotel-reservation.generateMemcAddr" (list . .Values.global.memcached.HACount "memcached-profile" 11213)}}, + "RatePort": "8084", + "RateMongoAddress": "mongodb-rate-{{ include "hotel-reservation.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.serviceDnsDomain }}:27020", + "RateMemcAddress": {{ include "hotel-reservation.generateMemcAddr" (list . .Values.global.memcached.HACount "memcached-rate" 11212)}}, + "RecommendPort": "8085", + "RecommendMongoAddress": "mongodb-recommendation-{{ include "hotel-reservation.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.serviceDnsDomain }}:27021", + "ReservePort": "8087", + "ReserveMongoAddress": "mongodb-reservation-{{ include "hotel-reservation.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.serviceDnsDomain }}:27022", + "ReserveMemcAddress": {{ include "hotel-reservation.generateMemcAddr" (list . .Values.global.memcached.HACount "memcached-reserve" 11214)}}, + "SearchPort": "8082", + "UserPort": "8086", + "UserMongoAddress": "mongodb-user-{{ include "hotel-reservation.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.serviceDnsDomain }}:27023" +} +{{- end }} + diff --git a/hotelReservation/helm-chart/hotelreservation/values.yaml b/hotelReservation/helm-chart/hotelreservation/values.yaml new file mode 100644 index 000000000..a80f9662d --- /dev/null +++ b/hotelReservation/helm-chart/hotelreservation/values.yaml @@ -0,0 +1,40 @@ +global: + nameOverride: "" + fullnameOverride: "" + mainChart: hotelres + replicas: 1 + imagePullPolicy: "IfNotPresent" + restartPolicy: Always + serviceType: ClusterIP + dockerRegistry: docker.io + defaultImageVersion: latest + serviceDnsDomain: "cluster.local" + services: + environments: + # TLS enablement + # - 0: Disable + # - 1: Enabled, using default cipher suite based on golang runtime + # - TLS_XXX: Enabled, using the specified Cipher suite, see tls/options.go#L22 + TLS: 0 + LOG_LEVEL: "INFO" + JAEGER_SAMPLE_RATIO: "0.01" + MEMC_TIMEOUT: "2" + GC: "100" + affinity: {} + tolerations: [] + nodeSelector: {} + memcached: + HACount: 1 + environments: + MEMCACHED_CACHE_SIZE: "128" + MEMCACHED_THREADS: "2" + mongodb: + persistentVolume: # use hostPath or pvprovisioner + enabled: false + size: "1Gi" + hostPath: + enabled: false + path: /tmp + pvprovisioner: + enabled: false + storageClassName: ceph-fs # optional diff --git a/hotelReservation/knative/README.md b/hotelReservation/knative/README.md new file mode 100644 index 000000000..6db2f7f22 --- /dev/null +++ b/hotelReservation/knative/README.md @@ -0,0 +1,62 @@ +# Hotel Reservation Knative workloads + +The application implements a hotel reservation service, build with Go and gRPC, and starting from the open-source project https://github.com/harlow/go-micro-services. 
The initial project is extended in several ways, including adding back-end in-memory and persistent databases, adding a recommender system for obtaining hotel recommendations, and adding the functionality to place a hotel reservation. + +**Hotel Reservation** includes the relationships of end-to-end services are as follows: + + + +In **Knative workloads**, the services marked **Green** are refactored from k8s services to Knative services: + + + +The ``Frontend Service`` and the ``Backend storage services`` are not transfered to knative, which represents the middle layer mostly deal with the high volumes of HTTP traffic in FaaS workloads. + + +## Pre-requirements +- Kubernetes +- Knative Serving +- Istio + +## Running the hotel reservation application +### Before you start + +- Make sure your kubernetes cluster running steadily +- Install ``Knative Serving`` and ``Istio`` + - Install ``knative Serving`` following Doc: + [Install Serving](https://knative.dev/docs/install/yaml-install/serving/install-serving-with-yaml/#install-the-knative-serving-component) + - Install`` a networking layer Istio`` following Doc: + [Install a networking layer Istio for knative](https://knative.dev/docs/install/yaml-install/serving/install-serving-with-yaml/#install-a-networking-layer) + - Configuring DNS ``sslip.so`` as domain name of the ingress gateway IP: + [Configuring DNS for Istio](https://knative.dev/docs/install/installing-istio/#verifying-your-istio-install) + +## Deploy knative services in kubernetes +### Build images +```bash +cd /hotelReservation/knative/scripts +./update-config.sh +cd ../.. +docker build . -t $yourdockername +``` +### Deploy services in kubernetes and knative +```bash +cd /hotelReservation/knative/scripts +./deploy-knative-svc.sh +``` +### Verify knative services are ready +```bash +kubectl get ksvc +# you will see the output like the following +NAME URL LATESTCREATED LATESTREADY READY REASON +srv-geo http://srv-geo.default.10.108.189.25.sslip.io srv-geo-00001 srv-geo-00001 True +srv-profile http://srv-profile.default.10.108.189.25.sslip.io srv-profile-00001 srv-profile-00001 True +srv-rate http://srv-rate.default.10.108.189.25.sslip.io srv-rate-00001 srv-rate-00001 True +srv-recommendation http://srv-recommendation.default.10.108.189.25.sslip.io srv-recommendation-00001 srv-recommendation-00001 True +srv-reservation http://srv-reservation.default.10.108.189.25.sslip.io srv-reservation-00001 srv-reservation-00001 True +srv-search http://srv-search.default.10.108.189.25.sslip.io srv-search-00001 srv-search-00001 True +srv-user http://srv-user.default.10.108.189.25.sslip.io srv-user-00001 srv-user-00001 True +``` +### Workload generation +```bash +../wrk2/wrk -D exp -t -c -d -L -s ./wrk2/scripts/hotel-reservation/mixed-workload_type_1.lua http://x.x.x.x:5000 -R +``` diff --git a/hotelReservation/knative/consul-deployment.yaml b/hotelReservation/knative/consul-deployment.yaml new file mode 100644 index 000000000..684415fd2 --- /dev/null +++ b/hotelReservation/knative/consul-deployment.yaml @@ -0,0 +1,43 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.22.0 (955b78124) + creationTimestamp: null + labels: + io.kompose.service: consul + name: consul +spec: + replicas: 1 + selector: + matchLabels: + io.kompose.service: consul + strategy: {} + template: + metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.22.0 (955b78124) + sidecar.istio.io/statsInclusionPrefixes: 
cluster.outbound,cluster_manager,listener_manager,http_mixer_filter,tcp_mixer_filter,server,cluster.xds-grp,listener,connection_manager + sidecar.istio.io/statsInclusionRegexps: http.* + creationTimestamp: null + labels: + io.kompose.service: consul + spec: + containers: + - image: hashicorp/consul:latest + name: consul + ports: + - containerPort: 8300 + - containerPort: 8400 + - containerPort: 8500 + - containerPort: 53 + protocol: UDP + resources: + requests: + cpu: 100m + limits: + cpu: 1000m + restartPolicy: Always +status: {} diff --git a/hotelReservation/knative/consul-service.yaml b/hotelReservation/knative/consul-service.yaml new file mode 100644 index 000000000..d72f1567d --- /dev/null +++ b/hotelReservation/knative/consul-service.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.22.0 (955b78124) + creationTimestamp: null + labels: + io.kompose.service: consul + name: consul +spec: + ports: + - name: "8300" + port: 8300 + targetPort: 8300 + - name: "8400" + port: 8400 + targetPort: 8400 + - name: "8500" + port: 8500 + targetPort: 8500 + - name: "8600" + port: 8600 + protocol: UDP + targetPort: 53 + selector: + io.kompose.service: consul +status: + loadBalancer: {} diff --git a/hotelReservation/knative/frontend-deployment.yaml b/hotelReservation/knative/frontend-deployment.yaml new file mode 100644 index 000000000..ca93936a7 --- /dev/null +++ b/hotelReservation/knative/frontend-deployment.yaml @@ -0,0 +1,41 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.22.0 (955b78124) + creationTimestamp: null + labels: + io.kompose.service: frontend + name: frontend +spec: + replicas: 1 + selector: + matchLabels: + io.kompose.service: frontend + strategy: {} + template: + metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.22.0 (955b78124) + sidecar.istio.io/statsInclusionPrefixes: cluster.outbound,cluster_manager,listener_manager,http_mixer_filter,tcp_mixer_filter,server,cluster.xds-grp,listener,connection_manager + sidecar.istio.io/statsInclusionRegexps: http.* + creationTimestamp: null + labels: + io.kompose.service: frontend + spec: + containers: + - command: + - frontend + image: teresaliuchang/hotel_demo:v1.7 + name: hotel-reserv-frontend + ports: + - containerPort: 5000 + resources: + requests: + cpu: 100m + limits: + cpu: 1000m + restartPolicy: Always +status: {} diff --git a/hotelReservation/knative/frontend-service.yaml b/hotelReservation/knative/frontend-service.yaml new file mode 100644 index 000000000..0a475f5a5 --- /dev/null +++ b/hotelReservation/knative/frontend-service.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.22.0 (955b78124) + creationTimestamp: null + labels: + io.kompose.service: frontend + name: frontend +spec: + ports: + - name: "5000" + port: 5000 + targetPort: 5000 + selector: + io.kompose.service: frontend +status: + loadBalancer: {} diff --git a/hotelReservation/knative/geo-persistent-volume.yaml b/hotelReservation/knative/geo-persistent-volume.yaml new file mode 100644 index 000000000..c709d60c6 --- /dev/null +++ b/hotelReservation/knative/geo-persistent-volume.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: geo-pv +spec: + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + capacity: + storage: 1Gi + storageClassName: geo-storage + hostPath: + path: 
/data/volumes/geo-pv # Where all the hard drives are mounted + type: DirectoryOrCreate diff --git a/hotelReservation/knative/geo-pvc.yaml b/hotelReservation/knative/geo-pvc.yaml new file mode 100644 index 000000000..31c09cf1e --- /dev/null +++ b/hotelReservation/knative/geo-pvc.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: geo-pvc +spec: + accessModes: + - ReadWriteOnce + storageClassName: geo-storage + resources: + requests: + storage: 1Gi \ No newline at end of file diff --git a/hotelReservation/knative/hotelReservation_architecture.png b/hotelReservation/knative/hotelReservation_architecture.png new file mode 100644 index 000000000..acc023578 Binary files /dev/null and b/hotelReservation/knative/hotelReservation_architecture.png differ diff --git a/hotelReservation/knative/hotelReservation_knative.png b/hotelReservation/knative/hotelReservation_knative.png new file mode 100644 index 000000000..5a3d1a4a1 Binary files /dev/null and b/hotelReservation/knative/hotelReservation_knative.png differ diff --git a/hotelReservation/knative/jaeger-deployment.yaml b/hotelReservation/knative/jaeger-deployment.yaml new file mode 100644 index 000000000..6e8db7286 --- /dev/null +++ b/hotelReservation/knative/jaeger-deployment.yaml @@ -0,0 +1,49 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.22.0 (955b78124) + creationTimestamp: null + labels: + io.kompose.service: jaeger + name: jaeger +spec: + replicas: 1 + selector: + matchLabels: + io.kompose.service: jaeger + strategy: {} + template: + metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.22.0 (955b78124) + sidecar.istio.io/statsInclusionPrefixes: cluster.outbound,cluster_manager,listener_manager,http_mixer_filter,tcp_mixer_filter,server,cluster.xds-grp,listener,connection_manager + sidecar.istio.io/statsInclusionRegexps: http.* + creationTimestamp: null + labels: + io.kompose.service: jaeger + spec: + containers: + - image: jaegertracing/all-in-one:latest + name: hotel-reserv-jaeger + ports: + - containerPort: 14269 + - containerPort: 5778 + - containerPort: 14268 + - containerPort: 14267 + - containerPort: 16686 + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + resources: + requests: + cpu: 100m + limits: + cpu: 1000m + restartPolicy: Always +status: {} diff --git a/hotelReservation/knative/jaeger-service.yaml b/hotelReservation/knative/jaeger-service.yaml new file mode 100644 index 000000000..d12082457 --- /dev/null +++ b/hotelReservation/knative/jaeger-service.yaml @@ -0,0 +1,43 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.22.0 (955b78124) + creationTimestamp: null + labels: + io.kompose.service: jaeger + name: jaeger +spec: + ports: + - name: "14269" + port: 14269 + targetPort: 14269 + - name: "5778" + port: 5778 + targetPort: 5778 + - name: "14268" + port: 14268 + targetPort: 14268 + - name: "14267" + port: 14267 + targetPort: 14267 + - name: "16686" + port: 16686 + targetPort: 16686 + - name: "5775" + port: 5775 + protocol: UDP + targetPort: 5775 + - name: "6831" + port: 6831 + protocol: UDP + targetPort: 6831 + - name: "6832" + port: 6832 + protocol: UDP + targetPort: 6832 + selector: + io.kompose.service: jaeger +status: + loadBalancer: {} diff --git a/hotelReservation/knative/memcached-profile-deployment.yaml b/hotelReservation/knative/memcached-profile-deployment.yaml 
new file mode 100644 index 000000000..1eb64a840 --- /dev/null +++ b/hotelReservation/knative/memcached-profile-deployment.yaml @@ -0,0 +1,44 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.22.0 (955b78124) + creationTimestamp: null + labels: + io.kompose.service: memcached-profile + name: memcached-profile +spec: + replicas: 1 + selector: + matchLabels: + io.kompose.service: memcached-profile + strategy: {} + template: + metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.22.0 (955b78124) + sidecar.istio.io/statsInclusionPrefixes: cluster.outbound,cluster_manager,listener_manager,http_mixer_filter,tcp_mixer_filter,server,cluster.xds-grp,listener,connection_manager + sidecar.istio.io/statsInclusionRegexps: http.* + creationTimestamp: null + labels: + io.kompose.service: memcached-profile + spec: + containers: + - env: + - name: MEMCACHED_CACHE_SIZE + value: "128" + - name: MEMCACHED_THREADS + value: "2" + image: memcached + name: hotel-reserv-profile-mmc + ports: + - containerPort: 11211 + resources: + requests: + cpu: 100m + limits: + cpu: 1000m + restartPolicy: Always +status: {} diff --git a/hotelReservation/knative/memcached-profile-service.yaml b/hotelReservation/knative/memcached-profile-service.yaml new file mode 100644 index 000000000..8c23c5ace --- /dev/null +++ b/hotelReservation/knative/memcached-profile-service.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.22.0 (955b78124) + creationTimestamp: null + labels: + io.kompose.service: memcached-profile + name: memcached-profile +spec: + ports: + - name: "memcached-profile" + port: 11211 + targetPort: 11211 + selector: + io.kompose.service: memcached-profile +status: + loadBalancer: {} diff --git a/hotelReservation/knative/memcached-rate-deployment.yaml b/hotelReservation/knative/memcached-rate-deployment.yaml new file mode 100644 index 000000000..35776eee0 --- /dev/null +++ b/hotelReservation/knative/memcached-rate-deployment.yaml @@ -0,0 +1,44 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.22.0 (955b78124) + creationTimestamp: null + labels: + io.kompose.service: memcached-rate + name: memcached-rate +spec: + replicas: 1 + selector: + matchLabels: + io.kompose.service: memcached-rate + strategy: {} + template: + metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.22.0 (955b78124) + sidecar.istio.io/statsInclusionPrefixes: cluster.outbound,cluster_manager,listener_manager,http_mixer_filter,tcp_mixer_filter,server,cluster.xds-grp,listener,connection_manager + sidecar.istio.io/statsInclusionRegexps: http.* + creationTimestamp: null + labels: + io.kompose.service: memcached-rate + spec: + containers: + - env: + - name: MEMCACHED_CACHE_SIZE + value: "128" + - name: MEMCACHED_THREADS + value: "2" + image: memcached + name: hotel-reserv-rate-mmc + ports: + - containerPort: 11211 + resources: + requests: + cpu: 100m + limits: + cpu: 1000m + restartPolicy: Always +status: {} \ No newline at end of file diff --git a/hotelReservation/knative/memcached-rate-service.yaml b/hotelReservation/knative/memcached-rate-service.yaml new file mode 100644 index 000000000..7bb046db4 --- /dev/null +++ b/hotelReservation/knative/memcached-rate-service.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 
1.22.0 (955b78124) + creationTimestamp: null + labels: + io.kompose.service: memcached-rate + name: memcached-rate +spec: + ports: + - name: "memcached-rate" + port: 11211 + targetPort: 11211 + selector: + io.kompose.service: memcached-rate +status: + loadBalancer: {} diff --git a/hotelReservation/knative/memcached-reservation-deployment.yaml b/hotelReservation/knative/memcached-reservation-deployment.yaml new file mode 100644 index 000000000..53e5cf29f --- /dev/null +++ b/hotelReservation/knative/memcached-reservation-deployment.yaml @@ -0,0 +1,44 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.22.0 (955b78124) + creationTimestamp: null + labels: + io.kompose.service: memcached-reserve + name: memcached-reserve +spec: + replicas: 1 + selector: + matchLabels: + io.kompose.service: memcached-reserve + strategy: {} + template: + metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.22.0 (955b78124) + sidecar.istio.io/statsInclusionPrefixes: cluster.outbound,cluster_manager,listener_manager,http_mixer_filter,tcp_mixer_filter,server,cluster.xds-grp,listener,connection_manager + sidecar.istio.io/statsInclusionRegexps: http.* + creationTimestamp: null + labels: + io.kompose.service: memcached-reserve + spec: + containers: + - env: + - name: MEMCACHED_CACHE_SIZE + value: "128" + - name: MEMCACHED_THREADS + value: "2" + image: memcached + name: hotel-reserv-reservation-mmc + ports: + - containerPort: 11211 + resources: + requests: + cpu: 100m + limits: + cpu: 1000m + restartPolicy: Always +status: {} diff --git a/hotelReservation/knative/memcached-reservation-service.yaml b/hotelReservation/knative/memcached-reservation-service.yaml new file mode 100644 index 000000000..a9c2f00bb --- /dev/null +++ b/hotelReservation/knative/memcached-reservation-service.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.22.0 (955b78124) + creationTimestamp: null + labels: + io.kompose.service: memcached-reserve + name: memcached-reserve +spec: + ports: + - name: "memcached-reserve" + port: 11211 + targetPort: 11211 + selector: + io.kompose.service: memcached-reserve +status: + loadBalancer: {} diff --git a/hotelReservation/knative/mongodb-geo-deployment.yaml b/hotelReservation/knative/mongodb-geo-deployment.yaml new file mode 100644 index 000000000..ac4755043 --- /dev/null +++ b/hotelReservation/knative/mongodb-geo-deployment.yaml @@ -0,0 +1,47 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.22.0 (955b78124) + creationTimestamp: null + labels: + io.kompose.service: mongodb-geo + name: mongodb-geo +spec: + replicas: 1 + selector: + matchLabels: + io.kompose.service: mongodb-geo + strategy: + type: Recreate + template: + metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.22.0 (955b78124) + sidecar.istio.io/statsInclusionPrefixes: cluster.outbound,cluster_manager,listener_manager,http_mixer_filter,tcp_mixer_filter,server,cluster.xds-grp,listener,connection_manager + sidecar.istio.io/statsInclusionRegexps: http.* + creationTimestamp: null + labels: + io.kompose.service: mongodb-geo + spec: + containers: + - image: mongo:4.4.6 + name: hotel-reserv-geo-mongo + ports: + - containerPort: 27017 + resources: + requests: + cpu: 100m + limits: + cpu: 1000m + volumeMounts: + - mountPath: /data/db + name: geo + restartPolicy: Always + volumes: + - name: geo + 
persistentVolumeClaim: + claimName: geo-pvc +status: {} diff --git a/hotelReservation/knative/mongodb-geo-service.yaml b/hotelReservation/knative/mongodb-geo-service.yaml new file mode 100644 index 000000000..4777c0c57 --- /dev/null +++ b/hotelReservation/knative/mongodb-geo-service.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.22.0 (955b78124) + creationTimestamp: null + labels: + io.kompose.service: mongodb-geo + name: mongodb-geo +spec: + ports: + - name: "mongodb-geo" + port: 27017 + targetPort: 27017 + selector: + io.kompose.service: mongodb-geo +status: + loadBalancer: {} diff --git a/hotelReservation/knative/mongodb-profile-deployment.yaml b/hotelReservation/knative/mongodb-profile-deployment.yaml new file mode 100644 index 000000000..d35380d54 --- /dev/null +++ b/hotelReservation/knative/mongodb-profile-deployment.yaml @@ -0,0 +1,48 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.22.0 (955b78124) + creationTimestamp: null + labels: + io.kompose.service: mongodb-profile + name: mongodb-profile +spec: + replicas: 1 + selector: + matchLabels: + io.kompose.service: mongodb-profile + strategy: + type: Recreate + template: + metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.22.0 (955b78124) + sidecar.istio.io/statsInclusionPrefixes: cluster.outbound,cluster_manager,listener_manager,http_mixer_filter,tcp_mixer_filter,server,cluster.xds-grp,listener,connection_manager + sidecar.istio.io/statsInclusionRegexps: http.* + creationTimestamp: null + labels: + io.kompose.service: mongodb-profile + spec: + containers: + - image: mongo:4.4.6 + name: hotel-reserv-profile-mongo + ports: + - containerPort: 27017 + resources: + requests: + cpu: 100m + limits: + cpu: 1000m + volumeMounts: + - mountPath: /data/db + name: profile + hostname: profile-db + restartPolicy: Always + volumes: + - name: profile + persistentVolumeClaim: + claimName: profile-pvc +status: {} diff --git a/hotelReservation/knative/mongodb-profile-service.yaml b/hotelReservation/knative/mongodb-profile-service.yaml new file mode 100644 index 000000000..26375f267 --- /dev/null +++ b/hotelReservation/knative/mongodb-profile-service.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.22.0 (955b78124) + creationTimestamp: null + labels: + io.kompose.service: mongodb-profile + name: mongodb-profile +spec: + ports: + - name: "mongodb-profile" + port: 27017 + targetPort: 27017 + selector: + io.kompose.service: mongodb-profile +status: + loadBalancer: {} diff --git a/hotelReservation/knative/mongodb-rate-deployment.yaml b/hotelReservation/knative/mongodb-rate-deployment.yaml new file mode 100644 index 000000000..d17256484 --- /dev/null +++ b/hotelReservation/knative/mongodb-rate-deployment.yaml @@ -0,0 +1,47 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.22.0 (955b78124) + creationTimestamp: null + labels: + io.kompose.service: mongodb-rate + name: mongodb-rate +spec: + replicas: 1 + selector: + matchLabels: + io.kompose.service: mongodb-rate + strategy: + type: Recreate + template: + metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.22.0 (955b78124) + sidecar.istio.io/statsInclusionPrefixes: 
cluster.outbound,cluster_manager,listener_manager,http_mixer_filter,tcp_mixer_filter,server,cluster.xds-grp,listener,connection_manager + sidecar.istio.io/statsInclusionRegexps: http.* + creationTimestamp: null + labels: + io.kompose.service: mongodb-rate + spec: + containers: + - image: mongo:4.4.6 + name: hotel-reserv-rate-mongo + ports: + - containerPort: 27017 + resources: + requests: + cpu: 100m + limits: + cpu: 1000m + volumeMounts: + - mountPath: /data/db + name: rate + restartPolicy: Always + volumes: + - name: rate + persistentVolumeClaim: + claimName: rate-pvc +status: {} diff --git a/hotelReservation/knative/mongodb-rate-service.yaml b/hotelReservation/knative/mongodb-rate-service.yaml new file mode 100644 index 000000000..39d0f18ca --- /dev/null +++ b/hotelReservation/knative/mongodb-rate-service.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.22.0 (955b78124) + creationTimestamp: null + labels: + io.kompose.service: mongodb-rate + name: mongodb-rate +spec: + ports: + - name: "mongodb-rate" + port: 27017 + targetPort: 27017 + selector: + io.kompose.service: mongodb-rate +status: + loadBalancer: {} diff --git a/hotelReservation/knative/mongodb-recommendation-deployment.yaml b/hotelReservation/knative/mongodb-recommendation-deployment.yaml new file mode 100644 index 000000000..8f16a29fb --- /dev/null +++ b/hotelReservation/knative/mongodb-recommendation-deployment.yaml @@ -0,0 +1,48 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.22.0 (955b78124) + creationTimestamp: null + labels: + io.kompose.service: mongodb-recommendation + name: mongodb-recommendation +spec: + replicas: 1 + selector: + matchLabels: + io.kompose.service: mongodb-recommendation + strategy: + type: Recreate + template: + metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.22.0 (955b78124) + sidecar.istio.io/statsInclusionPrefixes: cluster.outbound,cluster_manager,listener_manager,http_mixer_filter,tcp_mixer_filter,server,cluster.xds-grp,listener,connection_manager + sidecar.istio.io/statsInclusionRegexps: http.* + creationTimestamp: null + labels: + io.kompose.service: mongodb-recommendation + spec: + containers: + - image: mongo:4.4.6 + name: hotel-reserv-recommendation-mongo + ports: + - containerPort: 27017 + resources: + requests: + cpu: 100m + limits: + cpu: 1000m + volumeMounts: + - mountPath: /data/db + name: recommendation + hostname: recommendation-db + restartPolicy: Always + volumes: + - name: recommendation + persistentVolumeClaim: + claimName: recommendation-pvc +status: {} diff --git a/hotelReservation/knative/mongodb-recommendation-service.yaml b/hotelReservation/knative/mongodb-recommendation-service.yaml new file mode 100644 index 000000000..ff1d82ce0 --- /dev/null +++ b/hotelReservation/knative/mongodb-recommendation-service.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.22.0 (955b78124) + creationTimestamp: null + labels: + io.kompose.service: mongodb-recommendation + name: mongodb-recommendation +spec: + ports: + - name: "mongodb-recommendation" + port: 27017 + targetPort: 27017 + selector: + io.kompose.service: mongodb-recommendation +status: + loadBalancer: {} diff --git a/hotelReservation/knative/mongodb-reservation-deployment.yaml b/hotelReservation/knative/mongodb-reservation-deployment.yaml new file mode 100644 index 
000000000..01c2e816b --- /dev/null +++ b/hotelReservation/knative/mongodb-reservation-deployment.yaml @@ -0,0 +1,48 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.22.0 (955b78124) + creationTimestamp: null + labels: + io.kompose.service: mongodb-reservation + name: mongodb-reservation +spec: + replicas: 1 + selector: + matchLabels: + io.kompose.service: mongodb-reservation + strategy: + type: Recreate + template: + metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.22.0 (955b78124) + sidecar.istio.io/statsInclusionPrefixes: cluster.outbound,cluster_manager,listener_manager,http_mixer_filter,tcp_mixer_filter,server,cluster.xds-grp,listener,connection_manager + sidecar.istio.io/statsInclusionRegexps: http.* + creationTimestamp: null + labels: + io.kompose.service: mongodb-reservation + spec: + containers: + - image: mongo:4.4.6 + name: hotel-reserv-reservation-mongo + ports: + - containerPort: 27017 + resources: + requests: + cpu: 100m + limits: + cpu: 1000m + volumeMounts: + - mountPath: /data/db + name: reservation + hostname: reservation-db + restartPolicy: Always + volumes: + - name: reservation + persistentVolumeClaim: + claimName: reservation-pvc +status: {} diff --git a/hotelReservation/knative/mongodb-reservation-service.yaml b/hotelReservation/knative/mongodb-reservation-service.yaml new file mode 100644 index 000000000..6e23d4e70 --- /dev/null +++ b/hotelReservation/knative/mongodb-reservation-service.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.22.0 (955b78124) + creationTimestamp: null + labels: + io.kompose.service: mongodb-reservation + name: mongodb-reservation +spec: + ports: + - name: "mongodb-reservation" + port: 27017 + targetPort: 27017 + selector: + io.kompose.service: mongodb-reservation +status: + loadBalancer: {} diff --git a/hotelReservation/knative/mongodb-user-deployment.yaml b/hotelReservation/knative/mongodb-user-deployment.yaml new file mode 100644 index 000000000..1228b6277 --- /dev/null +++ b/hotelReservation/knative/mongodb-user-deployment.yaml @@ -0,0 +1,48 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.22.0 (955b78124) + creationTimestamp: null + labels: + io.kompose.service: mongodb-user + name: mongodb-user +spec: + replicas: 1 + selector: + matchLabels: + io.kompose.service: mongodb-user + strategy: + type: Recreate + template: + metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.22.0 (955b78124) + sidecar.istio.io/statsInclusionPrefixes: cluster.outbound,cluster_manager,listener_manager,http_mixer_filter,tcp_mixer_filter,server,cluster.xds-grp,listener,connection_manager + sidecar.istio.io/statsInclusionRegexps: http.* + creationTimestamp: null + labels: + io.kompose.service: mongodb-user + spec: + containers: + - image: mongo:4.4.6 + name: hotel-reserv-user-mongo + ports: + - containerPort: 27017 + resources: + requests: + cpu: 100m + limits: + cpu: 1000m + volumeMounts: + - mountPath: /data/db + name: user + hostname: user-db + restartPolicy: Always + volumes: + - name: user + persistentVolumeClaim: + claimName: user-pvc +status: {} \ No newline at end of file diff --git a/hotelReservation/knative/mongodb-user-service.yaml b/hotelReservation/knative/mongodb-user-service.yaml new file mode 100644 index 000000000..a2cd01940 --- /dev/null +++ 
b/hotelReservation/knative/mongodb-user-service.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.22.0 (955b78124) + creationTimestamp: null + labels: + io.kompose.service: mongodb-user + name: mongodb-user +spec: + ports: + - name: "mongodb-user" + port: 27017 + targetPort: 27017 + selector: + io.kompose.service: mongodb-user +status: + loadBalancer: {} diff --git a/hotelReservation/knative/profile-persistent-volume.yaml b/hotelReservation/knative/profile-persistent-volume.yaml new file mode 100644 index 000000000..72a76a856 --- /dev/null +++ b/hotelReservation/knative/profile-persistent-volume.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: profile-pv +spec: + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + capacity: + storage: 1Gi + storageClassName: profile-storage + hostPath: + path: /data/volumes/profile-pv # Where all the hard drives are mounted + type: DirectoryOrCreate diff --git a/hotelReservation/knative/profile-pvc.yaml b/hotelReservation/knative/profile-pvc.yaml new file mode 100644 index 000000000..5cc927086 --- /dev/null +++ b/hotelReservation/knative/profile-pvc.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: profile-pvc +spec: + accessModes: + - ReadWriteOnce + storageClassName: profile-storage + resources: + requests: + storage: 1Gi \ No newline at end of file diff --git a/hotelReservation/knative/rate-persistent-volume.yaml b/hotelReservation/knative/rate-persistent-volume.yaml new file mode 100644 index 000000000..d23286c41 --- /dev/null +++ b/hotelReservation/knative/rate-persistent-volume.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: rate-pv +spec: + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + capacity: + storage: 1Gi + storageClassName: rate-storage + hostPath: + path: /data/volumes/rate-pv # Where all the hard drives are mounted + type: DirectoryOrCreate diff --git a/hotelReservation/knative/rate-pvc.yaml b/hotelReservation/knative/rate-pvc.yaml new file mode 100644 index 000000000..fe5c55806 --- /dev/null +++ b/hotelReservation/knative/rate-pvc.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: rate-pvc +spec: + accessModes: + - ReadWriteOnce + storageClassName: rate-storage + resources: + requests: + storage: 1Gi \ No newline at end of file diff --git a/hotelReservation/knative/recommendation-persistent-volume.yaml b/hotelReservation/knative/recommendation-persistent-volume.yaml new file mode 100644 index 000000000..b634ca929 --- /dev/null +++ b/hotelReservation/knative/recommendation-persistent-volume.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: recommendation-pv +spec: + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + capacity: + storage: 1Gi + storageClassName: recommendation-storage + hostPath: + path: /data/volumes/recommendation-pv # Where all the hard drives are mounted + type: DirectoryOrCreate diff --git a/hotelReservation/knative/recommendation-pvc.yaml b/hotelReservation/knative/recommendation-pvc.yaml new file mode 100644 index 000000000..0364a07d3 --- /dev/null +++ b/hotelReservation/knative/recommendation-pvc.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: recommendation-pvc +spec: + accessModes: + - ReadWriteOnce + storageClassName: recommendation-storage + resources: + requests: + storage: 1Gi \ No newline at end of file 
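> **Note on the PV/PVC pattern above and below:** each MongoDB datastore pairs a hostPath PersistentVolume with a PersistentVolumeClaim through a service-specific `storageClassName` (`geo-storage`, `profile-storage`, `rate-storage`, and so on), so a claim can only ever bind to its own service's volume. A minimal sketch of checking one such pair, assuming the manifests are applied from the repository root into the `default` namespace:

```bash
# Apply one PV/PVC pair from the knative manifests and confirm binding.
kubectl apply -f hotelReservation/knative/geo-persistent-volume.yaml
kubectl apply -f hotelReservation/knative/geo-pvc.yaml

# Both should report STATUS "Bound", matched via storageClassName geo-storage.
kubectl get pv geo-pv
kubectl get pvc geo-pvc
```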
diff --git a/hotelReservation/knative/reservation-persistent-volume.yaml b/hotelReservation/knative/reservation-persistent-volume.yaml new file mode 100644 index 000000000..f93d7be55 --- /dev/null +++ b/hotelReservation/knative/reservation-persistent-volume.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: reservation-pv +spec: + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + capacity: + storage: 1Gi + storageClassName: reservation-storage + hostPath: + path: /data/volumes/reservation-pv # Where all the hard drives are mounted + type: DirectoryOrCreate diff --git a/hotelReservation/knative/reservation-pvc.yaml b/hotelReservation/knative/reservation-pvc.yaml new file mode 100644 index 000000000..a020d3cbf --- /dev/null +++ b/hotelReservation/knative/reservation-pvc.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: reservation-pvc +spec: + accessModes: + - ReadWriteOnce + storageClassName: reservation-storage + resources: + requests: + storage: 1Gi \ No newline at end of file diff --git a/hotelReservation/knative/scripts/deploy-knative-svc.sh b/hotelReservation/knative/scripts/deploy-knative-svc.sh new file mode 100644 index 000000000..590c96096 --- /dev/null +++ b/hotelReservation/knative/scripts/deploy-knative-svc.sh @@ -0,0 +1,20 @@ +#!/bin/bash + + +cd $(dirname $0)/.. + +for i in *.yaml +do + kubectl apply -f ${i} & +done +wait +sleep 30 + +kubectl apply -f svc/ & + +echo "Finishing in 30 seconds" +sleep 30 + +kubectl get pods + +cd - >/dev/null diff --git a/hotelReservation/knative/scripts/update-config.sh b/hotelReservation/knative/scripts/update-config.sh new file mode 100644 index 000000000..11a31443b --- /dev/null +++ b/hotelReservation/knative/scripts/update-config.sh @@ -0,0 +1,19 @@ +#!/bin/bash + + +cd $(dirname $0)/../../ + +istioIngress=$(kubectl get svc -n istio-system | grep "istio-ingressgateway" | awk '{print $3}') +namespace=default. 
+domainName=.sslip.io:80 + + +if [ `grep -c "KnativeDomainName" "config.json"` -ne '0' ];then + echo "Update config: KnativeDomainName" + sed -i '/"KnativeDomainName"/c\ \"KnativeDomainName\": \"'$namespace$istioIngress$domainName'\"' config.json + exit 0 +fi + +echo "Append config: KnativeDomainName" +sed -i '/"UserMongoAddress"/ s/$/&,/' config.json +sed -i '$i\ "KnativeDomainName\": \"'$namespace$istioIngress$domainName'\"' config.json diff --git a/hotelReservation/knative/svc/srv-geo.yaml b/hotelReservation/knative/svc/srv-geo.yaml new file mode 100644 index 000000000..1874124fa --- /dev/null +++ b/hotelReservation/knative/svc/srv-geo.yaml @@ -0,0 +1,23 @@ +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: srv-geo +spec: + template: + spec: + containers: + - command: + - geo + env: + - name: DLOG + value: DEBUG + image: teresaliuchang/hotel_demo:v1.7 + name: hotel-reserv-geo + ports: + - name: h2c + containerPort: 8083 + resources: + requests: + cpu: 100m + limits: + cpu: 1000m diff --git a/hotelReservation/knative/svc/srv-profile.yaml b/hotelReservation/knative/svc/srv-profile.yaml new file mode 100644 index 000000000..c17a387f4 --- /dev/null +++ b/hotelReservation/knative/svc/srv-profile.yaml @@ -0,0 +1,23 @@ +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: srv-profile +spec: + template: + spec: + containers: + - command: + - profile + env: + - name: DLOG + value: DEBUG + image: teresaliuchang/hotel_demo:v1.7 + name: hotel-reserv-profile + ports: + - name: h2c + containerPort: 8081 + resources: + requests: + cpu: 100m + limits: + cpu: 1000m diff --git a/hotelReservation/knative/svc/srv-rate.yaml b/hotelReservation/knative/svc/srv-rate.yaml new file mode 100644 index 000000000..cfc5f3805 --- /dev/null +++ b/hotelReservation/knative/svc/srv-rate.yaml @@ -0,0 +1,24 @@ +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: srv-rate +spec: + template: + spec: + containerConcurrency: 20 + containers: + - command: + - rate + env: + - name: DLOG + value: DEBUG + image: teresaliuchang/hotel_demo:v1.7 + name: hotel-reserv-rate + ports: + - name: h2c + containerPort: 8084 + resources: + requests: + cpu: 100m + limits: + cpu: 1000m diff --git a/hotelReservation/knative/svc/srv-recommendation.yaml b/hotelReservation/knative/svc/srv-recommendation.yaml new file mode 100644 index 000000000..39e8e9710 --- /dev/null +++ b/hotelReservation/knative/svc/srv-recommendation.yaml @@ -0,0 +1,24 @@ +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: srv-recommendation +spec: + template: + spec: + containerConcurrency: 20 + containers: + - command: + - recommendation + env: + - name: DLOG + value: DEBUG + image: teresaliuchang/hotel_demo:v1.7 + name: hotel-reserv-recommendation + ports: + - name: h2c + containerPort: 8085 + resources: + requests: + cpu: 100m + limits: + cpu: 1000m diff --git a/hotelReservation/knative/svc/srv-reservation.yaml b/hotelReservation/knative/svc/srv-reservation.yaml new file mode 100644 index 000000000..df88f066c --- /dev/null +++ b/hotelReservation/knative/svc/srv-reservation.yaml @@ -0,0 +1,23 @@ +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: srv-reservation +spec: + template: + spec: + containers: + - command: + - reservation + env: + - name: DLOG + value: DEBUG + image: teresaliuchang/hotel_demo:v1.7 + name: hotel-reserv-reservation + ports: + - name: h2c + containerPort: 8087 + resources: + requests: + cpu: 100m + limits: + cpu: 1000m diff --git 
a/hotelReservation/knative/svc/srv-search.yaml b/hotelReservation/knative/svc/srv-search.yaml new file mode 100644 index 000000000..55b22c69a --- /dev/null +++ b/hotelReservation/knative/svc/srv-search.yaml @@ -0,0 +1,24 @@ +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: srv-search +spec: + template: + spec: + containerConcurrency: 20 + containers: + - command: + - search + env: + - name: DLOG + value: DEBUG + image: teresaliuchang/hotel_demo:v1.7 + name: hotel-reserv-search + ports: + - name: h2c + containerPort: 8082 + resources: + requests: + cpu: 100m + limits: + cpu: 1000m diff --git a/hotelReservation/knative/svc/srv-user.yaml b/hotelReservation/knative/svc/srv-user.yaml new file mode 100644 index 000000000..f0fccc91a --- /dev/null +++ b/hotelReservation/knative/svc/srv-user.yaml @@ -0,0 +1,24 @@ +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: srv-user +spec: + template: + spec: + containerConcurrency: 20 + containers: + - command: + - user + env: + - name: DLOG + value: DEBUG + image: teresaliuchang/hotel_demo:v1.7 + name: hotel-reserv-user + ports: + - name: h2c + containerPort: 8086 + resources: + requests: + cpu: 100m + limits: + cpu: 1000m diff --git a/hotelReservation/knative/user-persistent-volume.yaml b/hotelReservation/knative/user-persistent-volume.yaml new file mode 100644 index 000000000..188ad39ce --- /dev/null +++ b/hotelReservation/knative/user-persistent-volume.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: user-pv +spec: + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + capacity: + storage: 1Gi + storageClassName: user-storage + hostPath: + path: /data/volumes/user-pv # Where all the hard drives are mounted + type: DirectoryOrCreate diff --git a/hotelReservation/knative/user-pvc.yaml b/hotelReservation/knative/user-pvc.yaml new file mode 100644 index 000000000..3c44408b6 --- /dev/null +++ b/hotelReservation/knative/user-pvc.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: user-pvc +spec: + accessModes: + - ReadWriteOnce + storageClassName: user-storage + resources: + requests: + storage: 1Gi \ No newline at end of file diff --git a/hotelReservation/kubernetes/consul/consul-deployment.yaml b/hotelReservation/kubernetes/consul/consul-deployment.yaml index 0012a7c23..684415fd2 100644 --- a/hotelReservation/kubernetes/consul/consul-deployment.yaml +++ b/hotelReservation/kubernetes/consul/consul-deployment.yaml @@ -26,7 +26,7 @@ spec: io.kompose.service: consul spec: containers: - - image: consul:latest + - image: hashicorp/consul:latest name: consul ports: - containerPort: 8300 diff --git a/hotelReservation/kubernetes/frontend/frontend-deployment.yaml b/hotelReservation/kubernetes/frontend/frontend-deployment.yaml index dffb3b20c..966471f76 100644 --- a/hotelReservation/kubernetes/frontend/frontend-deployment.yaml +++ b/hotelReservation/kubernetes/frontend/frontend-deployment.yaml @@ -27,8 +27,8 @@ spec: spec: containers: - command: - - frontend - image: igorrudyk1/hotelreservation:latest + - ./frontend + image: deathstarbench/hotel-reservation:latest name: hotel-reserv-frontend ports: - containerPort: 5000 diff --git a/hotelReservation/kubernetes/geo/geo-deployment.yaml b/hotelReservation/kubernetes/geo/geo-deployment.yaml index c597f909a..626f90820 100644 --- a/hotelReservation/kubernetes/geo/geo-deployment.yaml +++ b/hotelReservation/kubernetes/geo/geo-deployment.yaml @@ -27,8 +27,8 @@ spec: spec: containers: - command: - - geo - image: 
igorrudyk1/hotelreservation:latest + - ./geo + image: deathstarbench/hotel-reservation:latest name: hotel-reserv-geo ports: - containerPort: 8083 diff --git a/hotelReservation/kubernetes/profile/profile-deployment.yaml b/hotelReservation/kubernetes/profile/profile-deployment.yaml index 85cd0f40f..e4fc8c762 100644 --- a/hotelReservation/kubernetes/profile/profile-deployment.yaml +++ b/hotelReservation/kubernetes/profile/profile-deployment.yaml @@ -27,8 +27,8 @@ spec: spec: containers: - command: - - profile - image: igorrudyk1/hotelreservation:latest + - ./profile + image: deathstarbench/hotel-reservation:latest name: hotel-reserv-profile ports: - containerPort: 8081 diff --git a/hotelReservation/kubernetes/rate/rate-deployment.yaml b/hotelReservation/kubernetes/rate/rate-deployment.yaml index 715accc7a..f13bd4a9f 100644 --- a/hotelReservation/kubernetes/rate/rate-deployment.yaml +++ b/hotelReservation/kubernetes/rate/rate-deployment.yaml @@ -27,8 +27,8 @@ spec: spec: containers: - command: - - rate - image: igorrudyk1/hotelreservation:latest + - ./rate + image: deathstarbench/hotel-reservation:latest name: hotel-reserv-rate ports: - containerPort: 8084 diff --git a/hotelReservation/kubernetes/reccomend/recommendation-deployment.yaml b/hotelReservation/kubernetes/reccomend/recommendation-deployment.yaml index d3200966f..5d48aa3ff 100644 --- a/hotelReservation/kubernetes/reccomend/recommendation-deployment.yaml +++ b/hotelReservation/kubernetes/reccomend/recommendation-deployment.yaml @@ -27,8 +27,8 @@ spec: spec: containers: - command: - - recommendation - image: igorrudyk1/hotelreservation:latest + - ./recommendation + image: deathstarbench/hotel-reservation:latest name: hotel-reserv-recommendation ports: - containerPort: 8085 diff --git a/hotelReservation/kubernetes/reserve/reservation-deployment.yaml b/hotelReservation/kubernetes/reserve/reservation-deployment.yaml index 4ff5ffb1e..17592f2f0 100644 --- a/hotelReservation/kubernetes/reserve/reservation-deployment.yaml +++ b/hotelReservation/kubernetes/reserve/reservation-deployment.yaml @@ -27,8 +27,8 @@ spec: spec: containers: - command: - - reservation - image: igorrudyk1/hotelreservation:latest + - ./reservation + image: deathstarbench/hotel-reservation:latest name: hotel-reserv-reservation ports: - containerPort: 8087 diff --git a/hotelReservation/kubernetes/search/search-deployment.yaml b/hotelReservation/kubernetes/search/search-deployment.yaml index 128b0a815..d4bcc9c61 100644 --- a/hotelReservation/kubernetes/search/search-deployment.yaml +++ b/hotelReservation/kubernetes/search/search-deployment.yaml @@ -27,8 +27,8 @@ spec: spec: containers: - command: - - search - image: igorrudyk1/hotelreservation:latest + - ./search + image: deathstarbench/hotel-reservation:latest name: hotel-reserv-search ports: - containerPort: 8082 diff --git a/hotelReservation/kubernetes/user/user-deployment.yaml b/hotelReservation/kubernetes/user/user-deployment.yaml index 2b6713459..81d3e2a1d 100644 --- a/hotelReservation/kubernetes/user/user-deployment.yaml +++ b/hotelReservation/kubernetes/user/user-deployment.yaml @@ -27,8 +27,8 @@ spec: spec: containers: - command: - - user - image: igorrudyk1/hotelreservation:latest + - ./user + image: deathstarbench/hotel-reservation:latest name: hotel-reserv-user ports: - containerPort: 8086 diff --git a/hotelReservation/openshift/consul-deployment.yaml b/hotelReservation/openshift/consul-deployment.yaml index ce0986bcc..956d5aaa5 100644 --- a/hotelReservation/openshift/consul-deployment.yaml +++ 
b/hotelReservation/openshift/consul-deployment.yaml @@ -23,7 +23,7 @@ spec: sidecar.istio.io/inject: "true" spec: containers: - - image: consul:latest + - image: hashicorp/consul:latest name: consul ports: - containerPort: 8300 diff --git a/hotelReservation/openshift/frontend-deployment.yaml b/hotelReservation/openshift/frontend-deployment.yaml index fae7a119d..74ea76365 100644 --- a/hotelReservation/openshift/frontend-deployment.yaml +++ b/hotelReservation/openshift/frontend-deployment.yaml @@ -24,7 +24,7 @@ spec: spec: containers: - command: - - frontend + - ./frontend env: - name: DLOG value: DEBUG @@ -34,7 +34,7 @@ spec: - containerPort: 5000 resources: {} volumeMounts: - - mountPath: /go/src/github.com/harlow/go-micro-services/config.json + - mountPath: config.json subPath: config.json name: config-json restartPolicy: Always diff --git a/hotelReservation/openshift/geo-deployment.yaml b/hotelReservation/openshift/geo-deployment.yaml index b3166eaab..33f46a3d1 100644 --- a/hotelReservation/openshift/geo-deployment.yaml +++ b/hotelReservation/openshift/geo-deployment.yaml @@ -24,7 +24,7 @@ spec: spec: containers: - command: - - geo + - ./geo env: - name: DLOG value: DEBUG @@ -34,7 +34,7 @@ spec: - containerPort: 8083 resources: {} volumeMounts: - - mountPath: /go/src/github.com/harlow/go-micro-services/config.json + - mountPath: config.json subPath: config.json name: config-json restartPolicy: Always diff --git a/hotelReservation/openshift/hr-client.yaml b/hotelReservation/openshift/hr-client.yaml index d2a8612c8..45ae60005 100644 --- a/hotelReservation/openshift/hr-client.yaml +++ b/hotelReservation/openshift/hr-client.yaml @@ -19,19 +19,14 @@ spec: death-star-project: hotel-res app: hr-client name: hr-client -# annotations: -# sidecar.istio.io/inject: "true" spec: containers: - name: hr-client - image: ubuntu - command: ["/bin/sh", "-c"] + image: deathstarbench/wrk2-client + command: ["/bin/sh"] args: - - apt-get -y update && - apt-get -y upgrade && - apt-get -y install dnsutils git vim python3 python3-aiohttp libssl-dev libz-dev luarocks iputils-ping lynx build-essential gcc bash curl && - luarocks install luasocket && - sleep 365d + - -c + - sleep 365d imagePullPolicy: Always - restartPolicy: Always + restartPolicy: Always diff --git a/hotelReservation/openshift/memcached-profile-deployment.yaml b/hotelReservation/openshift/memcached-profile-deployment.yaml index 0c60a3133..a13f936cb 100644 --- a/hotelReservation/openshift/memcached-profile-deployment.yaml +++ b/hotelReservation/openshift/memcached-profile-deployment.yaml @@ -30,7 +30,6 @@ spec: value: "2" image: memcached name: hotel-reserv-profile-mmc - command: ["memcached", "-c", "65536"] ports: - containerPort: 11211 resources: {} diff --git a/hotelReservation/openshift/memcached-rate-deployment.yaml b/hotelReservation/openshift/memcached-rate-deployment.yaml index f105d1709..62871653d 100644 --- a/hotelReservation/openshift/memcached-rate-deployment.yaml +++ b/hotelReservation/openshift/memcached-rate-deployment.yaml @@ -30,7 +30,6 @@ spec: value: "2" image: memcached name: hotel-reserv-rate-mmc - command: ["memcached", "-c", "65536"] ports: - containerPort: 11211 resources: {} diff --git a/hotelReservation/openshift/memcached-reserve-deployment.yaml b/hotelReservation/openshift/memcached-reserve-deployment.yaml index 94b51d3af..12fdf370b 100644 --- a/hotelReservation/openshift/memcached-reserve-deployment.yaml +++ b/hotelReservation/openshift/memcached-reserve-deployment.yaml @@ -30,7 +30,6 @@ spec: value: "2" image: memcached 
name: hotel-reserv-reservation-mmc - command: ["memcached", "-c", "65536"] ports: - containerPort: 11211 resources: {} diff --git a/hotelReservation/openshift/rate-deployment.yaml b/hotelReservation/openshift/rate-deployment.yaml index 58651eebf..f3a64f688 100644 --- a/hotelReservation/openshift/rate-deployment.yaml +++ b/hotelReservation/openshift/rate-deployment.yaml @@ -24,7 +24,7 @@ spec: spec: containers: - command: - - rate + - ./rate env: - name: DLOG value: DEBUG @@ -34,7 +34,7 @@ spec: - containerPort: 8084 resources: {} volumeMounts: - - mountPath: /go/src/github.com/harlow/go-micro-services/config.json + - mountPath: config.json subPath: config.json name: config-json restartPolicy: Always diff --git a/hotelReservation/openshift/recommendation-deployment.yaml b/hotelReservation/openshift/recommendation-deployment.yaml index 7f4b97c9e..6b5b99006 100644 --- a/hotelReservation/openshift/recommendation-deployment.yaml +++ b/hotelReservation/openshift/recommendation-deployment.yaml @@ -24,7 +24,7 @@ spec: spec: containers: - command: - - recommendation + - ./recommendation env: - name: DLOG value: DEBUG @@ -34,7 +34,7 @@ spec: - containerPort: 8085 resources: {} volumeMounts: - - mountPath: /go/src/github.com/harlow/go-micro-services/config.json + - mountPath: config.json subPath: config.json name: config-json restartPolicy: Always diff --git a/hotelReservation/openshift/reservation-deployment.yaml b/hotelReservation/openshift/reservation-deployment.yaml index cd5654dfb..deedd48f3 100644 --- a/hotelReservation/openshift/reservation-deployment.yaml +++ b/hotelReservation/openshift/reservation-deployment.yaml @@ -24,7 +24,7 @@ spec: spec: containers: - command: - - reservation + - ./reservation env: - name: DLOG value: DEBUG @@ -34,7 +34,7 @@ spec: - containerPort: 8087 resources: {} volumeMounts: - - mountPath: /go/src/github.com/harlow/go-micro-services/config.json + - mountPath: config.json subPath: config.json name: config-json restartPolicy: Always diff --git a/hotelReservation/openshift/search-deployment.yaml b/hotelReservation/openshift/search-deployment.yaml index 1b46030cd..d8eeea0f0 100644 --- a/hotelReservation/openshift/search-deployment.yaml +++ b/hotelReservation/openshift/search-deployment.yaml @@ -24,7 +24,7 @@ spec: spec: containers: - command: - - search + - ./search env: - name: DLOG value: DEBUG @@ -34,7 +34,7 @@ spec: - containerPort: 8082 resources: {} volumeMounts: - - mountPath: /go/src/github.com/harlow/go-micro-services/config.json + - mountPath: config.json subPath: config.json name: config-json restartPolicy: Always diff --git a/hotelReservation/openshift/user-deployment.yaml b/hotelReservation/openshift/user-deployment.yaml index 18bb5a03a..f606adade 100644 --- a/hotelReservation/openshift/user-deployment.yaml +++ b/hotelReservation/openshift/user-deployment.yaml @@ -24,7 +24,7 @@ spec: spec: containers: - command: - - user + - ./user env: - name: DLOG value: DEBUG @@ -34,7 +34,7 @@ spec: - containerPort: 8086 resources: {} volumeMounts: - - mountPath: /go/src/github.com/harlow/go-micro-services/config.json + - mountPath: config.json subPath: config.json name: config-json restartPolicy: Always diff --git a/hotelReservation/services/frontend/server.go b/hotelReservation/services/frontend/server.go index 19327093b..d9ed0b083 100644 --- a/hotelReservation/services/frontend/server.go +++ b/hotelReservation/services/frontend/server.go @@ -6,18 +6,19 @@ import ( "net/http" "strconv" - recommendation
"github.com/harlow/go-micro-services/services/recommendation/proto" - reservation "github.com/harlow/go-micro-services/services/reservation/proto" - user "github.com/harlow/go-micro-services/services/user/proto" - review "github.com/harlow/go-micro-services/services/review/proto" + "google.golang.org/grpc" + + recommendation "github.com/delimitrou/DeathStarBench/hotelreservation/services/recommendation/proto" + reservation "github.com/delimitrou/DeathStarBench/hotelreservation/services/reservation/proto" + user "github.com/delimitrou/DeathStarBench/hotelreservation/services/user/proto" "github.com/rs/zerolog/log" - "github.com/harlow/go-micro-services/dialer" - "github.com/harlow/go-micro-services/registry" - profile "github.com/harlow/go-micro-services/services/profile/proto" - search "github.com/harlow/go-micro-services/services/search/proto" - "github.com/harlow/go-micro-services/tls" - "github.com/harlow/go-micro-services/tracing" + "github.com/delimitrou/DeathStarBench/hotelreservation/dialer" + "github.com/delimitrou/DeathStarBench/hotelreservation/registry" + profile "github.com/delimitrou/DeathStarBench/hotelreservation/services/profile/proto" + search "github.com/delimitrou/DeathStarBench/hotelreservation/services/search/proto" + "github.com/delimitrou/DeathStarBench/hotelreservation/tls" + "github.com/delimitrou/DeathStarBench/hotelreservation/tracing" "github.com/opentracing/opentracing-go" ) @@ -27,8 +28,8 @@ type Server struct { profileClient profile.ProfileClient recommendationClient recommendation.RecommendationClient userClient user.UserClient - reviewClient review.ReviewClient reservationClient reservation.ReservationClient + KnativeDns string IpAddr string Port int Tracer opentracing.Tracer @@ -61,11 +62,6 @@ func (s *Server) Run() error { if err := s.initReservation("srv-reservation"); err != nil { return err } - - if err := s.initReviewClient("srv-review"); err != nil { - return err - } - log.Info().Msg("Successfull") log.Trace().Msg("frontend before mux") @@ -74,7 +70,6 @@ func (s *Server) Run() error { mux.Handle("/hotels", http.HandlerFunc(s.searchHandler)) mux.Handle("/recommendations", http.HandlerFunc(s.recommendHandler)) mux.Handle("/user", http.HandlerFunc(s.userHandler)) - mux.Handle("/review", http.HandlerFunc(s.reviewHandler)) mux.Handle("/reservation", http.HandlerFunc(s.reservationHandler)) log.Trace().Msg("frontend starts serving") @@ -89,17 +84,13 @@ func (s *Server) Run() error { srv.TLSConfig = tlsconfig return srv.ListenAndServeTLS("x509/server_cert.pem", "x509/server_key.pem") } else { - log.Info().Msg("Serving https") + log.Info().Msg("Serving http") return srv.ListenAndServe() } } func (s *Server) initSearchClient(name string) error { - conn, err := dialer.Dial( - name, - dialer.WithTracer(s.Tracer), - dialer.WithBalancer(s.Registry.Client), - ) + conn, err := s.getGprcConn(name) if err != nil { return fmt.Errorf("dialer error: %v", err) } @@ -107,25 +98,8 @@ func (s *Server) initSearchClient(name string) error { return nil } -func (s *Server) initReviewClient(name string) error { - conn, err := dialer.Dial( - name, - dialer.WithTracer(s.Tracer), - dialer.WithBalancer(s.Registry.Client), - ) - if err != nil { - return fmt.Errorf("dialer error: %v", err) - } - s.reviewClient = review.NewReviewClient(conn) - return nil -} - func (s *Server) initProfileClient(name string) error { - conn, err := dialer.Dial( - name, - dialer.WithTracer(s.Tracer), - dialer.WithBalancer(s.Registry.Client), - ) + conn, err := s.getGprcConn(name) if err != nil { return 
fmt.Errorf("dialer error: %v", err) } @@ -134,11 +108,7 @@ func (s *Server) initProfileClient(name string) error { } func (s *Server) initRecommendationClient(name string) error { - conn, err := dialer.Dial( - name, - dialer.WithTracer(s.Tracer), - dialer.WithBalancer(s.Registry.Client), - ) + conn, err := s.getGprcConn(name) if err != nil { return fmt.Errorf("dialer error: %v", err) } @@ -147,11 +117,7 @@ func (s *Server) initRecommendationClient(name string) error { } func (s *Server) initUserClient(name string) error { - conn, err := dialer.Dial( - name, - dialer.WithTracer(s.Tracer), - dialer.WithBalancer(s.Registry.Client), - ) + conn, err := s.getGprcConn(name) if err != nil { return fmt.Errorf("dialer error: %v", err) } @@ -160,11 +126,7 @@ func (s *Server) initUserClient(name string) error { } func (s *Server) initReservation(name string) error { - conn, err := dialer.Dial( - name, - dialer.WithTracer(s.Tracer), - dialer.WithBalancer(s.Registry.Client), - ) + conn, err := s.getGprcConn(name) if err != nil { return fmt.Errorf("dialer error: %v", err) } @@ -172,6 +134,23 @@ func (s *Server) initReservation(name string) error { return nil } +func (s *Server) getGprcConn(name string) (*grpc.ClientConn, error) { + log.Info().Msg("get Grpc conn is :") + log.Info().Msg(s.KnativeDns) + log.Info().Msg(fmt.Sprintf("%s.%s", name, s.KnativeDns)) + if s.KnativeDns != "" { + return dialer.Dial( + fmt.Sprintf("%s.%s", name, s.KnativeDns), + dialer.WithTracer(s.Tracer)) + } else { + return dialer.Dial( + name, + dialer.WithTracer(s.Tracer), + dialer.WithBalancer(s.Registry.Client), + ) + } +} + func (s *Server) searchHandler(w http.ResponseWriter, r *http.Request) { w.Header().Set("Access-Control-Allow-Origin", "*") ctx := r.Context() @@ -199,7 +178,7 @@ func (s *Server) searchHandler(w http.ResponseWriter, r *http.Request) { log.Trace().Msg("starts searchHandler querying downstream") - log.Info().Msgf(" SEARCH [lat: %v, lon: %v, inDate: %v, outDate: %v", lat, lon, inDate, outDate) + log.Trace().Msgf("SEARCH [lat: %v, lon: %v, inDate: %v, outDate: %v", lat, lon, inDate, outDate) // search for best hotels searchResp, err := s.searchClient.Nearby(ctx, &search.NearbyRequest{ Lat: lat, @@ -212,10 +191,10 @@ func (s *Server) searchHandler(w http.ResponseWriter, r *http.Request) { return } - log.Info().Msg("SearchHandler gets searchResp") - for _, hid := range searchResp.HotelIds { - log.Info().Msgf("Search Handler hotelId = %s", hid) - } + log.Trace().Msg("SearchHandler gets searchResp") + //for _, hid := range searchResp.HotelIds { + // log.Trace().Msgf("Search Handler hotelId = %s", hid) + //} // grab locale from query params or default to en locale := r.URL.Query().Get("locale") @@ -236,8 +215,8 @@ func (s *Server) searchHandler(w http.ResponseWriter, r *http.Request) { return } - log.Info().Msgf("searchHandler gets reserveResp") - log.Info().Msgf("searchHandler gets reserveResp.HotelId = %s", reservationResp.HotelId) + log.Trace().Msgf("searchHandler gets reserveResp") + log.Trace().Msgf("searchHandler gets reserveResp.HotelId = %s", reservationResp.HotelId) // hotel profiles profileResp, err := s.profileClient.GetProfiles(ctx, &profile.Request{ @@ -250,7 +229,7 @@ func (s *Server) searchHandler(w http.ResponseWriter, r *http.Request) { return } - log.Info().Msg("searchHandler gets profileResp") + log.Trace().Msg("searchHandler gets profileResp") json.NewEncoder(w).Encode(geoJSONResponse(profileResp.Hotels)) } @@ -305,58 +284,6 @@ func (s *Server) recommendHandler(w http.ResponseWriter, r 
*http.Request) { json.NewEncoder(w).Encode(geoJSONResponse(profileResp.Hotels)) } -func (s *Server) reviewHandler(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") - ctx := r.Context() - - username, password := r.URL.Query().Get("username"), r.URL.Query().Get("password") - if username == "" || password == "" { - http.Error(w, "Please specify username and password", http.StatusBadRequest) - return - } - - // Check username and password - recResp, err := s.userClient.CheckUser(ctx, &user.Request{ - Username: username, - Password: password, - }) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - str := "Logged-in successfully!" - if recResp.Correct == false { - str = "Failed. Please check your username and password. " - } - - hotelId := r.URL.Query().Get("hotelId") - if hotelId == "" { - http.Error(w, "Please specify hotelId params", http.StatusBadRequest) - return - } - - revInput := review.Request{HotelId:hotelId} - - revResp, err := s.reviewClient.GetReviews(ctx, &revInput) - - str = "Have reviews = " + strconv.Itoa(len(revResp.Reviews)) - if len(revResp.Reviews) == 0 { - str = "Failed. No Reviews. " - } - - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - res := map[string]interface{}{ - "message": str, - } - - json.NewEncoder(w).Encode(res) -} - func (s *Server) userHandler(w http.ResponseWriter, r *http.Request) { w.Header().Set("Access-Control-Allow-Origin", "*") ctx := r.Context() diff --git a/hotelReservation/services/geo/server.go b/hotelReservation/services/geo/server.go index 53e45ae60..0706b9884 100644 --- a/hotelReservation/services/geo/server.go +++ b/hotelReservation/services/geo/server.go @@ -1,26 +1,21 @@ package geo import ( - // "encoding/json" + "context" "fmt" - - "gopkg.in/mgo.v2" - "gopkg.in/mgo.v2/bson" - - // "io/ioutil" "net" - // "os" "time" + "github.com/delimitrou/DeathStarBench/hotelreservation/registry" + pb "github.com/delimitrou/DeathStarBench/hotelreservation/services/geo/proto" + "github.com/delimitrou/DeathStarBench/hotelreservation/tls" "github.com/google/uuid" "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" "github.com/hailocab/go-geoindex" - "github.com/harlow/go-micro-services/registry" - pb "github.com/harlow/go-micro-services/services/geo/proto" - "github.com/harlow/go-micro-services/tls" opentracing "github.com/opentracing/opentracing-go" "github.com/rs/zerolog/log" - "golang.org/x/net/context" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" "google.golang.org/grpc" "google.golang.org/grpc/keepalive" ) @@ -36,11 +31,11 @@ type Server struct { index *geoindex.ClusteringIndex uuid string - Registry *registry.Client - Tracer opentracing.Tracer - Port int - IpAddr string - MongoSession *mgo.Session + Registry *registry.Client + Tracer opentracing.Tracer + Port int + IpAddr string + MongoClient *mongo.Client } // Run starts the server @@ -50,17 +45,11 @@ func (s *Server) Run() error { } if s.index == nil { - s.index = newGeoIndex(s.MongoSession) + s.index = newGeoIndex(s.MongoClient) } s.uuid = uuid.New().String() - // opts := []grpc.ServerOption { - // grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ - // PermitWithoutStream: true, - // }), - // } - opts := []grpc.ServerOption{ grpc.KeepaliveParams(keepalive.ServerParameters{ Timeout: 120 * time.Second, @@ -87,21 +76,6 @@ func (s *Server) Run() error { return fmt.Errorf("failed to listen: %v", err) } - // register the service 
- // jsonFile, err := os.Open("config.json") - // if err != nil { - // fmt.Println(err) - // } - - // defer jsonFile.Close() - - // byteValue, _ := ioutil.ReadAll(jsonFile) - - // var result map[string]string - // json.Unmarshal([]byte(byteValue), &result) - - // fmt.Printf("geo server ip = %s, port = %d\n", s.IpAddr, s.Port) - err = s.Registry.Register(name, s.uuid, s.IpAddr, s.Port) if err != nil { return fmt.Errorf("failed register: %v", err) @@ -154,21 +128,17 @@ func (s *Server) getNearbyPoints(ctx context.Context, lat, lon float64) []geoind } // newGeoIndex returns a geo index with points loaded -func newGeoIndex(session *mgo.Session) *geoindex.ClusteringIndex { - // session, err := mgo.Dial("mongodb-geo") - // if err != nil { - // panic(err) - // } - // defer session.Close() - +func newGeoIndex(client *mongo.Client) *geoindex.ClusteringIndex { log.Trace().Msg("new geo newGeoIndex") - s := session.Copy() - defer s.Close() - c := s.DB("geo-db").C("geo") + collection := client.Database("geo-db").Collection("geo") + curr, err := collection.Find(context.TODO(), bson.D{}) + if err != nil { + log.Error().Msgf("Failed get geo data: ", err) + } var points []*point - err := c.Find(bson.M{}).All(&points) + curr.All(context.TODO(), &points) if err != nil { log.Error().Msgf("Failed get geo data: ", err) } diff --git a/hotelReservation/services/profile/server.go b/hotelReservation/services/profile/server.go index 922e57b8f..31d97df5b 100644 --- a/hotelReservation/services/profile/server.go +++ b/hotelReservation/services/profile/server.go @@ -1,48 +1,44 @@ package profile import ( + "context" "encoding/json" "fmt" - - "gopkg.in/mgo.v2" - "gopkg.in/mgo.v2/bson" - - // "io/ioutil" "net" - // "os" + "sync" "time" - "github.com/rs/zerolog/log" - + "github.com/bradfitz/gomemcache/memcache" + "github.com/delimitrou/DeathStarBench/hotelreservation/registry" + pb "github.com/delimitrou/DeathStarBench/hotelreservation/services/profile/proto" + "github.com/delimitrou/DeathStarBench/hotelreservation/tls" "github.com/google/uuid" "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" - "github.com/harlow/go-micro-services/registry" - pb "github.com/harlow/go-micro-services/services/profile/proto" - "github.com/harlow/go-micro-services/tls" "github.com/opentracing/opentracing-go" - "golang.org/x/net/context" + "github.com/rs/zerolog/log" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" "google.golang.org/grpc" "google.golang.org/grpc/keepalive" - - "github.com/bradfitz/gomemcache/memcache" - // "strings" ) const name = "srv-profile" // Server implements the profile service type Server struct { - Tracer opentracing.Tracer - uuid string - Port int - IpAddr string - MongoSession *mgo.Session - Registry *registry.Client - MemcClient *memcache.Client + Tracer opentracing.Tracer + uuid string + Port int + IpAddr string + MongoClient *mongo.Client + Registry *registry.Client + MemcClient *memcache.Client } // Run starts the server func (s *Server) Run() error { + opentracing.SetGlobalTracer(s.Tracer) + if s.Port == 0 { return fmt.Errorf("server port must be set") } @@ -76,19 +72,6 @@ func (s *Server) Run() error { log.Fatal().Msgf("failed to configure listener: %v", err) } - // register the service - // jsonFile, err := os.Open("config.json") - // if err != nil { - // fmt.Println(err) - // } - - // defer jsonFile.Close() - - // byteValue, _ := ioutil.ReadAll(jsonFile) - - // var result map[string]string - // json.Unmarshal([]byte(byteValue), &result) - err = s.Registry.Register(name, 
 	if err != nil {
 		return fmt.Errorf("failed register: %v", err)
 	}
@@ -105,62 +88,73 @@ func (s *Server) Shutdown() {
 
 // GetProfiles returns hotel profiles for requested IDs
 func (s *Server) GetProfiles(ctx context.Context, req *pb.Request) (*pb.Result, error) {
-	// session, err := mgo.Dial("mongodb-profile")
-	// if err != nil {
-	// 	panic(err)
-	// }
-	// defer session.Close()
-
 	log.Trace().Msgf("In GetProfiles")
 
+	var wg sync.WaitGroup
+	var mutex sync.Mutex
+
+	// one hotel should only have one profile
+	hotelIds := make([]string, 0)
+	profileMap := make(map[string]struct{})
+	for _, hotelId := range req.HotelIds {
+		hotelIds = append(hotelIds, hotelId)
+		profileMap[hotelId] = struct{}{}
+	}
+
+	memSpan, _ := opentracing.StartSpanFromContext(ctx, "memcached_get_profile")
+	memSpan.SetTag("span.kind", "client")
+	resMap, err := s.MemcClient.GetMulti(hotelIds)
+	memSpan.Finish()
+
 	res := new(pb.Result)
 	hotels := make([]*pb.Hotel, 0)
 
-	// one hotel should only have one profile
+	if err != nil && err != memcache.ErrCacheMiss {
+		log.Panic().Msgf("Tried to get hotelIds [%v], but got memcached error = %s", hotelIds, err)
+	} else {
+		for hotelId, item := range resMap {
+			profileStr := string(item.Value)
+			log.Trace().Msgf("memc hit with %v", profileStr)
+
+			hotelProf := new(pb.Hotel)
+			json.Unmarshal(item.Value, hotelProf)
+			hotels = append(hotels, hotelProf)
+			delete(profileMap, hotelId)
+		}
+
+		wg.Add(len(profileMap))
+		for hotelId := range profileMap {
+			go func(hotelId string) {
+				defer wg.Done()
+
+				var hotelProf *pb.Hotel
+
+				collection := s.MongoClient.Database("profile-db").Collection("hotels")
+
+				mongoSpan, _ := opentracing.StartSpanFromContext(ctx, "mongo_profile")
+				mongoSpan.SetTag("span.kind", "client")
+				err := collection.FindOne(context.TODO(), bson.D{{"id", hotelId}}).Decode(&hotelProf)
+				mongoSpan.Finish()
+
+				if err != nil {
+					log.Error().Msgf("Failed to get hotel data [id: %v]: %v", hotelId, err)
+				}
+
+				mutex.Lock()
+				hotels = append(hotels, hotelProf)
+				mutex.Unlock()
+
+				profJson, err := json.Marshal(hotelProf)
+				if err != nil {
+					log.Error().Msgf("Failed to marshal hotel [id: %v] with err: %v", hotelProf.Id, err)
+				}
+				memcStr := string(profJson)
 
-	for _, i := range req.HotelIds {
-		// first check memcached
-		item, err := s.MemcClient.Get(i)
-		if err == nil {
-			// memcached hit
-			profile_str := string(item.Value)
-			log.Trace().Msgf("memc hit with %v", profile_str)
-
-			hotel_prof := new(pb.Hotel)
-			json.Unmarshal(item.Value, hotel_prof)
-			hotels = append(hotels, hotel_prof)
-
-		} else if err == memcache.ErrCacheMiss {
-			// memcached miss, set up mongo connection
-			session := s.MongoSession.Copy()
-			defer session.Close()
-			c := session.DB("profile-db").C("hotels")
-
-			hotel_prof := new(pb.Hotel)
-			err := c.Find(bson.M{"id": i}).One(&hotel_prof)
-
-			if err != nil {
-				log.Error().Msgf("Failed get hotels data: ", err)
-			}
-
-			hotels = append(hotels, hotel_prof)
-
-			prof_json, err := json.Marshal(hotel_prof)
-			if err != nil {
-				log.Error().Msgf("Failed to marshal hotel [id: %v] with err:", hotel_prof.Id, err)
-			}
-			memc_str := string(prof_json)
-
-			// write to memcached
-			s.MemcClient.Set(&memcache.Item{Key: i, Value: []byte(memc_str)})
-
-		} else {
-			log.Panic().Msgf("Tried to get hotelId [%v], but got memmcached error = %s", i, err)
+				// write to memcached; we don't care whether the set succeeds
+				go s.MemcClient.Set(&memcache.Item{Key: hotelId, Value: []byte(memcStr)})
+			}(hotelId)
 		}
 	}
+	wg.Wait()
 
 	res.Hotels = hotels
log.Trace().Msgf("In GetProfiles after getting resp") diff --git a/hotelReservation/services/rate/server.go b/hotelReservation/services/rate/server.go index 8df55096f..b0087b6d1 100644 --- a/hotelReservation/services/rate/server.go +++ b/hotelReservation/services/rate/server.go @@ -1,50 +1,46 @@ package rate import ( + "context" "encoding/json" "fmt" - - "gopkg.in/mgo.v2" - "gopkg.in/mgo.v2/bson" - - // "io/ioutil" "net" - // "os" "sort" + "strings" + "sync" "time" - "github.com/rs/zerolog/log" - + "github.com/bradfitz/gomemcache/memcache" + "github.com/delimitrou/DeathStarBench/hotelreservation/registry" + pb "github.com/delimitrou/DeathStarBench/hotelreservation/services/rate/proto" + "github.com/delimitrou/DeathStarBench/hotelreservation/tls" "github.com/google/uuid" "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" - "github.com/harlow/go-micro-services/registry" - pb "github.com/harlow/go-micro-services/services/rate/proto" - "github.com/harlow/go-micro-services/tls" "github.com/opentracing/opentracing-go" - "golang.org/x/net/context" + "github.com/rs/zerolog/log" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" "google.golang.org/grpc" "google.golang.org/grpc/keepalive" - - "strings" - - "github.com/bradfitz/gomemcache/memcache" ) const name = "srv-rate" // Server implements the rate service type Server struct { - Tracer opentracing.Tracer - Port int - IpAddr string - MongoSession *mgo.Session - Registry *registry.Client - MemcClient *memcache.Client - uuid string + Tracer opentracing.Tracer + Port int + IpAddr string + MongoClient *mongo.Client + Registry *registry.Client + MemcClient *memcache.Client + uuid string } // Run starts the server func (s *Server) Run() error { + opentracing.SetGlobalTracer(s.Tracer) + if s.Port == 0 { return fmt.Errorf("server port must be set") } @@ -76,19 +72,6 @@ func (s *Server) Run() error { log.Fatal().Msgf("failed to listen: %v", err) } - // register the service - // jsonFile, err := os.Open("config.json") - // if err != nil { - // fmt.Println(err) - // } - - // defer jsonFile.Close() - - // byteValue, _ := ioutil.ReadAll(jsonFile) - - // var result map[string]string - // json.Unmarshal([]byte(byteValue), &result) - err = s.Registry.Register(name, s.uuid, s.IpAddr, s.Port) if err != nil { return fmt.Errorf("failed register: %v", err) @@ -106,64 +89,88 @@ func (s *Server) Shutdown() { // GetRates gets rates for hotels for specific date range. 
func (s *Server) GetRates(ctx context.Context, req *pb.Request) (*pb.Result, error) {
 	res := new(pb.Result)
-	// session, err := mgo.Dial("mongodb-rate")
-	// if err != nil {
-	// 	panic(err)
-	// }
-	// defer session.Close()
 
 	ratePlans := make(RatePlans, 0)
+	hotelIds := []string{}
+	rateMap := make(map[string]struct{})
 	for _, hotelID := range req.HotelIds {
-		// first check memcached
-		item, err := s.MemcClient.Get(hotelID)
-		if err == nil {
-			// memcached hit
-			rate_strs := strings.Split(string(item.Value), "\n")
-
-			log.Trace().Msgf("memc hit, hotelId = %s,rate strings: %v", hotelID, rate_strs)
-
-			for _, rate_str := range rate_strs {
-				if len(rate_str) != 0 {
-					rate_p := new(pb.RatePlan)
-					json.Unmarshal([]byte(rate_str), rate_p)
-					ratePlans = append(ratePlans, rate_p)
+		hotelIds = append(hotelIds, hotelID)
+		rateMap[hotelID] = struct{}{}
+	}
+	// first check memcached (get-multi)
+	memSpan, _ := opentracing.StartSpanFromContext(ctx, "memcached_get_multi_rate")
+	memSpan.SetTag("span.kind", "client")
+	resMap, err := s.MemcClient.GetMulti(hotelIds)
+	memSpan.Finish()
+
+	var wg sync.WaitGroup
+	var mutex sync.Mutex
+	if err != nil && err != memcache.ErrCacheMiss {
+		log.Panic().Msgf("Memcached error while trying to get hotels [id: %v] = %s", hotelIds, err)
+	} else {
+		for hotelId, item := range resMap {
+			rateStrs := strings.Split(string(item.Value), "\n")
+			log.Trace().Msgf("memc hit, hotelId = %s, rate strings: %v", hotelId, rateStrs)
+
+			for _, rateStr := range rateStrs {
+				if len(rateStr) != 0 {
+					rateP := new(pb.RatePlan)
+					json.Unmarshal([]byte(rateStr), rateP)
+					ratePlans = append(ratePlans, rateP)
 				}
 			}
-		} else if err == memcache.ErrCacheMiss {
-
-			log.Trace().Msgf("memc miss, hotelId = %s", hotelID)
-
-			log.Trace().Msg("memcached miss, set up mongo connection")
-
-			// memcached miss, set up mongo connection
-			session := s.MongoSession.Copy()
-			defer session.Close()
-			c := session.DB("rate-db").C("inventory")
-
-			memc_str := ""
-
-			tmpRatePlans := make(RatePlans, 0)
-			err := c.Find(&bson.M{"hotelId": hotelID}).All(&tmpRatePlans)
-			if err != nil {
-				log.Panic().Msgf("Tried to find hotelId [%v], but got error", hotelID, err.Error())
-			} else {
-				for _, r := range tmpRatePlans {
-					ratePlans = append(ratePlans, r)
-					rate_json, err := json.Marshal(r)
-					if err != nil {
-						log.Error().Msgf("Failed to marshal plan [Code: %v] with error: %s", r.Code, err)
-					}
-					memc_str = memc_str + string(rate_json) + "\n"
+
+			delete(rateMap, hotelId)
+		}
+
+		wg.Add(len(rateMap))
+		for hotelId := range rateMap {
+			go func(id string) {
+				defer wg.Done()
+
+				log.Trace().Msgf("memc miss, hotelId = %s; setting up mongo connection", id)
+
+				mongoSpan, _ := opentracing.StartSpanFromContext(ctx, "mongo_rate")
+				mongoSpan.SetTag("span.kind", "client")
+
+				// memcached miss: fetch the rate plans for this hotel from mongodb
+				collection := s.MongoClient.Database("rate-db").Collection("inventory")
+				curr, err := collection.Find(context.TODO(), bson.D{{"hotelId", id}})
+				if err != nil {
+					log.Panic().Msgf("Tried to find hotelId [%v], but got error: %s", id, err.Error())
+				}
+
+				tmpRatePlans := make(RatePlans, 0)
+				err = curr.All(context.TODO(), &tmpRatePlans)
+				mongoSpan.Finish()
+				if err != nil {
+					log.Panic().Msgf("Failed to decode rate data for hotelId [%v]: %s", id, err.Error())
 				}
-			}
 
-			// write to memcached
-			s.MemcClient.Set(&memcache.Item{Key: hotelID, Value: []byte(memc_str)})
+				memcStr := ""
+				for _, r := range tmpRatePlans {
+					mutex.Lock()
+					ratePlans = append(ratePlans, r)
+					mutex.Unlock()
+					rateJson, err := json.Marshal(r)
+					if err != nil {
+						log.Error().Msgf("Failed to marshal plan [Code: %v] with error: %s", r.Code, err)
+					}
+					memcStr = memcStr + string(rateJson) + "\n"
+				}
 
-		} else {
-			log.Panic().Msgf("Memmcached error while trying to get hotel [id: %v]= %s", hotelID, err)
+				// write to memcached; we don't care whether the set succeeds
+				go s.MemcClient.Set(&memcache.Item{Key: id, Value: []byte(memcStr)})
+			}(hotelId)
 		}
 	}
+	wg.Wait()
 
 	sort.Sort(ratePlans)
 	res.RatePlans = ratePlans
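The rate service (like profile before it) replaces N individual cache Gets with a single GetMulti and then backfills only the misses from MongoDB, one goroutine per miss, with a mutex guarding the shared result slice and a best-effort write-back. A condensed sketch of that read-through shape, assuming a reachable memcached; fetchFromDB and the key names are illustrative stand-ins for the Mongo query:

```go
// Condensed sketch of the multi-get read-through pattern: batch-read the
// cache, fill misses concurrently from the database, write back best-effort.
package main

import (
	"fmt"
	"sync"

	"github.com/bradfitz/gomemcache/memcache"
)

func fetchFromDB(id string) string { return "plans-for-" + id } // placeholder for the Mongo query

func getRates(mc *memcache.Client, ids []string) map[string]string {
	out := make(map[string]string, len(ids))
	var mu sync.Mutex

	hit, err := mc.GetMulti(ids) // one round trip for the whole batch
	if err != nil && err != memcache.ErrCacheMiss {
		panic(err)
	}
	var wg sync.WaitGroup
	for _, id := range ids {
		if item, ok := hit[id]; ok {
			out[id] = string(item.Value) // cache hit
			continue
		}
		wg.Add(1)
		go func(id string) { // backfill each miss concurrently
			defer wg.Done()
			v := fetchFromDB(id)
			mu.Lock()
			out[id] = v
			mu.Unlock()
			go mc.Set(&memcache.Item{Key: id, Value: []byte(v)}) // best-effort write-back
		}(id)
	}
	wg.Wait()
	return out
}

func main() {
	mc := memcache.New("localhost:11211") // assumes a local memcached
	fmt.Println(getRates(mc, []string{"1", "2", "3"}))
}
```

diff --git a/hotelReservation/services/recommendation/server.go b/hotelReservation/services/recommendation/server.go
index a5a8e32ce..053c20d01 100644
--- a/hotelReservation/services/recommendation/server.go
+++ b/hotelReservation/services/recommendation/server.go
@@ -1,43 +1,37 @@
 package recommendation
 
 import (
-	// "encoding/json"
+	"context"
 	"fmt"
+	"math"
+	"net"
+	"time"
 
+	"github.com/delimitrou/DeathStarBench/hotelreservation/registry"
+	pb "github.com/delimitrou/DeathStarBench/hotelreservation/services/recommendation/proto"
+	"github.com/delimitrou/DeathStarBench/hotelreservation/tls"
 	"github.com/google/uuid"
 	"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
 	"github.com/hailocab/go-geoindex"
-	"github.com/harlow/go-micro-services/registry"
-	pb "github.com/harlow/go-micro-services/services/recommendation/proto"
-	"github.com/harlow/go-micro-services/tls"
 	"github.com/opentracing/opentracing-go"
 	"github.com/rs/zerolog/log"
-	"golang.org/x/net/context"
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/mongo"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/keepalive"
-	"gopkg.in/mgo.v2"
-	"gopkg.in/mgo.v2/bson"
-
-	// "io/ioutil"
-	"math"
-	"net"
-
-	// "os"
-	"time"
-	// "strings"
 )
 
 const name = "srv-recommendation"
 
 // Server implements the recommendation service
 type Server struct {
-	hotels       map[string]Hotel
-	Tracer       opentracing.Tracer
-	Port         int
-	IpAddr       string
-	MongoSession *mgo.Session
-	Registry     *registry.Client
-	uuid         string
+	hotels      map[string]Hotel
+	Tracer      opentracing.Tracer
+	Port        int
+	IpAddr      string
+	MongoClient *mongo.Client
+	Registry    *registry.Client
+	uuid        string
 }
 
 // Run starts the server
@@ -47,7 +41,7 @@ func (s *Server) Run() error {
 	}
 
 	if s.hotels == nil {
-		s.hotels = loadRecommendations(s.MongoSession)
+		s.hotels = loadRecommendations(s.MongoClient)
 	}
 
 	s.uuid = uuid.New().String()
@@ -77,19 +71,6 @@ func (s *Server) Run() error {
 		log.Fatal().Msgf("failed to listen: %v", err)
 	}
 
-	// // register the service
-	// jsonFile, err := os.Open("config.json")
-	// if err != nil {
-	// 	fmt.Println(err)
-	// }
-
-	// defer jsonFile.Close()
-
-	// byteValue, _ := ioutil.ReadAll(jsonFile)
-
-	// var result map[string]string
-	// json.Unmarshal([]byte(byteValue), &result)
-
 	err = s.Registry.Register(name, s.uuid, s.IpAddr, s.Port)
 	if err != nil {
 		return fmt.Errorf("failed register: %v", err)
@@ -168,20 +149,15 @@ func (s *Server) GetRecommendations(ctx context.Context, req *pb.Request) (*pb.R
 }
 
 // loadRecommendations loads hotel recommendations from mongodb.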
-func loadRecommendations(session *mgo.Session) map[string]Hotel {
-	// session, err := mgo.Dial("mongodb-recommendation")
-	// if err != nil {
-	// 	panic(err)
-	// }
-	// defer session.Close()
-	s := session.Copy()
-	defer s.Close()
-
-	c := s.DB("recommendation-db").C("recommendation")
-
-	// unmarshal json profiles
+func loadRecommendations(client *mongo.Client) map[string]Hotel {
+	collection := client.Database("recommendation-db").Collection("recommendation")
+	curr, err := collection.Find(context.TODO(), bson.D{})
+	if err != nil {
+		log.Error().Msgf("Failed to get hotels data: %v", err)
+	}
+
 	var hotels []Hotel
-	err := c.Find(bson.M{}).All(&hotels)
+	err = curr.All(context.TODO(), &hotels)
 	if err != nil {
 		log.Error().Msgf("Failed get hotels data: ", err)
 	}
@@ -195,10 +171,9 @@
 }
 
 type Hotel struct {
-	ID     bson.ObjectId `bson:"_id"`
-	HId    string        `bson:"hotelId"`
-	HLat   float64       `bson:"lat"`
-	HLon   float64       `bson:"lon"`
-	HRate  float64       `bson:"rate"`
-	HPrice float64       `bson:"price"`
+	HId    string  `bson:"hotelId"`
+	HLat   float64 `bson:"lat"`
+	HLon   float64 `bson:"lon"`
+	HRate  float64 `bson:"rate"`
+	HPrice float64 `bson:"price"`
 }
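loadRecommendations shows the mgo-to-official-driver translation applied across all of these services: the copied session and the DB(...).C(...).Find(...).All(...) chain become a shared *mongo.Client, an explicit context, and a cursor drained with cursor.All. A self-contained sketch of that shape (connection URI and document type are illustrative):

```go
// Minimal mongo-driver read in the style used by these services: connect
// once, Find with a filter (empty here: load everything), drain the cursor.
package main

import (
	"context"
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

type hotel struct {
	HId string `bson:"hotelId"`
}

func main() {
	ctx := context.TODO()
	client, err := mongo.Connect(ctx, options.Client().ApplyURI("mongodb://mongodb-recommendation:27017"))
	if err != nil {
		panic(err)
	}
	defer client.Disconnect(ctx)

	coll := client.Database("recommendation-db").Collection("recommendation")
	cur, err := coll.Find(ctx, bson.D{}) // empty filter: load everything
	if err != nil {
		panic(err)
	}
	var hotels []hotel
	if err := cur.All(ctx, &hotels); err != nil { // All also closes the cursor
		panic(err)
	}
	fmt.Println(len(hotels), "recommendations loaded")
}
```

diff --git a/hotelReservation/services/reservation/server.go b/hotelReservation/services/reservation/server.go
index c4d794c97..5f65a5528 100644
--- a/hotelReservation/services/reservation/server.go
+++ b/hotelReservation/services/reservation/server.go
@@ -1,48 +1,45 @@
 package reservation
 
 import (
-	// "encoding/json"
+	"context"
 	"fmt"
+	"net"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
 
+	"github.com/bradfitz/gomemcache/memcache"
+	"github.com/delimitrou/DeathStarBench/hotelreservation/registry"
+	pb "github.com/delimitrou/DeathStarBench/hotelreservation/services/reservation/proto"
+	"github.com/delimitrou/DeathStarBench/hotelreservation/tls"
 	"github.com/google/uuid"
 	"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
-	"github.com/harlow/go-micro-services/registry"
-	pb "github.com/harlow/go-micro-services/services/reservation/proto"
-	"github.com/harlow/go-micro-services/tls"
 	"github.com/opentracing/opentracing-go"
-	"golang.org/x/net/context"
+	"github.com/rs/zerolog/log"
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/mongo"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/keepalive"
-	"gopkg.in/mgo.v2"
-	"gopkg.in/mgo.v2/bson"
-
-	// "io/ioutil"
-	"net"
-	// "os"
-	"time"
-
-	"github.com/bradfitz/gomemcache/memcache"
-	"github.com/rs/zerolog/log"
-
-	// "strings"
-	"strconv"
 )
 
 const name = "srv-reservation"
 
 // Server implements the user service
 type Server struct {
-	Tracer       opentracing.Tracer
-	Port         int
-	IpAddr       string
-	MongoSession *mgo.Session
-	Registry     *registry.Client
-	MemcClient   *memcache.Client
-	uuid         string
+	Tracer      opentracing.Tracer
+	Port        int
+	IpAddr      string
+	MongoClient *mongo.Client
+	Registry    *registry.Client
+	MemcClient  *memcache.Client
+	uuid        string
 }
 
 // Run starts the server
 func (s *Server) Run() error {
+	opentracing.SetGlobalTracer(s.Tracer)
+
 	if s.Port == 0 {
 		return fmt.Errorf("server port must be set")
 	}
@@ -74,19 +71,6 @@ func (s *Server) Run() error {
 		log.Fatal().Msgf("failed to listen: %v", err)
 	}
 
-	// register the service
-	// jsonFile, err := os.Open("config.json")
-	// if err != nil {
-	// 	fmt.Println(err)
-	// }
-
-	// defer jsonFile.Close()
-
-	// byteValue, _ := ioutil.ReadAll(jsonFile)
-
-	// var result map[string]string
-	// json.Unmarshal([]byte(byteValue), &result)
-
 	log.Trace().Msgf("In reservation s.IpAddr = %s, port = %d", s.IpAddr, s.Port)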
 	err = s.Registry.Register(name, s.uuid, s.IpAddr, s.Port)
@@ -108,16 +92,9 @@ func (s *Server) MakeReservation(ctx context.Context, req *pb.Request) (*pb.Resu
 	res := new(pb.Result)
 	res.HotelId = make([]string, 0)
 
-	// session, err := mgo.Dial("mongodb-reservation")
-	// if err != nil {
-	// 	panic(err)
-	// }
-	// defer session.Close()
-	session := s.MongoSession.Copy()
-	defer session.Close()
-
-	c := session.DB("reservation-db").C("reservation")
-	c1 := session.DB("reservation-db").C("number")
+	database := s.MongoClient.Database("reservation-db")
+	resCollection := database.Collection("reservation")
+	numCollection := database.Collection("number")
 
 	inDate, _ := time.Parse(
 		time.RFC3339,
@@ -150,8 +127,14 @@ func (s *Server) MakeReservation(ctx context.Context, req *pb.Request) (*pb.Resu
 	} else if err == memcache.ErrCacheMiss {
 		// memcached miss
 		log.Trace().Msgf("memcached miss")
-		reserve := make([]reservation, 0)
-		err := c.Find(&bson.M{"hotelId": hotelId, "inDate": indate, "outDate": outdate}).All(&reserve)
+		var reserve []reservation
+
+		filter := bson.D{{"hotelId", hotelId}, {"inDate", indate}, {"outDate", outdate}}
+		curr, err := resCollection.Find(context.TODO(), filter)
+		if err != nil {
+			log.Error().Msgf("Failed to get reservation data: %v", err)
+		}
+		err = curr.All(context.TODO(), &reserve)
 		if err != nil {
 			log.Panic().Msgf("Tried to find hotelId [%v] from date [%v] to date [%v], but got error", hotelId, indate, outdate, err.Error())
 		}
@@ -178,7 +161,7 @@ func (s *Server) MakeReservation(ctx context.Context, req *pb.Request) (*pb.Resu
 		} else if err == memcache.ErrCacheMiss {
 			// memcached miss
 			var num number
-			err = c1.Find(&bson.M{"hotelId": hotelId}).One(&num)
+			err = numCollection.FindOne(context.TODO(), bson.D{{"hotelId", hotelId}}).Decode(&num)
 			if err != nil {
 				log.Panic().Msgf("Tried to find hotelId [%v], but got error", hotelId, err.Error())
 			}
@@ -210,12 +193,16 @@ func (s *Server) MakeReservation(ctx context.Context, req *pb.Request) (*pb.Resu
 	for inDate.Before(outDate) {
 		inDate = inDate.AddDate(0, 0, 1)
 		outdate := inDate.String()[0:10]
-		err := c.Insert(&reservation{
-			HotelId:      hotelId,
-			CustomerName: req.CustomerName,
-			InDate:       indate,
-			OutDate:      outdate,
-			Number:       int(req.RoomNumber)})
+		_, err := resCollection.InsertOne(
+			context.TODO(),
+			reservation{
+				HotelId:      hotelId,
+				CustomerName: req.CustomerName,
+				InDate:       indate,
+				OutDate:      outdate,
+				Number:       int(req.RoomNumber),
+			},
+		)
 		if err != nil {
 			log.Panic().Msgf("Tried to insert hotel [hotelId %v], but got error", hotelId, err.Error())
 		}
@@ -232,97 +219,191 @@ func (s *Server) CheckAvailability(ctx context.Context, req *pb.Request) (*pb.Re
 	res := new(pb.Result)
 	res.HotelId = make([]string, 0)
 
-	// session, err := mgo.Dial("mongodb-reservation")
-	// if err != nil {
-	// 	panic(err)
-	// }
-	// defer session.Close()
-	session := s.MongoSession.Copy()
-	defer session.Close()
+	hotelMemKeys := []string{}
+	keysMap := make(map[string]struct{})
+	resMap := make(map[string]bool)
+	// cache capacity since it will not change
+	for _, hotelId := range req.HotelId {
+		hotelMemKeys = append(hotelMemKeys, hotelId+"_cap")
+		resMap[hotelId] = true
+		keysMap[hotelId+"_cap"] = struct{}{}
+	}
+
+	capMemSpan, _ := opentracing.StartSpanFromContext(ctx, "memcached_capacity_get_multi_number")
+	capMemSpan.SetTag("span.kind", "client")
+	cacheMemRes, err := s.MemcClient.GetMulti(hotelMemKeys)
+	capMemSpan.Finish()
 
-	c := session.DB("reservation-db").C("reservation")
-	c1 := session.DB("reservation-db").C("number")
+	numCollection := s.MongoClient.Database("reservation-db").Collection("number")
s.MongoClient.Database("reservation-db").Collection("number") + + misKeys := []string{} + // gather cache miss key to query in mongodb + if err == memcache.ErrCacheMiss { + for key := range keysMap { + if _, ok := cacheMemRes[key]; !ok { + misKeys = append(misKeys, key) + } + } + } else if err != nil { + log.Panic().Msgf("Tried to get memc_cap_key [%v], but got memmcached error = %s", hotelMemKeys, err) + } + // store whole capacity result in cacheCap + cacheCap := make(map[string]int) + for k, v := range cacheMemRes { + hotelCap, _ := strconv.Atoi(string(v.Value)) + cacheCap[k] = hotelCap + } + if len(misKeys) > 0 { + queryMissKeys := []string{} + for _, k := range misKeys { + queryMissKeys = append(queryMissKeys, strings.Split(k, "_")[0]) + } + var nums []number + capMongoSpan, _ := opentracing.StartSpanFromContext(ctx, "mongodb_capacity_get_multi_number") + capMongoSpan.SetTag("span.kind", "client") + curr, err := numCollection.Find(context.TODO(), bson.D{{"$in", queryMissKeys}}) + if err != nil { + log.Error().Msgf("Failed get reservation number data: ", err) + } + curr.All(context.TODO(), &nums) + if err != nil { + log.Error().Msgf("Failed get reservation number data: ", err) + } + capMongoSpan.Finish() + if err != nil { + log.Panic().Msgf("Tried to find hotelId [%v], but got error", misKeys, err.Error()) + } + for _, num := range nums { + cacheCap[num.HotelId] = num.Number + // we don't care set successfully or not + go s.MemcClient.Set(&memcache.Item{Key: num.HotelId + "_cap", Value: []byte(strconv.Itoa(num.Number))}) + } + } + reqCommand := []string{} + queryMap := make(map[string]map[string]string) for _, hotelId := range req.HotelId { log.Trace().Msgf("reservation check hotel %s", hotelId) inDate, _ := time.Parse( time.RFC3339, req.InDate+"T12:00:00+00:00") - outDate, _ := time.Parse( time.RFC3339, req.OutDate+"T12:00:00+00:00") - - indate := inDate.String()[0:10] - for inDate.Before(outDate) { - // check reservations - count := 0 + indate := inDate.String()[:10] inDate = inDate.AddDate(0, 0, 1) - log.Trace().Msgf("reservation check date %s", inDate.String()[0:10]) - outdate := inDate.String()[0:10] - - // first check memc - memc_key := hotelId + "_" + inDate.String()[0:10] + "_" + outdate - item, err := s.MemcClient.Get(memc_key) + outDate := inDate.String()[:10] + memcKey := hotelId + "_" + outDate + "_" + outDate + reqCommand = append(reqCommand, memcKey) + queryMap[memcKey] = map[string]string{ + "hotelId": hotelId, + "startDate": indate, + "endDate": outDate, + } + } + } - if err == nil { - // memcached hit - count, _ = strconv.Atoi(string(item.Value)) - log.Trace().Msgf("memcached hit %s = %d", memc_key, count) - } else if err == memcache.ErrCacheMiss { - // memcached miss - reserve := make([]reservation, 0) - err := c.Find(&bson.M{"hotelId": hotelId, "inDate": indate, "outDate": outdate}).All(&reserve) - if err != nil { - log.Panic().Msgf("Tried to find hotelId [%v] from date [%v] to date [%v], but got error", hotelId, indate, outdate, err.Error()) + type taskRes struct { + hotelId string + checkRes bool + } + reserveMemSpan, _ := opentracing.StartSpanFromContext(ctx, "memcached_reserve_get_multi_number") + ch := make(chan taskRes) + reserveMemSpan.SetTag("span.kind", "client") + // check capacity in memcached and mongodb + if itemsMap, err := s.MemcClient.GetMulti(reqCommand); err != nil && err != memcache.ErrCacheMiss { + reserveMemSpan.Finish() + log.Panic().Msgf("Tried to get memc_key [%v], but got memmcached error = %s", reqCommand, err) + } else { + 
reserveMemSpan.Finish() + // go through reservation count from memcached + go func() { + for k, v := range itemsMap { + id := strings.Split(k, "_")[0] + val, _ := strconv.Atoi(string(v.Value)) + var res bool + if val+int(req.RoomNumber) <= cacheCap[id] { + res = true } - for _, r := range reserve { - log.Trace().Msgf("reservation check reservation number = %d", hotelId) - count += r.Number + ch <- taskRes{ + hotelId: id, + checkRes: res, } - - // update memcached - s.MemcClient.Set(&memcache.Item{Key: memc_key, Value: []byte(strconv.Itoa(count))}) - } else { - log.Panic().Msgf("Tried to get memc_key [%v], but got memmcached error = %s", memc_key, err) - } - - // check capacity - // check memc capacity - memc_cap_key := hotelId + "_cap" - item, err = s.MemcClient.Get(memc_cap_key) - hotel_cap := 0 - if err == nil { - // memcached hit - hotel_cap, _ = strconv.Atoi(string(item.Value)) - log.Trace().Msgf("memcached hit %s = %d", memc_cap_key, hotel_cap) - } else if err == memcache.ErrCacheMiss { - var num number - err = c1.Find(&bson.M{"hotelId": hotelId}).One(&num) - if err != nil { - log.Panic().Msgf("Tried to find hotelId [%v], but got error", hotelId, err.Error()) - } - hotel_cap = int(num.Number) - // update memcached - s.MemcClient.Set(&memcache.Item{Key: memc_cap_key, Value: []byte(strconv.Itoa(hotel_cap))}) - } else { - log.Panic().Msgf("Tried to get memc_key [%v], but got memmcached error = %s", memc_cap_key, err) + close(ch) } - - if count+int(req.RoomNumber) > hotel_cap { - break + }() + // use miss reservation to get data from mongo + // rever string to indata and outdate + if err == memcache.ErrCacheMiss { + var wg sync.WaitGroup + for k := range itemsMap { + delete(queryMap, k) } - indate = outdate - - if inDate.Equal(outDate) { - res.HotelId = append(res.HotelId, hotelId) + wg.Add(len(queryMap)) + go func() { + wg.Wait() + close(ch) + }() + for command := range queryMap { + go func(comm string) { + defer wg.Done() + + var reserve []reservation + + queryItem := queryMap[comm] + resCollection := s.MongoClient.Database("reservation-db").Collection("reservation") + filter := bson.D{{"hotelId", queryItem["hotelId"]}, {"inDate", queryItem["startDate"]}, {"outDate", queryItem["endDate"]}} + + reserveMongoSpan, _ := opentracing.StartSpanFromContext(ctx, "mongodb_capacity_get_multi_number"+comm) + reserveMongoSpan.SetTag("span.kind", "client") + curr, err := resCollection.Find(context.TODO(), filter) + if err != nil { + log.Error().Msgf("Failed get reservation data: ", err) + } + curr.All(context.TODO(), &reserve) + if err != nil { + log.Error().Msgf("Failed get reservation data: ", err) + } + reserveMongoSpan.Finish() + + if err != nil { + log.Panic().Msgf("Tried to find hotelId [%v] from date [%v] to date [%v], but got error", + queryItem["hotelId"], queryItem["startDate"], queryItem["endDate"], err.Error()) + } + var count int + for _, r := range reserve { + log.Trace().Msgf("reservation check reservation number = %d", queryItem["hotelId"]) + count += r.Number + } + // update memcached + go s.MemcClient.Set(&memcache.Item{Key: comm, Value: []byte(strconv.Itoa(count))}) + var res bool + if count+int(req.RoomNumber) <= cacheCap[queryItem["hotelId"]] { + res = true + } + ch <- taskRes{ + hotelId: queryItem["hotelId"], + checkRes: res, + } + }(command) } } } + for task := range ch { + if !task.checkRes { + resMap[task.hotelId] = false + } + } + for k, v := range resMap { + if v { + res.HotelId = append(res.HotelId, k) + } + } + return res, nil } diff --git 
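CheckAvailability now fans the per-hotel checks into a channel from several producer goroutines. The rule the rewrite relies on is that exactly one goroutine may close the channel, and only after a WaitGroup has accounted for every producer; otherwise a late send hits a closed channel. A generic sketch of that fan-in shape (names are illustrative):

```go
// Fan-in pattern: producers send on a channel, a WaitGroup tracks them all,
// and a single goroutine closes the channel once every producer is done.
package main

import (
	"fmt"
	"sync"
)

type result struct {
	id string
	ok bool
}

func main() {
	ids := []string{"h1", "h2", "h3"}
	ch := make(chan result)

	var wg sync.WaitGroup
	wg.Add(len(ids))
	for _, id := range ids {
		go func(id string) { // one producer per hotel
			defer wg.Done()
			ch <- result{id: id, ok: len(id) > 1} // stand-in for the cache/DB check
		}(id)
	}
	go func() { // single closer: runs after every producer has finished
		wg.Wait()
		close(ch)
	}()

	for r := range ch { // fan-in: drain until closed
		fmt.Println(r.id, r.ok)
	}
}
```

diff --git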
a/hotelReservation/services/review/proto/review.pb.go b/hotelReservation/services/review/proto/review.pb.go deleted file mode 100644 index c16a3b42a..000000000 --- a/hotelReservation/services/review/proto/review.pb.go +++ /dev/null @@ -1,494 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.31.0-devel -// protoc v3.6.1 -// source: review.proto - -package review - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion4 - -const ( - Review_GetReviews_FullMethodName = "/review.Review/GetReviews" -) - -// ReviewClient is the client API for Review service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type ReviewClient interface { - GetReviews(ctx context.Context, in *Request, opts ...grpc.CallOption) (*Result, error) -} - -type reviewClient struct { - cc *grpc.ClientConn -} - -func NewReviewClient(cc *grpc.ClientConn) ReviewClient { - return &reviewClient{cc} -} - -func (c *reviewClient) GetReviews(ctx context.Context, in *Request, opts ...grpc.CallOption) (*Result, error) { - out := new(Result) - err := c.cc.Invoke(ctx, Review_GetReviews_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// ReviewServer is the server API for Review service. -// All implementations must embed UnimplementedReviewServer -// for forward compatibility -type ReviewServer interface { - GetReviews(context.Context, *Request) (*Result, error) -} - -// UnimplementedReviewServer must be embedded to have forward compatible implementations. -type UnimplementedReviewServer struct { -} - -func (UnimplementedReviewServer) GetReviews(context.Context, *Request) (*Result, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetReviews not implemented") -} -func (UnimplementedReviewServer) mustEmbedUnimplementedReviewServer() {} - -// UnsafeReviewServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to ReviewServer will -// result in compilation errors. -type UnsafeReviewServer interface { - mustEmbedUnimplementedReviewServer() -} - -func _Review_GetReviews_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Request) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ReviewServer).GetReviews(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: Review_GetReviews_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ReviewServer).GetReviews(ctx, req.(*Request)) - } - return interceptor(ctx, in, info, handler) -} - -// Review_ServiceDesc is the grpc.ServiceDesc for Review service. 
-// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var Review_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "review.Review", - HandlerType: (*ReviewServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetReviews", - Handler: _Review_GetReviews_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "review.proto", -} - -func RegisterReviewServer(s *grpc.Server, srv ReviewServer) { - s.RegisterService(&Review_ServiceDesc, srv) -} - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type Request struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - HotelId string `protobuf:"bytes,1,opt,name=hotelId,proto3" json:"hotelId,omitempty"` -} - -func (x *Request) Reset() { - *x = Request{} - if protoimpl.UnsafeEnabled { - mi := &file_review_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Request) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Request) ProtoMessage() {} - -func (x *Request) ProtoReflect() protoreflect.Message { - mi := &file_review_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Request.ProtoReflect.Descriptor instead. -func (*Request) Descriptor() ([]byte, []int) { - return file_review_proto_rawDescGZIP(), []int{0} -} - -func (x *Request) GetHotelId() string { - if x != nil { - return x.HotelId - } - return "" -} - -type Result struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Reviews []*ReviewComm `protobuf:"bytes,1,rep,name=reviews,proto3" json:"reviews,omitempty"` -} - -func (x *Result) Reset() { - *x = Result{} - if protoimpl.UnsafeEnabled { - mi := &file_review_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Result) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Result) ProtoMessage() {} - -func (x *Result) ProtoReflect() protoreflect.Message { - mi := &file_review_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Result.ProtoReflect.Descriptor instead. 
-func (*Result) Descriptor() ([]byte, []int) { - return file_review_proto_rawDescGZIP(), []int{1} -} - -func (x *Result) GetReviews() []*ReviewComm { - if x != nil { - return x.Reviews - } - return nil -} - -type ReviewComm struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ReviewId string `protobuf:"bytes,1,opt,name=reviewId,proto3" json:"reviewId,omitempty"` - HotelId string `protobuf:"bytes,2,opt,name=hotelId,proto3" json:"hotelId,omitempty"` - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` - Rating float32 `protobuf:"fixed32,4,opt,name=rating,proto3" json:"rating,omitempty"` - Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` - Images *Image `protobuf:"bytes,6,opt,name=images,proto3" json:"images,omitempty"` -} - -func (x *ReviewComm) Reset() { - *x = ReviewComm{} - if protoimpl.UnsafeEnabled { - mi := &file_review_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ReviewComm) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReviewComm) ProtoMessage() {} - -func (x *ReviewComm) ProtoReflect() protoreflect.Message { - mi := &file_review_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReviewComm.ProtoReflect.Descriptor instead. -func (*ReviewComm) Descriptor() ([]byte, []int) { - return file_review_proto_rawDescGZIP(), []int{2} -} - -func (x *ReviewComm) GetReviewId() string { - if x != nil { - return x.ReviewId - } - return "" -} - -func (x *ReviewComm) GetHotelId() string { - if x != nil { - return x.HotelId - } - return "" -} - -func (x *ReviewComm) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *ReviewComm) GetRating() float32 { - if x != nil { - return x.Rating - } - return 0 -} - -func (x *ReviewComm) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *ReviewComm) GetImages() *Image { - if x != nil { - return x.Images - } - return nil -} - -type Image struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` - Default bool `protobuf:"varint,2,opt,name=default,proto3" json:"default,omitempty"` -} - -func (x *Image) Reset() { - *x = Image{} - if protoimpl.UnsafeEnabled { - mi := &file_review_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Image) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Image) ProtoMessage() {} - -func (x *Image) ProtoReflect() protoreflect.Message { - mi := &file_review_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Image.ProtoReflect.Descriptor instead. 
-func (*Image) Descriptor() ([]byte, []int) { - return file_review_proto_rawDescGZIP(), []int{3} -} - -func (x *Image) GetUrl() string { - if x != nil { - return x.Url - } - return "" -} - -func (x *Image) GetDefault() bool { - if x != nil { - return x.Default - } - return false -} - -var File_review_proto protoreflect.FileDescriptor - -var file_review_proto_rawDesc = []byte{ - 0x0a, 0x0c, 0x72, 0x65, 0x76, 0x69, 0x65, 0x77, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, - 0x72, 0x65, 0x76, 0x69, 0x65, 0x77, 0x22, 0x23, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x18, 0x0a, 0x07, 0x68, 0x6f, 0x74, 0x65, 0x6c, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x68, 0x6f, 0x74, 0x65, 0x6c, 0x49, 0x64, 0x22, 0x36, 0x0a, 0x06, 0x52, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2c, 0x0a, 0x07, 0x72, 0x65, 0x76, 0x69, 0x65, 0x77, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x72, 0x65, 0x76, 0x69, 0x65, 0x77, 0x2e, - 0x52, 0x65, 0x76, 0x69, 0x65, 0x77, 0x43, 0x6f, 0x6d, 0x6d, 0x52, 0x07, 0x72, 0x65, 0x76, 0x69, - 0x65, 0x77, 0x73, 0x22, 0xb7, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x76, 0x69, 0x65, 0x77, 0x43, 0x6f, - 0x6d, 0x6d, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x76, 0x69, 0x65, 0x77, 0x49, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x76, 0x69, 0x65, 0x77, 0x49, 0x64, 0x12, 0x18, - 0x0a, 0x07, 0x68, 0x6f, 0x74, 0x65, 0x6c, 0x49, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x68, 0x6f, 0x74, 0x65, 0x6c, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, - 0x72, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x02, 0x52, 0x06, 0x72, 0x61, - 0x74, 0x69, 0x6e, 0x67, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x06, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x72, 0x65, 0x76, 0x69, 0x65, 0x77, 0x2e, - 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x06, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x22, 0x33, 0x0a, - 0x05, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x32, 0x37, 0x0a, 0x06, 0x52, 0x65, 0x76, 0x69, 0x65, 0x77, 0x12, 0x2d, 0x0a, 0x0a, - 0x47, 0x65, 0x74, 0x52, 0x65, 0x76, 0x69, 0x65, 0x77, 0x73, 0x12, 0x0f, 0x2e, 0x72, 0x65, 0x76, - 0x69, 0x65, 0x77, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0e, 0x2e, 0x72, 0x65, - 0x76, 0x69, 0x65, 0x77, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x42, 0x0c, 0x5a, 0x0a, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -} - -var ( - file_review_proto_rawDescOnce sync.Once - file_review_proto_rawDescData = file_review_proto_rawDesc -) - -func file_review_proto_rawDescGZIP() []byte { - file_review_proto_rawDescOnce.Do(func() { - file_review_proto_rawDescData = protoimpl.X.CompressGZIP(file_review_proto_rawDescData) - }) - return file_review_proto_rawDescData -} - -var file_review_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_review_proto_goTypes = []interface{}{ - (*Request)(nil), // 0: review.Request - (*Result)(nil), 
// 1: review.Result - (*ReviewComm)(nil), // 2: review.ReviewComm - (*Image)(nil), // 3: review.Image -} -var file_review_proto_depIdxs = []int32{ - 2, // 0: review.Result.reviews:type_name -> review.ReviewComm - 3, // 1: review.ReviewComm.images:type_name -> review.Image - 0, // 2: review.Review.GetReviews:input_type -> review.Request - 1, // 3: review.Review.GetReviews:output_type -> review.Result - 3, // [3:4] is the sub-list for method output_type - 2, // [2:3] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name -} - -func init() { file_review_proto_init() } -func file_review_proto_init() { - if File_review_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_review_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Request); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_review_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Result); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_review_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReviewComm); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_review_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Image); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_review_proto_rawDesc, - NumEnums: 0, - NumMessages: 4, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_review_proto_goTypes, - DependencyIndexes: file_review_proto_depIdxs, - MessageInfos: file_review_proto_msgTypes, - }.Build() - File_review_proto = out.File - file_review_proto_rawDesc = nil - file_review_proto_goTypes = nil - file_review_proto_depIdxs = nil -} diff --git a/hotelReservation/services/review/proto/review.proto b/hotelReservation/services/review/proto/review.proto deleted file mode 100644 index 3c43a90df..000000000 --- a/hotelReservation/services/review/proto/review.proto +++ /dev/null @@ -1,29 +0,0 @@ -syntax = "proto3"; - -package review; -option go_package="github.com"; -service Review { - rpc GetReviews(Request) returns (Result); -} - -message Request { - string hotelId = 1; -} - -message Result { - repeated ReviewComm reviews = 1; -} - -message ReviewComm { - string reviewId = 1; - string hotelId = 2; - string name = 3; - float rating = 4; - string description = 5; - Image images = 6; -} -message Image { - string url = 1; - bool default = 2; -} - diff --git a/hotelReservation/services/review/server.go b/hotelReservation/services/review/server.go deleted file mode 100644 index dee630729..000000000 --- a/hotelReservation/services/review/server.go +++ /dev/null @@ -1,169 +0,0 @@ -package review - -import ( - "encoding/json" - "fmt" - - "gopkg.in/mgo.v2" - "gopkg.in/mgo.v2/bson" - - // "io/ioutil" - "net" - // "os" - // "sort" - "time" - //"sync" - - "github.com/rs/zerolog/log" - - "github.com/google/uuid" - 
"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" - "github.com/harlow/go-micro-services/registry" - pb "github.com/harlow/go-micro-services/services/review/proto" - "github.com/harlow/go-micro-services/tls" - "github.com/opentracing/opentracing-go" - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/keepalive" - - // "strings" - - "github.com/bradfitz/gomemcache/memcache" -) - -const name = "srv-review" - -// Server implements the rate service -type Server struct { - Tracer opentracing.Tracer - Port int - IpAddr string - MongoSession *mgo.Session - Registry *registry.Client - MemcClient *memcache.Client - uuid string -} - -// Run starts the server -func (s *Server) Run() error { - opentracing.SetGlobalTracer(s.Tracer) - - if s.Port == 0 { - return fmt.Errorf("server port must be set") - } - - s.uuid = uuid.New().String() - - opts := []grpc.ServerOption{ - grpc.KeepaliveParams(keepalive.ServerParameters{ - Timeout: 120 * time.Second, - }), - grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ - PermitWithoutStream: true, - }), - grpc.UnaryInterceptor( - otgrpc.OpenTracingServerInterceptor(s.Tracer), - ), - } - - if tlsopt := tls.GetServerOpt(); tlsopt != nil { - opts = append(opts, tlsopt) - } - - srv := grpc.NewServer(opts...) - - pb.RegisterReviewServer(srv, s) - - lis, err := net.Listen("tcp", fmt.Sprintf(":%d", s.Port)) - if err != nil { - log.Fatal().Msgf("failed to listen: %v", err) - } - - err = s.Registry.Register(name, s.uuid, s.IpAddr, s.Port) - if err != nil { - return fmt.Errorf("failed register: %v", err) - } - log.Info().Msg("Successfully registered in consul") - - return srv.Serve(lis) -} - -// Shutdown cleans up any processes -func (s *Server) Shutdown() { - s.Registry.Deregister(s.uuid) -} - -type ReviewHelper struct { - ReviewId string `bson:"reviewId"` - HotelId string `bson:"hotelId"` - Name string `bson:"name"` - Rating float32 `bson:"rating"` - Description string `bson:"description"` - Image *pb.Image `bson:"images"` -} - -type ImageHelper struct { - Url string `bson:"url"` - Default bool `bson:"default"` -} - -func (s *Server) GetReviews(ctx context.Context, req *pb.Request) (*pb.Result, error) { - - res := new(pb.Result) - reviews := make([]*pb.ReviewComm, 0) - - hotelId := req.HotelId - - memSpan, _ := opentracing.StartSpanFromContext(ctx, "memcached_get_review") - memSpan.SetTag("span.kind", "client") - item, err := s.MemcClient.Get(hotelId) - memSpan.Finish() - if err != nil && err != memcache.ErrCacheMiss { - log.Panic().Msgf("Tried to get hotelId [%v], but got memmcached error = %s", hotelId, err) - } else { - if err == memcache.ErrCacheMiss { - mongoSpan, _ := opentracing.StartSpanFromContext(ctx, "mongo_review") - mongoSpan.SetTag("span.kind", "client") - - session := s.MongoSession.Copy() - defer session.Close() - c := session.DB("review-db").C("reviews") - - var reviewHelpers []ReviewHelper - err = c.Find(bson.M{"hotelId": hotelId}).All(&reviewHelpers) - if err != nil { - log.Error().Msgf("Failed get hotels data: ", err) - } - - for _, reviewHelper := range reviewHelpers { - revComm := pb.ReviewComm{ - ReviewId: reviewHelper.ReviewId, - Name: reviewHelper.Name, - Rating: reviewHelper.Rating, - Description: reviewHelper.Description, - Images: reviewHelper.Image} - reviews = append(reviews, &revComm) - } - - reviewJson, err := json.Marshal(reviews) - if err != nil { - log.Error().Msgf("Failed to marshal hotel [id: %v] with err:", hotelId, err) - } - memcStr := string(reviewJson) - - 
s.MemcClient.Set(&memcache.Item{Key: hotelId, Value: []byte(memcStr)}) - } else { - reviewsStr := string(item.Value) - log.Trace().Msgf("memc hit with %v", reviewsStr) - if err := json.Unmarshal([]byte(reviewsStr), &reviews); err != nil { - log.Panic().Msgf("Failed to unmarshal reviews: %s", err) - } - } - } - - //reviewsEmpty := make([]*pb.ReviewComm, 0) - - res.Reviews = reviews - return res, nil -} - diff --git a/hotelReservation/services/search/server.go b/hotelReservation/services/search/server.go index b8d2c2e94..0a51f3813 100644 --- a/hotelReservation/services/search/server.go +++ b/hotelReservation/services/search/server.go @@ -11,14 +11,14 @@ import ( // "os" "time" + "github.com/delimitrou/DeathStarBench/hotelreservation/dialer" + "github.com/delimitrou/DeathStarBench/hotelreservation/registry" + geo "github.com/delimitrou/DeathStarBench/hotelreservation/services/geo/proto" + rate "github.com/delimitrou/DeathStarBench/hotelreservation/services/rate/proto" + pb "github.com/delimitrou/DeathStarBench/hotelreservation/services/search/proto" + "github.com/delimitrou/DeathStarBench/hotelreservation/tls" "github.com/google/uuid" "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" - "github.com/harlow/go-micro-services/dialer" - "github.com/harlow/go-micro-services/registry" - geo "github.com/harlow/go-micro-services/services/geo/proto" - rate "github.com/harlow/go-micro-services/services/rate/proto" - pb "github.com/harlow/go-micro-services/services/search/proto" - "github.com/harlow/go-micro-services/tls" opentracing "github.com/opentracing/opentracing-go" context "golang.org/x/net/context" "google.golang.org/grpc" @@ -32,11 +32,12 @@ type Server struct { geoClient geo.GeoClient rateClient rate.RateClient - Tracer opentracing.Tracer - Port int - IpAddr string - Registry *registry.Client - uuid string + Tracer opentracing.Tracer + Port int + IpAddr string + KnativeDns string + Registry *registry.Client + uuid string } // Run starts the server @@ -107,11 +108,7 @@ func (s *Server) Shutdown() { } func (s *Server) initGeoClient(name string) error { - conn, err := dialer.Dial( - name, - dialer.WithTracer(s.Tracer), - dialer.WithBalancer(s.Registry.Client), - ) + conn, err := s.getGprcConn(name) if err != nil { return fmt.Errorf("dialer error: %v", err) } @@ -120,11 +117,7 @@ func (s *Server) initGeoClient(name string) error { } func (s *Server) initRateClient(name string) error { - conn, err := dialer.Dial( - name, - dialer.WithTracer(s.Tracer), - dialer.WithBalancer(s.Registry.Client), - ) + conn, err := s.getGprcConn(name) if err != nil { return fmt.Errorf("dialer error: %v", err) } @@ -132,6 +125,20 @@ func (s *Server) initRateClient(name string) error { return nil } +func (s *Server) getGprcConn(name string) (*grpc.ClientConn, error) { + if s.KnativeDns != "" { + return dialer.Dial( + fmt.Sprintf("%s.%s", name, s.KnativeDns), + dialer.WithTracer(s.Tracer)) + } else { + return dialer.Dial( + name, + dialer.WithTracer(s.Tracer), + dialer.WithBalancer(s.Registry.Client), + ) + } +} + // Nearby returns ids of nearby hotels ordered by ranking algo func (s *Server) Nearby(ctx context.Context, req *pb.NearbyRequest) (*pb.SearchResult, error) { // find nearby hotels diff --git a/hotelReservation/services/user/server.go b/hotelReservation/services/user/server.go index ffa2324fc..47465cdb8 100644 --- a/hotelReservation/services/user/server.go +++ b/hotelReservation/services/user/server.go @@ -2,28 +2,22 @@ package user import ( "crypto/sha256" - // "encoding/json" "fmt" + "net" + "time" + 
"github.com/delimitrou/DeathStarBench/hotelreservation/registry" + pb "github.com/delimitrou/DeathStarBench/hotelreservation/services/user/proto" + "github.com/delimitrou/DeathStarBench/hotelreservation/tls" "github.com/google/uuid" "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" - "github.com/harlow/go-micro-services/registry" - pb "github.com/harlow/go-micro-services/services/user/proto" - "github.com/harlow/go-micro-services/tls" "github.com/opentracing/opentracing-go" + "github.com/rs/zerolog/log" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/keepalive" - "gopkg.in/mgo.v2" - "gopkg.in/mgo.v2/bson" - - // "io/ioutil" - "net" - - "github.com/rs/zerolog/log" - - // "os" - "time" ) const name = "srv-user" @@ -32,12 +26,12 @@ const name = "srv-user" type Server struct { users map[string]string - Tracer opentracing.Tracer - Registry *registry.Client - Port int - IpAddr string - MongoSession *mgo.Session - uuid string + Tracer opentracing.Tracer + Registry *registry.Client + Port int + IpAddr string + MongoClient *mongo.Client + uuid string } // Run starts the server @@ -47,7 +41,7 @@ func (s *Server) Run() error { } if s.users == nil { - s.users = loadUsers(s.MongoSession) + s.users = loadUsers(s.MongoClient) } s.uuid = uuid.New().String() @@ -77,19 +71,6 @@ func (s *Server) Run() error { log.Fatal().Msgf("failed to listen: %v", err) } - // // register the service - // jsonFile, err := os.Open("config.json") - // if err != nil { - // fmt.Println(err) - // } - - // defer jsonFile.Close() - - // byteValue, _ := ioutil.ReadAll(jsonFile) - - // var result map[string]string - // json.Unmarshal([]byte(byteValue), &result) - err = s.Registry.Register(name, s.uuid, s.IpAddr, s.Port) if err != nil { return fmt.Errorf("failed register: %v", err) @@ -113,45 +94,26 @@ func (s *Server) CheckUser(ctx context.Context, req *pb.Request) (*pb.Result, er sum := sha256.Sum256([]byte(req.Password)) pass := fmt.Sprintf("%x", sum) - // session, err := mgo.Dial("mongodb-user") - // if err != nil { - // panic(err) - // } - // defer session.Close() - - // c := session.DB("user-db").C("user") - - // user := User{} - // err = c.Find(bson.M{"username": req.Username}).One(&user) - // if err != nil { - // panic(err) - // } res.Correct = false if true_pass, found := s.users[req.Username]; found { res.Correct = pass == true_pass } - // res.Correct = user.Password == pass - log.Trace().Msgf("CheckUser %d", res.Correct) return res, nil } // loadUsers loads hotel users from mongodb. 
-func loadUsers(session *mgo.Session) map[string]string {
-	// session, err := mgo.Dial("mongodb-user")
-	// if err != nil {
-	// 	panic(err)
-	// }
-	// defer session.Close()
-	s := session.Copy()
-	defer s.Close()
-	c := s.DB("user-db").C("user")
-
-	// unmarshal json profiles
+func loadUsers(client *mongo.Client) map[string]string {
+	collection := client.Database("user-db").Collection("user")
+	curr, err := collection.Find(context.TODO(), bson.D{})
+	if err != nil {
+		log.Error().Msgf("Failed to get users data: %v", err)
+	}
+
 	var users []User
-	err := c.Find(bson.M{}).All(&users)
+	err = curr.All(context.TODO(), &users)
 	if err != nil {
 		log.Error().Msgf("Failed get users data: ", err)
 	}
diff --git a/hotelReservation/test.py b/hotelReservation/test.py
deleted file mode 100644
index c1d082a4a..000000000
--- a/hotelReservation/test.py
+++ /dev/null
@@ -1,16 +0,0 @@
-import requests
-import random
-import time
-
-
-def reviews():
-    url = "http://localhost:5000/review"
-    payload = {"hotelId":"1", "username": "Cornell_0", "password": "0000000000"}
-    t_before = time.time()
-    r = requests.get(url, params=payload)
-    t_after = time.time()
-    t = t_after - t_before
-    print("Response = ", r.text)
-    print("Time = ",t)
-
-reviews()
diff --git a/hotelReservation/tls/options.go b/hotelReservation/tls/options.go
index 498c1b961..532cffcf1 100644
--- a/hotelReservation/tls/options.go
+++ b/hotelReservation/tls/options.go
@@ -1,140 +1,137 @@
 package tls

-import (
-	"crypto/tls"
-	"crypto/x509"
-	"fmt"
-	"io/ioutil"
-	"log"
-	"os"
-	"strings"
-
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/credentials"
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"io/ioutil"
+	"os"
+	"strings"
+
+	"github.com/rs/zerolog/log"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
 )
-
 var (
-	dialopt grpc.DialOption
-	serveropt grpc.ServerOption
-	httpsopt *tls.Config
-	cipherSuites = map[string]uint16 {
-		// TLS 1.0 - 1.2 cipher suites.
- "TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA, - "TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, - "TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA, - "TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA, - "TLS_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_RSA_WITH_AES_128_CBC_SHA256, - "TLS_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_RSA_WITH_AES_128_GCM_SHA256, - "TLS_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_RSA_WITH_AES_256_GCM_SHA384, - - "TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, - "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, - "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, - "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, - - // our certficate doesn't support ECDSA - /* - "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, - "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, - "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, - "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, - "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, - */ - - // TLS 1.3 cipher suites. - /* Not supported by golang v1.9 - "TLS_AES_128_GCM_SHA256": tls.TLS_AES_128_GCM_SHA256, - "TLS_AES_256_GCM_SHA384": tls.TLS_AES_256_GCM_SHA384, - "TLS_CHACHA20_POLY1305_SHA256": tls.TLS_CHACHA20_POLY1305_SHA256, - */ - } + dialopt grpc.DialOption + serveropt grpc.ServerOption + httpsopt *tls.Config + cipherSuites = map[string]uint16{ + // TLS 1.0 - 1.2 cipher suites. 
+ "TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA, + "TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, + "TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA, + "TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA, + "TLS_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_RSA_WITH_AES_128_CBC_SHA256, + "TLS_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_RSA_WITH_AES_128_GCM_SHA256, + "TLS_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_RSA_WITH_AES_256_GCM_SHA384, + + "TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, + "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + + // our certficate doesn't support ECDSA + /* + "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, + */ + + // TLS 1.3 cipher suites. 
+ "TLS_AES_128_GCM_SHA256": tls.TLS_AES_128_GCM_SHA256, + "TLS_AES_256_GCM_SHA384": tls.TLS_AES_256_GCM_SHA384, + "TLS_CHACHA20_POLY1305_SHA256": tls.TLS_CHACHA20_POLY1305_SHA256, + } ) func checkTLS() (bool, string) { - if val, ok := os.LookupEnv("TLS"); ok { - if ( strings.EqualFold(val, "false") || strings.EqualFold(val, "0") ) { - return false, "" - } - if ( strings.EqualFold(val, "true") || strings.EqualFold(val, "1") ) { - return true, "" - } - - if _, ok := cipherSuites[val]; ok { - return true, val - } else { - log.Printf("WARNING: specified TLS cipher suite %s is invalid or unimplemented.\n", val) - validCiphers := make([]string, 0, len(cipherSuites)) - for k := range(cipherSuites) { - validCiphers = append(validCiphers, k) - } - log.Printf("WARNING: Please use the supported TLS cipher suite %s\n", validCiphers) - return false, "" - } - } - return false, "" + if val, ok := os.LookupEnv("TLS"); ok { + if strings.EqualFold(val, "false") || strings.EqualFold(val, "0") { + return false, "" + } + if strings.EqualFold(val, "true") || strings.EqualFold(val, "1") { + return true, "" + } + + if _, ok := cipherSuites[val]; ok { + return true, val + } else { + log.Warn().Msgf("specified TLS cipher suite %v is invalid or unimplemented.", val) + validCiphers := make([]string, 0, len(cipherSuites)) + for k := range cipherSuites { + validCiphers = append(validCiphers, k) + } + log.Warn().Msgf("Please use the supported TLS cipher suite %v.", validCiphers) + return false, "" + } + } + return false, "" } func init() { - needTLS, cipher := checkTLS() - if (needTLS) { - b, err := ioutil.ReadFile("x509/ca_cert.pem") - if err != nil { - panic(err) - } - cp := x509.NewCertPool() - if !cp.AppendCertsFromPEM(b) { - panic(fmt.Errorf("credentials: failed to append certificates")) - } - config := tls.Config{ - ServerName: "x.test.example.com", - RootCAs: cp, - } - httpsopt = &tls.Config { - PreferServerCipherSuites: true, - RootCAs: cp, - } - if cipher != "" { - log.Printf("TLS enabled cipher suite %s\n", cipher) - config.CipherSuites = append(config.CipherSuites, cipherSuites[cipher]) - httpsopt.CipherSuites = append(httpsopt.CipherSuites, cipherSuites[cipher]) - } else { - log.Println("TLS enabled without specified cipher suite") - } - - var creds credentials.TransportCredentials - creds = credentials.NewTLS(&config) - dialopt = grpc.WithTransportCredentials(creds) - - creds, err = credentials.NewServerTLSFromFile("x509/server_cert.pem", "x509/server_key.pem") - if err != nil { - panic(fmt.Sprintf("failed to create credentials: %v", err)) - } - serveropt = grpc.Creds(creds) - } else { - log.Println("TLS disabled") - dialopt = nil - serveropt = nil - } + needTLS, cipher := checkTLS() + if needTLS { + b, err := ioutil.ReadFile("x509/ca_cert.pem") + if err != nil { + log.Panic().Msgf("failed to read credentials: %v", err) + } + cp := x509.NewCertPool() + if !cp.AppendCertsFromPEM(b) { + log.Panic().Msgf("credentials: failed to append certificates") + } + config := tls.Config{ + ServerName: "x.test.example.com", + RootCAs: cp, + } + httpsopt = &tls.Config{ + PreferServerCipherSuites: true, + RootCAs: cp, + } + if cipher != "" { + log.Info().Msgf("TLS enabled cipher suite %s", cipher) + config.CipherSuites = append(config.CipherSuites, cipherSuites[cipher]) + httpsopt.CipherSuites = append(httpsopt.CipherSuites, cipherSuites[cipher]) + switch cipher { + case "TLS_AES_128_GCM_SHA256", "TLS_AES_256_GCM_SHA384", "TLS_CHACHA20_POLY1305_SHA256": + httpsopt.MinVersion = tls.VersionTLS13 + } + } else { + 
log.Info().Msgf("TLS enabled without specified cipher suite") + } + + var creds credentials.TransportCredentials + creds = credentials.NewTLS(&config) + dialopt = grpc.WithTransportCredentials(creds) + + creds, err = credentials.NewServerTLSFromFile("x509/server_cert.pem", "x509/server_key.pem") + if err != nil { + log.Panic().Msgf("failed to create credentials: %v", err) + } + serveropt = grpc.Creds(creds) + } else { + log.Info().Msgf("TLS disabled.") + dialopt = nil + serveropt = nil + } } - func GetDialOpt() grpc.DialOption { - return dialopt + return dialopt } - func GetServerOpt() grpc.ServerOption { - return serveropt + return serveropt } - func GetHttpsOpt() *tls.Config { - return httpsopt + return httpsopt } diff --git a/hotelReservation/tracing/tracer.go b/hotelReservation/tracing/tracer.go index ce626bd90..97fe22eee 100644 --- a/hotelReservation/tracing/tracer.go +++ b/hotelReservation/tracing/tracer.go @@ -1,12 +1,12 @@ package tracing import ( - "time" "os" "strconv" + "time" - "github.com/rs/zerolog/log" opentracing "github.com/opentracing/opentracing-go" + "github.com/rs/zerolog/log" "github.com/uber/jaeger-client-go/config" ) @@ -23,8 +23,10 @@ func Init(serviceName, host string) (opentracing.Tracer, error) { ratio = 1.0 } } + log.Info().Msgf("Jaeger client: adjusted sample ratio %f", ratio) - cfg := config.Configuration{ + tempCfg := &config.Configuration{ + ServiceName: serviceName, Sampler: &config.SamplerConfig{ Type: "probabilistic", Param: ratio, @@ -36,7 +38,13 @@ func Init(serviceName, host string) (opentracing.Tracer, error) { }, } - tracer, _, err := cfg.New(serviceName) + log.Info().Msg("Overriding Jaeger config with env variables") + cfg, err := tempCfg.FromEnv() + if err != nil { + return nil, err + } + + tracer, _, err := cfg.NewTracer() if err != nil { return nil, err } diff --git a/hotelReservation/tune/setting.go b/hotelReservation/tune/setting.go index 0ffc121d5..f78bc914d 100644 --- a/hotelReservation/tune/setting.go +++ b/hotelReservation/tune/setting.go @@ -2,38 +2,98 @@ package tune import ( "os" + "runtime/debug" + "strconv" + "strings" + "time" + "github.com/bradfitz/gomemcache/memcache" "github.com/rs/zerolog" "github.com/rs/zerolog/log" ) var ( - defaultLogLevel string = "info" + defaultGCPercent int = 100 + defaultMemCTimeout int = 2 + defaultMemCMaxIdleConns int = 512 + defaultLogLevel string = "info" ) -func setLogLevel() { +func setGCPercent() { + ratio := defaultGCPercent + if val, ok := os.LookupEnv("GC"); ok { + ratio, _ = strconv.Atoi(val) + } + + debug.SetGCPercent(ratio) + log.Info().Msgf("Tune: setGCPercent to %d", ratio) +} + +func setLogLevel() { logLevel := defaultLogLevel if val, ok := os.LookupEnv("LOG_LEVEL"); ok { logLevel = val } - switch logLevel { - case "", "ERROR", "error": // If env is unset, set level to ERROR. + switch logLevel { + case "", "ERROR", "error": // If env is unset, set level to ERROR. 
zerolog.SetGlobalLevel(zerolog.ErrorLevel) - case "WARNING", "warning": + case "WARNING", "warning": zerolog.SetGlobalLevel(zerolog.WarnLevel) - case "DEBUG", "debug": + case "DEBUG", "debug": zerolog.SetGlobalLevel(zerolog.DebugLevel) - case "INFO", "info": + case "INFO", "info": zerolog.SetGlobalLevel(zerolog.InfoLevel) - case "TRACE", "trace": + case "TRACE", "trace": zerolog.SetGlobalLevel(zerolog.TraceLevel) default: // Set default log level to info zerolog.SetGlobalLevel(zerolog.InfoLevel) - } + } log.Info().Msgf("Set global log level: %s", logLevel) } +func GetMemCTimeout() int { + timeout := defaultMemCTimeout + if val, ok := os.LookupEnv("MEMC_TIMEOUT"); ok { + timeout, _ = strconv.Atoi(val) + } + log.Info().Msgf("Tune: GetMemCTimeout %d", timeout) + return timeout +} + +// Hack of memcache.New to avoid 'no server error' during running +func NewMemCClient(server ...string) *memcache.Client { + ss := new(memcache.ServerList) + err := ss.SetServers(server...) + if err != nil { + // Hack: panic early to avoid pod restart during running + panic(err) + //return nil, err + } else { + memc_client := memcache.NewFromSelector(ss) + memc_client.Timeout = time.Second * time.Duration(GetMemCTimeout()) + memc_client.MaxIdleConns = defaultMemCMaxIdleConns + return memc_client + } +} + +func NewMemCClient2(servers string) *memcache.Client { + ss := new(memcache.ServerList) + server_list := strings.Split(servers, ",") + err := ss.SetServers(server_list...) + if err != nil { + // Hack: panic early to avoid pod restart during running + panic(err) + //return nil, err + } else { + memc_client := memcache.NewFromSelector(ss) + memc_client.Timeout = time.Second * time.Duration(GetMemCTimeout()) + memc_client.MaxIdleConns = defaultMemCMaxIdleConns + return memc_client + } +} + func Init() { setLogLevel() + setGCPercent() } diff --git a/hotelReservation/vendor/github.com/apache/thrift/LICENSE b/hotelReservation/vendor/github.com/apache/thrift/LICENSE deleted file mode 100644 index 3b6d7d74c..000000000 --- a/hotelReservation/vendor/github.com/apache/thrift/LICENSE +++ /dev/null @@ -1,239 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - --------------------------------------------------- -SOFTWARE DISTRIBUTED WITH THRIFT: - -The Apache Thrift software includes a number of subcomponents with -separate copyright notices and license terms. Your use of the source -code for the these subcomponents is subject to the terms and -conditions of the following licenses. - --------------------------------------------------- -Portions of the following files are licensed under the MIT License: - - lib/erl/src/Makefile.am - -Please see doc/otp-base-license.txt for the full terms of this license. - --------------------------------------------------- -For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components: - -# Copyright (c) 2007 Thomas Porschberg -# -# Copying and distribution of this file, with or without -# modification, are permitted in any medium without royalty provided -# the copyright notice and this notice are preserved. 
- --------------------------------------------------- -For the lib/nodejs/lib/thrift/json_parse.js: - -/* - json_parse.js - 2015-05-02 - Public Domain. - NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. - -*/ -(By Douglas Crockford ) --------------------------------------------------- diff --git a/hotelReservation/vendor/github.com/apache/thrift/NOTICE b/hotelReservation/vendor/github.com/apache/thrift/NOTICE deleted file mode 100644 index c23995a23..000000000 --- a/hotelReservation/vendor/github.com/apache/thrift/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -Apache Thrift -Copyright 2006-2010 The Apache Software Foundation. - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). \ No newline at end of file diff --git a/hotelReservation/vendor/github.com/apache/thrift/contrib/fb303/LICENSE b/hotelReservation/vendor/github.com/apache/thrift/contrib/fb303/LICENSE deleted file mode 100644 index 4eacb6431..000000000 --- a/hotelReservation/vendor/github.com/apache/thrift/contrib/fb303/LICENSE +++ /dev/null @@ -1,16 +0,0 @@ -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. diff --git a/hotelReservation/vendor/github.com/apache/thrift/debian/copyright b/hotelReservation/vendor/github.com/apache/thrift/debian/copyright deleted file mode 100644 index 850643c9a..000000000 --- a/hotelReservation/vendor/github.com/apache/thrift/debian/copyright +++ /dev/null @@ -1,129 +0,0 @@ -This package was debianized by Thrift Developer's . - - -This package and the Debian packaging is licensed under the Apache License, -see `/usr/share/common-licenses/Apache-2.0'. - -The following information was copied from Apache Thrift LICENSE file. - --------------------------------------------------- -SOFTWARE DISTRIBUTED WITH THRIFT: - -The Apache Thrift software includes a number of subcomponents with -separate copyright notices and license terms. Your use of the source -code for the these subcomponents is subject to the terms and -conditions of the following licenses. - --------------------------------------------------- -Portions of the following files are licensed under the MIT License: - - lib/erl/src/Makefile.am - -Please see doc/otp-base-license.txt for the full terms of this license. 
- - --------------------------------------------------- -The following files contain some portions of code contributed under -the Thrift Software License (see doc/old-thrift-license.txt), and relicensed -under the Apache 2.0 License: - - compiler/cpp/Makefile.am - compiler/cpp/src/generate/t_cocoa_generator.cc - compiler/cpp/src/generate/t_cpp_generator.cc - compiler/cpp/src/generate/t_csharp_generator.cc - compiler/cpp/src/generate/t_erl_generator.cc - compiler/cpp/src/generate/t_hs_generator.cc - compiler/cpp/src/generate/t_java_generator.cc - compiler/cpp/src/generate/t_ocaml_generator.cc - compiler/cpp/src/generate/t_perl_generator.cc - compiler/cpp/src/generate/t_php_generator.cc - compiler/cpp/src/generate/t_py_generator.cc - compiler/cpp/src/generate/t_rb_generator.cc - compiler/cpp/src/generate/t_st_generator.cc - compiler/cpp/src/generate/t_xsd_generator.cc - compiler/cpp/src/main.cc - compiler/cpp/src/parse/t_field.h - compiler/cpp/src/parse/t_program.h - compiler/cpp/src/platform.h - compiler/cpp/src/thriftl.ll - compiler/cpp/src/thrifty.yy - lib/csharp/src/Protocol/TBinaryProtocol.cs - lib/csharp/src/Protocol/TField.cs - lib/csharp/src/Protocol/TList.cs - lib/csharp/src/Protocol/TMap.cs - lib/csharp/src/Protocol/TMessage.cs - lib/csharp/src/Protocol/TMessageType.cs - lib/csharp/src/Protocol/TProtocol.cs - lib/csharp/src/Protocol/TProtocolException.cs - lib/csharp/src/Protocol/TProtocolFactory.cs - lib/csharp/src/Protocol/TProtocolUtil.cs - lib/csharp/src/Protocol/TSet.cs - lib/csharp/src/Protocol/TStruct.cs - lib/csharp/src/Protocol/TType.cs - lib/csharp/src/Server/TServer.cs - lib/csharp/src/Server/TSimpleServer.cs - lib/csharp/src/Server/TThreadPoolServer.cs - lib/csharp/src/TApplicationException.cs - lib/csharp/src/Thrift.csproj - lib/csharp/src/Thrift.sln - lib/csharp/src/TProcessor.cs - lib/csharp/src/Transport/TServerSocket.cs - lib/csharp/src/Transport/TServerTransport.cs - lib/csharp/src/Transport/TSocket.cs - lib/csharp/src/Transport/TStreamTransport.cs - lib/csharp/src/Transport/TTransport.cs - lib/csharp/src/Transport/TTransportException.cs - lib/csharp/src/Transport/TTransportFactory.cs - lib/csharp/ThriftMSBuildTask/Properties/AssemblyInfo.cs - lib/csharp/ThriftMSBuildTask/ThriftBuild.cs - lib/csharp/ThriftMSBuildTask/ThriftMSBuildTask.csproj - lib/rb/lib/thrift.rb - lib/st/README - lib/st/thrift.st - test/OptionalRequiredTest.cpp - test/OptionalRequiredTest.thrift - test/ThriftTest.thrift - --------------------------------------------------- -For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components: - -# Copyright (c) 2007 Thomas Porschberg -# -# Copying and distribution of this file, with or without -# modification, are permitted in any medium without royalty provided -# the copyright notice and this notice are preserved. - --------------------------------------------------- -For the compiler/cpp/src/md5.[ch] components: - -/* - Copyright (C) 1999, 2000, 2002 Aladdin Enterprises. All rights reserved. - - This software is provided 'as-is', without any express or implied - warranty. In no event will the authors be held liable for any damages - arising from the use of this software. - - Permission is granted to anyone to use this software for any purpose, - including commercial applications, and to alter it and redistribute it - freely, subject to the following restrictions: - - 1. The origin of this software must not be misrepresented; you must not - claim that you wrote the original software. 
If you use this software - in a product, an acknowledgment in the product documentation would be - appreciated but is not required. - 2. Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original software. - 3. This notice may not be removed or altered from any source distribution. - - L. Peter Deutsch - ghost@aladdin.com - - */ - ---------------------------------------------------- -For the lib/rb/setup.rb: Copyright (c) 2000-2005 Minero Aoki, -lib/ocaml/OCamlMakefile and lib/ocaml/README-OCamlMakefile components: - Copyright (C) 1999 - 2007 Markus Mottl - -Licensed under the terms of the GNU Lesser General Public License 2.1 -(see doc/lgpl-2.1.txt for the full terms of this license) diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/dart/LICENSE_HEADER b/hotelReservation/vendor/github.com/apache/thrift/lib/dart/LICENSE_HEADER deleted file mode 100644 index 4eacb6431..000000000 --- a/hotelReservation/vendor/github.com/apache/thrift/lib/dart/LICENSE_HEADER +++ /dev/null @@ -1,16 +0,0 @@ -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/application_exception.go b/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/application_exception.go deleted file mode 100644 index 6655cc5a9..000000000 --- a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/application_exception.go +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -const ( - UNKNOWN_APPLICATION_EXCEPTION = 0 - UNKNOWN_METHOD = 1 - INVALID_MESSAGE_TYPE_EXCEPTION = 2 - WRONG_METHOD_NAME = 3 - BAD_SEQUENCE_ID = 4 - MISSING_RESULT = 5 - INTERNAL_ERROR = 6 - PROTOCOL_ERROR = 7 -) - -// Application level Thrift exception -type TApplicationException interface { - TException - TypeId() int32 - Read(iprot TProtocol) (TApplicationException, error) - Write(oprot TProtocol) error -} - -type tApplicationException struct { - message string - type_ int32 -} - -func (e tApplicationException) Error() string { - return e.message -} - -func NewTApplicationException(type_ int32, message string) TApplicationException { - return &tApplicationException{message, type_} -} - -func (p *tApplicationException) TypeId() int32 { - return p.type_ -} - -func (p *tApplicationException) Read(iprot TProtocol) (TApplicationException, error) { - _, err := iprot.ReadStructBegin() - if err != nil { - return nil, err - } - - message := "" - type_ := int32(UNKNOWN_APPLICATION_EXCEPTION) - - for { - _, ttype, id, err := iprot.ReadFieldBegin() - if err != nil { - return nil, err - } - if ttype == STOP { - break - } - switch id { - case 1: - if ttype == STRING { - if message, err = iprot.ReadString(); err != nil { - return nil, err - } - } else { - if err = SkipDefaultDepth(iprot, ttype); err != nil { - return nil, err - } - } - case 2: - if ttype == I32 { - if type_, err = iprot.ReadI32(); err != nil { - return nil, err - } - } else { - if err = SkipDefaultDepth(iprot, ttype); err != nil { - return nil, err - } - } - default: - if err = SkipDefaultDepth(iprot, ttype); err != nil { - return nil, err - } - } - if err = iprot.ReadFieldEnd(); err != nil { - return nil, err - } - } - return NewTApplicationException(type_, message), iprot.ReadStructEnd() -} - -func (p *tApplicationException) Write(oprot TProtocol) (err error) { - err = oprot.WriteStructBegin("TApplicationException") - if len(p.Error()) > 0 { - err = oprot.WriteFieldBegin("message", STRING, 1) - if err != nil { - return - } - err = oprot.WriteString(p.Error()) - if err != nil { - return - } - err = oprot.WriteFieldEnd() - if err != nil { - return - } - } - err = oprot.WriteFieldBegin("type", I32, 2) - if err != nil { - return - } - err = oprot.WriteI32(p.type_) - if err != nil { - return - } - err = oprot.WriteFieldEnd() - if err != nil { - return - } - err = oprot.WriteFieldStop() - if err != nil { - return - } - err = oprot.WriteStructEnd() - return -} diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/binary_protocol.go b/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/binary_protocol.go deleted file mode 100644 index 690d34111..000000000 --- a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/binary_protocol.go +++ /dev/null @@ -1,514 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "math" -) - -type TBinaryProtocol struct { - trans TRichTransport - origTransport TTransport - reader io.Reader - writer io.Writer - strictRead bool - strictWrite bool - buffer [64]byte -} - -type TBinaryProtocolFactory struct { - strictRead bool - strictWrite bool -} - -func NewTBinaryProtocolTransport(t TTransport) *TBinaryProtocol { - return NewTBinaryProtocol(t, false, true) -} - -func NewTBinaryProtocol(t TTransport, strictRead, strictWrite bool) *TBinaryProtocol { - p := &TBinaryProtocol{origTransport: t, strictRead: strictRead, strictWrite: strictWrite} - if et, ok := t.(TRichTransport); ok { - p.trans = et - } else { - p.trans = NewTRichTransport(t) - } - p.reader = p.trans - p.writer = p.trans - return p -} - -func NewTBinaryProtocolFactoryDefault() *TBinaryProtocolFactory { - return NewTBinaryProtocolFactory(false, true) -} - -func NewTBinaryProtocolFactory(strictRead, strictWrite bool) *TBinaryProtocolFactory { - return &TBinaryProtocolFactory{strictRead: strictRead, strictWrite: strictWrite} -} - -func (p *TBinaryProtocolFactory) GetProtocol(t TTransport) TProtocol { - return NewTBinaryProtocol(t, p.strictRead, p.strictWrite) -} - -/** - * Writing Methods - */ - -func (p *TBinaryProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error { - if p.strictWrite { - version := uint32(VERSION_1) | uint32(typeId) - e := p.WriteI32(int32(version)) - if e != nil { - return e - } - e = p.WriteString(name) - if e != nil { - return e - } - e = p.WriteI32(seqId) - return e - } else { - e := p.WriteString(name) - if e != nil { - return e - } - e = p.WriteByte(int8(typeId)) - if e != nil { - return e - } - e = p.WriteI32(seqId) - return e - } - return nil -} - -func (p *TBinaryProtocol) WriteMessageEnd() error { - return nil -} - -func (p *TBinaryProtocol) WriteStructBegin(name string) error { - return nil -} - -func (p *TBinaryProtocol) WriteStructEnd() error { - return nil -} - -func (p *TBinaryProtocol) WriteFieldBegin(name string, typeId TType, id int16) error { - e := p.WriteByte(int8(typeId)) - if e != nil { - return e - } - e = p.WriteI16(id) - return e -} - -func (p *TBinaryProtocol) WriteFieldEnd() error { - return nil -} - -func (p *TBinaryProtocol) WriteFieldStop() error { - e := p.WriteByte(STOP) - return e -} - -func (p *TBinaryProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error { - e := p.WriteByte(int8(keyType)) - if e != nil { - return e - } - e = p.WriteByte(int8(valueType)) - if e != nil { - return e - } - e = p.WriteI32(int32(size)) - return e -} - -func (p *TBinaryProtocol) WriteMapEnd() error { - return nil -} - -func (p *TBinaryProtocol) WriteListBegin(elemType TType, size int) error { - e := p.WriteByte(int8(elemType)) - if e != nil { - return e - } - e = p.WriteI32(int32(size)) - return e -} - -func (p *TBinaryProtocol) WriteListEnd() error { - return nil -} - -func (p *TBinaryProtocol) WriteSetBegin(elemType TType, size int) error { - e := p.WriteByte(int8(elemType)) - if e != nil { - return e - } - e = p.WriteI32(int32(size)) - return e -} - -func (p *TBinaryProtocol) WriteSetEnd() error { - return nil -} - -func (p *TBinaryProtocol) WriteBool(value bool) error { - if value { - return p.WriteByte(1) - } - return p.WriteByte(0) -} - -func (p *TBinaryProtocol) WriteByte(value int8) error { - e := p.trans.WriteByte(byte(value)) - 
return NewTProtocolException(e) -} - -func (p *TBinaryProtocol) WriteI16(value int16) error { - v := p.buffer[0:2] - binary.BigEndian.PutUint16(v, uint16(value)) - _, e := p.writer.Write(v) - return NewTProtocolException(e) -} - -func (p *TBinaryProtocol) WriteI32(value int32) error { - v := p.buffer[0:4] - binary.BigEndian.PutUint32(v, uint32(value)) - _, e := p.writer.Write(v) - return NewTProtocolException(e) -} - -func (p *TBinaryProtocol) WriteI64(value int64) error { - v := p.buffer[0:8] - binary.BigEndian.PutUint64(v, uint64(value)) - _, err := p.writer.Write(v) - return NewTProtocolException(err) -} - -func (p *TBinaryProtocol) WriteDouble(value float64) error { - return p.WriteI64(int64(math.Float64bits(value))) -} - -func (p *TBinaryProtocol) WriteString(value string) error { - e := p.WriteI32(int32(len(value))) - if e != nil { - return e - } - _, err := p.trans.WriteString(value) - return NewTProtocolException(err) -} - -func (p *TBinaryProtocol) WriteBinary(value []byte) error { - e := p.WriteI32(int32(len(value))) - if e != nil { - return e - } - _, err := p.writer.Write(value) - return NewTProtocolException(err) -} - -/** - * Reading methods - */ - -func (p *TBinaryProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) { - size, e := p.ReadI32() - if e != nil { - return "", typeId, 0, NewTProtocolException(e) - } - if size < 0 { - typeId = TMessageType(size & 0x0ff) - version := int64(int64(size) & VERSION_MASK) - if version != VERSION_1 { - return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Bad version in ReadMessageBegin")) - } - name, e = p.ReadString() - if e != nil { - return name, typeId, seqId, NewTProtocolException(e) - } - seqId, e = p.ReadI32() - if e != nil { - return name, typeId, seqId, NewTProtocolException(e) - } - return name, typeId, seqId, nil - } - if p.strictRead { - return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Missing version in ReadMessageBegin")) - } - name, e2 := p.readStringBody(size) - if e2 != nil { - return name, typeId, seqId, e2 - } - b, e3 := p.ReadByte() - if e3 != nil { - return name, typeId, seqId, e3 - } - typeId = TMessageType(b) - seqId, e4 := p.ReadI32() - if e4 != nil { - return name, typeId, seqId, e4 - } - return name, typeId, seqId, nil -} - -func (p *TBinaryProtocol) ReadMessageEnd() error { - return nil -} - -func (p *TBinaryProtocol) ReadStructBegin() (name string, err error) { - return -} - -func (p *TBinaryProtocol) ReadStructEnd() error { - return nil -} - -func (p *TBinaryProtocol) ReadFieldBegin() (name string, typeId TType, seqId int16, err error) { - t, err := p.ReadByte() - typeId = TType(t) - if err != nil { - return name, typeId, seqId, err - } - if t != STOP { - seqId, err = p.ReadI16() - } - return name, typeId, seqId, err -} - -func (p *TBinaryProtocol) ReadFieldEnd() error { - return nil -} - -var invalidDataLength = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Invalid data length")) - -func (p *TBinaryProtocol) ReadMapBegin() (kType, vType TType, size int, err error) { - k, e := p.ReadByte() - if e != nil { - err = NewTProtocolException(e) - return - } - kType = TType(k) - v, e := p.ReadByte() - if e != nil { - err = NewTProtocolException(e) - return - } - vType = TType(v) - size32, e := p.ReadI32() - if e != nil { - err = NewTProtocolException(e) - return - } - if size32 < 0 { - err = invalidDataLength - return - } - size = int(size32) - return kType, vType, size, nil -} - -func (p *TBinaryProtocol) 
ReadMapEnd() error { - return nil -} - -func (p *TBinaryProtocol) ReadListBegin() (elemType TType, size int, err error) { - b, e := p.ReadByte() - if e != nil { - err = NewTProtocolException(e) - return - } - elemType = TType(b) - size32, e := p.ReadI32() - if e != nil { - err = NewTProtocolException(e) - return - } - if size32 < 0 { - err = invalidDataLength - return - } - size = int(size32) - - return -} - -func (p *TBinaryProtocol) ReadListEnd() error { - return nil -} - -func (p *TBinaryProtocol) ReadSetBegin() (elemType TType, size int, err error) { - b, e := p.ReadByte() - if e != nil { - err = NewTProtocolException(e) - return - } - elemType = TType(b) - size32, e := p.ReadI32() - if e != nil { - err = NewTProtocolException(e) - return - } - if size32 < 0 { - err = invalidDataLength - return - } - size = int(size32) - return elemType, size, nil -} - -func (p *TBinaryProtocol) ReadSetEnd() error { - return nil -} - -func (p *TBinaryProtocol) ReadBool() (bool, error) { - b, e := p.ReadByte() - v := true - if b != 1 { - v = false - } - return v, e -} - -func (p *TBinaryProtocol) ReadByte() (int8, error) { - v, err := p.trans.ReadByte() - return int8(v), err -} - -func (p *TBinaryProtocol) ReadI16() (value int16, err error) { - buf := p.buffer[0:2] - err = p.readAll(buf) - value = int16(binary.BigEndian.Uint16(buf)) - return value, err -} - -func (p *TBinaryProtocol) ReadI32() (value int32, err error) { - buf := p.buffer[0:4] - err = p.readAll(buf) - value = int32(binary.BigEndian.Uint32(buf)) - return value, err -} - -func (p *TBinaryProtocol) ReadI64() (value int64, err error) { - buf := p.buffer[0:8] - err = p.readAll(buf) - value = int64(binary.BigEndian.Uint64(buf)) - return value, err -} - -func (p *TBinaryProtocol) ReadDouble() (value float64, err error) { - buf := p.buffer[0:8] - err = p.readAll(buf) - value = math.Float64frombits(binary.BigEndian.Uint64(buf)) - return value, err -} - -func (p *TBinaryProtocol) ReadString() (value string, err error) { - size, e := p.ReadI32() - if e != nil { - return "", e - } - if size < 0 { - err = invalidDataLength - return - } - - return p.readStringBody(size) -} - -func (p *TBinaryProtocol) ReadBinary() ([]byte, error) { - size, e := p.ReadI32() - if e != nil { - return nil, e - } - if size < 0 { - return nil, invalidDataLength - } - if uint64(size) > p.trans.RemainingBytes() { - return nil, invalidDataLength - } - - isize := int(size) - buf := make([]byte, isize) - _, err := io.ReadFull(p.trans, buf) - return buf, NewTProtocolException(err) -} - -func (p *TBinaryProtocol) Flush() (err error) { - return NewTProtocolException(p.trans.Flush()) -} - -func (p *TBinaryProtocol) Skip(fieldType TType) (err error) { - return SkipDefaultDepth(p, fieldType) -} - -func (p *TBinaryProtocol) Transport() TTransport { - return p.origTransport -} - -func (p *TBinaryProtocol) readAll(buf []byte) error { - _, err := io.ReadFull(p.reader, buf) - return NewTProtocolException(err) -} - -const readLimit = 32768 - -func (p *TBinaryProtocol) readStringBody(size int32) (value string, err error) { - if size < 0 { - return "", nil - } - if uint64(size) > p.trans.RemainingBytes() { - return "", invalidDataLength - } - - var ( - buf bytes.Buffer - e error - b []byte - ) - - switch { - case int(size) <= len(p.buffer): - b = p.buffer[:size] // avoids allocation for small reads - case int(size) < readLimit: - b = make([]byte, size) - default: - b = make([]byte, readLimit) - } - - for size > 0 { - _, e = io.ReadFull(p.trans, b) - buf.Write(b) - if e != nil { - break - } - 
size -= readLimit - if size < readLimit && size > 0 { - b = b[:size] - } - } - return buf.String(), NewTProtocolException(e) -} diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/buffered_transport.go b/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/buffered_transport.go deleted file mode 100644 index f73a98b6c..000000000 --- a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/buffered_transport.go +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "bufio" -) - -type TBufferedTransportFactory struct { - size int -} - -type TBufferedTransport struct { - bufio.ReadWriter - tp TTransport -} - -func (p *TBufferedTransportFactory) GetTransport(trans TTransport) TTransport { - return NewTBufferedTransport(trans, p.size) -} - -func NewTBufferedTransportFactory(bufferSize int) *TBufferedTransportFactory { - return &TBufferedTransportFactory{size: bufferSize} -} - -func NewTBufferedTransport(trans TTransport, bufferSize int) *TBufferedTransport { - return &TBufferedTransport{ - ReadWriter: bufio.ReadWriter{ - Reader: bufio.NewReaderSize(trans, bufferSize), - Writer: bufio.NewWriterSize(trans, bufferSize), - }, - tp: trans, - } -} - -func (p *TBufferedTransport) IsOpen() bool { - return p.tp.IsOpen() -} - -func (p *TBufferedTransport) Open() (err error) { - return p.tp.Open() -} - -func (p *TBufferedTransport) Close() (err error) { - return p.tp.Close() -} - -func (p *TBufferedTransport) Read(b []byte) (int, error) { - n, err := p.ReadWriter.Read(b) - if err != nil { - p.ReadWriter.Reader.Reset(p.tp) - } - return n, err -} - -func (p *TBufferedTransport) Write(b []byte) (int, error) { - n, err := p.ReadWriter.Write(b) - if err != nil { - p.ReadWriter.Writer.Reset(p.tp) - } - return n, err -} - -func (p *TBufferedTransport) Flush() error { - if err := p.ReadWriter.Flush(); err != nil { - p.ReadWriter.Writer.Reset(p.tp) - return err - } - return p.tp.Flush() -} - -func (p *TBufferedTransport) RemainingBytes() (num_bytes uint64) { - return p.tp.RemainingBytes() -} diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/compact_protocol.go b/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/compact_protocol.go deleted file mode 100644 index 0bc5fddeb..000000000 --- a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/compact_protocol.go +++ /dev/null @@ -1,815 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "encoding/binary" - "fmt" - "io" - "math" -) - -const ( - COMPACT_PROTOCOL_ID = 0x082 - COMPACT_VERSION = 1 - COMPACT_VERSION_MASK = 0x1f - COMPACT_TYPE_MASK = 0x0E0 - COMPACT_TYPE_BITS = 0x07 - COMPACT_TYPE_SHIFT_AMOUNT = 5 -) - -type tCompactType byte - -const ( - COMPACT_BOOLEAN_TRUE = 0x01 - COMPACT_BOOLEAN_FALSE = 0x02 - COMPACT_BYTE = 0x03 - COMPACT_I16 = 0x04 - COMPACT_I32 = 0x05 - COMPACT_I64 = 0x06 - COMPACT_DOUBLE = 0x07 - COMPACT_BINARY = 0x08 - COMPACT_LIST = 0x09 - COMPACT_SET = 0x0A - COMPACT_MAP = 0x0B - COMPACT_STRUCT = 0x0C -) - -var ( - ttypeToCompactType map[TType]tCompactType -) - -func init() { - ttypeToCompactType = map[TType]tCompactType{ - STOP: STOP, - BOOL: COMPACT_BOOLEAN_TRUE, - BYTE: COMPACT_BYTE, - I16: COMPACT_I16, - I32: COMPACT_I32, - I64: COMPACT_I64, - DOUBLE: COMPACT_DOUBLE, - STRING: COMPACT_BINARY, - LIST: COMPACT_LIST, - SET: COMPACT_SET, - MAP: COMPACT_MAP, - STRUCT: COMPACT_STRUCT, - } -} - -type TCompactProtocolFactory struct{} - -func NewTCompactProtocolFactory() *TCompactProtocolFactory { - return &TCompactProtocolFactory{} -} - -func (p *TCompactProtocolFactory) GetProtocol(trans TTransport) TProtocol { - return NewTCompactProtocol(trans) -} - -type TCompactProtocol struct { - trans TRichTransport - origTransport TTransport - - // Used to keep track of the last field for the current and previous structs, - // so we can do the delta stuff. - lastField []int - lastFieldId int - - // If we encounter a boolean field begin, save the TField here so it can - // have the value incorporated. - booleanFieldName string - booleanFieldId int16 - booleanFieldPending bool - - // If we read a field header, and it's a boolean field, save the boolean - // value here so that readBool can use it. - boolValue bool - boolValueIsNotNull bool - buffer [64]byte -} - -// Create a TCompactProtocol given a TTransport -func NewTCompactProtocol(trans TTransport) *TCompactProtocol { - p := &TCompactProtocol{origTransport: trans, lastField: []int{}} - if et, ok := trans.(TRichTransport); ok { - p.trans = et - } else { - p.trans = NewTRichTransport(trans) - } - - return p - -} - -// -// Public Writing methods. -// - -// Write a message header to the wire. Compact Protocol messages contain the -// protocol version so we can migrate forwards in the future if need be. 
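// Illustrative aside (not part of the vendored file): a byte-level sketch of
// the header WriteMessageBegin below produces for a hypothetical CALL
// (TMessageType 1) named "ping" with seqid 7:
//
//	0x82                 // COMPACT_PROTOCOL_ID
//	0x21                 // (version 1 & 0x1f) | ((type 1 << 5) & 0xE0)
//	0x07                 // seqid 7 as a plain (non-zigzag) varint
//	0x04 'p' 'i' 'n' 'g' // varint name length, then the UTF-8 bytes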
-func (p *TCompactProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error { - err := p.writeByteDirect(COMPACT_PROTOCOL_ID) - if err != nil { - return NewTProtocolException(err) - } - err = p.writeByteDirect((COMPACT_VERSION & COMPACT_VERSION_MASK) | ((byte(typeId) << COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_MASK)) - if err != nil { - return NewTProtocolException(err) - } - _, err = p.writeVarint32(seqid) - if err != nil { - return NewTProtocolException(err) - } - e := p.WriteString(name) - return e - -} - -func (p *TCompactProtocol) WriteMessageEnd() error { return nil } - -// Write a struct begin. This doesn't actually put anything on the wire. We -// use it as an opportunity to put special placeholder markers on the field -// stack so we can get the field id deltas correct. -func (p *TCompactProtocol) WriteStructBegin(name string) error { - p.lastField = append(p.lastField, p.lastFieldId) - p.lastFieldId = 0 - return nil -} - -// Write a struct end. This doesn't actually put anything on the wire. We use -// this as an opportunity to pop the last field from the current struct off -// of the field stack. -func (p *TCompactProtocol) WriteStructEnd() error { - p.lastFieldId = p.lastField[len(p.lastField)-1] - p.lastField = p.lastField[:len(p.lastField)-1] - return nil -} - -func (p *TCompactProtocol) WriteFieldBegin(name string, typeId TType, id int16) error { - if typeId == BOOL { - // we want to possibly include the value, so we'll wait. - p.booleanFieldName, p.booleanFieldId, p.booleanFieldPending = name, id, true - return nil - } - _, err := p.writeFieldBeginInternal(name, typeId, id, 0xFF) - return NewTProtocolException(err) -} - -// The workhorse of writeFieldBegin. It has the option of doing a -// 'type override' of the type header. This is used specifically in the -// boolean field case. -func (p *TCompactProtocol) writeFieldBeginInternal(name string, typeId TType, id int16, typeOverride byte) (int, error) { - // short lastField = lastField_.pop(); - - // if there's a type override, use that. - var typeToWrite byte - if typeOverride == 0xFF { - typeToWrite = byte(p.getCompactType(typeId)) - } else { - typeToWrite = typeOverride - } - // check if we can use delta encoding for the field id - fieldId := int(id) - written := 0 - if fieldId > p.lastFieldId && fieldId-p.lastFieldId <= 15 { - // write them together - err := p.writeByteDirect(byte((fieldId-p.lastFieldId)<<4) | typeToWrite) - if err != nil { - return 0, err - } - } else { - // write them separate - err := p.writeByteDirect(typeToWrite) - if err != nil { - return 0, err - } - err = p.WriteI16(id) - written = 1 + 2 - if err != nil { - return 0, err - } - } - - p.lastFieldId = fieldId - // p.lastField.Push(field.id); - return written, nil -} - -func (p *TCompactProtocol) WriteFieldEnd() error { return nil } - -func (p *TCompactProtocol) WriteFieldStop() error { - err := p.writeByteDirect(STOP) - return NewTProtocolException(err) -} - -func (p *TCompactProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error { - if size == 0 { - err := p.writeByteDirect(0) - return NewTProtocolException(err) - } - _, err := p.writeVarint32(int32(size)) - if err != nil { - return NewTProtocolException(err) - } - err = p.writeByteDirect(byte(p.getCompactType(keyType))<<4 | byte(p.getCompactType(valueType))) - return NewTProtocolException(err) -} - -func (p *TCompactProtocol) WriteMapEnd() error { return nil } - -// Write a list header. 
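// Illustrative aside (not part of the vendored file): a worked example of the
// short-form header writeFieldBeginInternal above emits; the list-header
// writer follows below. With lastFieldId = 1, an I32 field (COMPACT_I32 =
// 0x05) with id = 3 has delta 2, so the header packs into a single byte:
//
//	header := byte((3-1)<<4) | 0x05 // 0x25: delta in high nibble, type in low
//
// Only deltas of 1..15 qualify; anything else takes the long branch above, a
// bare type byte followed by the field id as a zigzag varint.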
-func (p *TCompactProtocol) WriteListBegin(elemType TType, size int) error {
-	_, err := p.writeCollectionBegin(elemType, size)
-	return NewTProtocolException(err)
-}
-
-func (p *TCompactProtocol) WriteListEnd() error { return nil }
-
-// Write a set header.
-func (p *TCompactProtocol) WriteSetBegin(elemType TType, size int) error {
-	_, err := p.writeCollectionBegin(elemType, size)
-	return NewTProtocolException(err)
-}
-
-func (p *TCompactProtocol) WriteSetEnd() error { return nil }
-
-func (p *TCompactProtocol) WriteBool(value bool) error {
-	v := byte(COMPACT_BOOLEAN_FALSE)
-	if value {
-		v = byte(COMPACT_BOOLEAN_TRUE)
-	}
-	if p.booleanFieldPending {
-		// we haven't written the field header yet
-		_, err := p.writeFieldBeginInternal(p.booleanFieldName, BOOL, p.booleanFieldId, v)
-		p.booleanFieldPending = false
-		return NewTProtocolException(err)
-	}
-	// we're not part of a field, so just write the value.
-	err := p.writeByteDirect(v)
-	return NewTProtocolException(err)
-}
-
-// Write a byte. Nothing to see here!
-func (p *TCompactProtocol) WriteByte(value int8) error {
-	err := p.writeByteDirect(byte(value))
-	return NewTProtocolException(err)
-}
-
-// Write an I16 as a zigzag varint.
-func (p *TCompactProtocol) WriteI16(value int16) error {
-	_, err := p.writeVarint32(p.int32ToZigzag(int32(value)))
-	return NewTProtocolException(err)
-}
-
-// Write an i32 as a zigzag varint.
-func (p *TCompactProtocol) WriteI32(value int32) error {
-	_, err := p.writeVarint32(p.int32ToZigzag(value))
-	return NewTProtocolException(err)
-}
-
-// Write an i64 as a zigzag varint.
-func (p *TCompactProtocol) WriteI64(value int64) error {
-	_, err := p.writeVarint64(p.int64ToZigzag(value))
-	return NewTProtocolException(err)
-}
-
-// Write a double to the wire as 8 bytes.
-func (p *TCompactProtocol) WriteDouble(value float64) error {
-	buf := p.buffer[0:8]
-	binary.LittleEndian.PutUint64(buf, math.Float64bits(value))
-	_, err := p.trans.Write(buf)
-	return NewTProtocolException(err)
-}
-
-// Write a string to the wire with a varint size preceding.
-func (p *TCompactProtocol) WriteString(value string) error {
-	_, e := p.writeVarint32(int32(len(value)))
-	if e != nil {
-		return NewTProtocolException(e)
-	}
-	_, e = p.trans.WriteString(value)
-	return NewTProtocolException(e)
-}
-
-// Write a byte array, using a varint for the size.
-func (p *TCompactProtocol) WriteBinary(bin []byte) error {
-	_, e := p.writeVarint32(int32(len(bin)))
-	if e != nil {
-		return NewTProtocolException(e)
-	}
-	if len(bin) > 0 {
-		_, e = p.trans.Write(bin)
-		return NewTProtocolException(e)
-	}
-	return nil
-}
-
-//
-// Reading methods.
-//
-
-// Read a message header.
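// Illustrative aside (not part of the vendored file): a hedged trace of what
// the integer writers above put on the wire. Values are zigzagged, then
// varint-encoded seven bits per byte with the high bit marking continuation:
//
//	WriteI32(-3)  // zigzag 5,   wire bytes: 0x05
//	WriteI32(300) // zigzag 600, wire bytes: 0xD8 0x04
//
// WriteString and WriteBinary use the same varint form, unzigzagged, for
// their length prefixes.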
-func (p *TCompactProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) { - - protocolId, err := p.readByteDirect() - if err != nil { - return - } - - if protocolId != COMPACT_PROTOCOL_ID { - e := fmt.Errorf("Expected protocol id %02x but got %02x", COMPACT_PROTOCOL_ID, protocolId) - return "", typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, e) - } - - versionAndType, err := p.readByteDirect() - if err != nil { - return - } - - version := versionAndType & COMPACT_VERSION_MASK - typeId = TMessageType((versionAndType >> COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_BITS) - if version != COMPACT_VERSION { - e := fmt.Errorf("Expected version %02x but got %02x", COMPACT_VERSION, version) - err = NewTProtocolExceptionWithType(BAD_VERSION, e) - return - } - seqId, e := p.readVarint32() - if e != nil { - err = NewTProtocolException(e) - return - } - name, err = p.ReadString() - return -} - -func (p *TCompactProtocol) ReadMessageEnd() error { return nil } - -// Read a struct begin. There's nothing on the wire for this, but it is our -// opportunity to push a new struct begin marker onto the field stack. -func (p *TCompactProtocol) ReadStructBegin() (name string, err error) { - p.lastField = append(p.lastField, p.lastFieldId) - p.lastFieldId = 0 - return -} - -// Doesn't actually consume any wire data, just removes the last field for -// this struct from the field stack. -func (p *TCompactProtocol) ReadStructEnd() error { - // consume the last field we read off the wire. - p.lastFieldId = p.lastField[len(p.lastField)-1] - p.lastField = p.lastField[:len(p.lastField)-1] - return nil -} - -// Read a field header off the wire. -func (p *TCompactProtocol) ReadFieldBegin() (name string, typeId TType, id int16, err error) { - t, err := p.readByteDirect() - if err != nil { - return - } - - // if it's a stop, then we can return immediately, as the struct is over. - if (t & 0x0f) == STOP { - return "", STOP, 0, nil - } - - // mask off the 4 MSB of the type header. it could contain a field id delta. - modifier := int16((t & 0xf0) >> 4) - if modifier == 0 { - // not a delta. look ahead for the zigzag varint field id. - id, err = p.ReadI16() - if err != nil { - return - } - } else { - // has a delta. add the delta to the last read field id. - id = int16(p.lastFieldId) + modifier - } - typeId, e := p.getTType(tCompactType(t & 0x0f)) - if e != nil { - err = NewTProtocolException(e) - return - } - - // if this happens to be a boolean field, the value is encoded in the type - if p.isBoolType(t) { - // save the boolean value in a special instance variable. - p.boolValue = (byte(t)&0x0f == COMPACT_BOOLEAN_TRUE) - p.boolValueIsNotNull = true - } - - // push the new field onto the field stack so we can keep the deltas going. - p.lastFieldId = int(id) - return -} - -func (p *TCompactProtocol) ReadFieldEnd() error { return nil } - -// Read a map header off the wire. If the size is zero, skip reading the key -// and value type. This means that 0-length maps will yield TMaps without the -// "correct" types. 
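// Illustrative aside (not part of the vendored file): the boolean trick
// ReadFieldBegin above applies. A BOOL field carries no payload byte; its
// value rides in the compact-type nibble. With lastFieldId = 0, a header byte
// of 0x11 decodes as:
//
//	delta := (0x11 & 0xf0) >> 4 // 1, so the field id is 1
//	ctype := 0x11 & 0x0f        // 0x01 == COMPACT_BOOLEAN_TRUE
//
// boolValue is stashed as true, and the later ReadBool call returns it
// without touching the wire again.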
-func (p *TCompactProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, err error) { - size32, e := p.readVarint32() - if e != nil { - err = NewTProtocolException(e) - return - } - if size32 < 0 { - err = invalidDataLength - return - } - size = int(size32) - - keyAndValueType := byte(STOP) - if size != 0 { - keyAndValueType, err = p.readByteDirect() - if err != nil { - return - } - } - keyType, _ = p.getTType(tCompactType(keyAndValueType >> 4)) - valueType, _ = p.getTType(tCompactType(keyAndValueType & 0xf)) - return -} - -func (p *TCompactProtocol) ReadMapEnd() error { return nil } - -// Read a list header off the wire. If the list size is 0-14, the size will -// be packed into the element type header. If it's a longer list, the 4 MSB -// of the element type header will be 0xF, and a varint will follow with the -// true size. -func (p *TCompactProtocol) ReadListBegin() (elemType TType, size int, err error) { - size_and_type, err := p.readByteDirect() - if err != nil { - return - } - size = int((size_and_type >> 4) & 0x0f) - if size == 15 { - size2, e := p.readVarint32() - if e != nil { - err = NewTProtocolException(e) - return - } - if size2 < 0 { - err = invalidDataLength - return - } - size = int(size2) - } - elemType, e := p.getTType(tCompactType(size_and_type)) - if e != nil { - err = NewTProtocolException(e) - return - } - return -} - -func (p *TCompactProtocol) ReadListEnd() error { return nil } - -// Read a set header off the wire. If the set size is 0-14, the size will -// be packed into the element type header. If it's a longer set, the 4 MSB -// of the element type header will be 0xF, and a varint will follow with the -// true size. -func (p *TCompactProtocol) ReadSetBegin() (elemType TType, size int, err error) { - return p.ReadListBegin() -} - -func (p *TCompactProtocol) ReadSetEnd() error { return nil } - -// Read a boolean off the wire. If this is a boolean field, the value should -// already have been read during readFieldBegin, so we'll just consume the -// pre-stored value. Otherwise, read a byte. -func (p *TCompactProtocol) ReadBool() (value bool, err error) { - if p.boolValueIsNotNull { - p.boolValueIsNotNull = false - return p.boolValue, nil - } - v, err := p.readByteDirect() - return v == COMPACT_BOOLEAN_TRUE, err -} - -// Read a single byte off the wire. Nothing interesting here. -func (p *TCompactProtocol) ReadByte() (int8, error) { - v, err := p.readByteDirect() - if err != nil { - return 0, NewTProtocolException(err) - } - return int8(v), err -} - -// Read an i16 from the wire as a zigzag varint. -func (p *TCompactProtocol) ReadI16() (value int16, err error) { - v, err := p.ReadI32() - return int16(v), err -} - -// Read an i32 from the wire as a zigzag varint. -func (p *TCompactProtocol) ReadI32() (value int32, err error) { - v, e := p.readVarint32() - if e != nil { - return 0, NewTProtocolException(e) - } - value = p.zigzagToInt32(v) - return value, nil -} - -// Read an i64 from the wire as a zigzag varint. -func (p *TCompactProtocol) ReadI64() (value int64, err error) { - v, e := p.readVarint64() - if e != nil { - return 0, NewTProtocolException(e) - } - value = p.zigzagToInt64(v) - return value, nil -} - -// No magic here - just read a double off the wire. 
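// Illustrative aside (not part of the vendored file): two hedged examples of
// the header layout ReadListBegin above parses. A 3-element list of I64s fits
// in one byte, while a 40-element list escapes to a trailing varint:
//
//	0x36      // size 3 in the high nibble, COMPACT_I64 (0x06) in the low
//	0xF6 0x28 // high nibble 0xF means "size follows", varint 40 comes next
//
// Sets share this layout byte for byte, which is why ReadSetBegin simply
// delegates to ReadListBegin.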
-func (p *TCompactProtocol) ReadDouble() (value float64, err error) { - longBits := p.buffer[0:8] - _, e := io.ReadFull(p.trans, longBits) - if e != nil { - return 0.0, NewTProtocolException(e) - } - return math.Float64frombits(p.bytesToUint64(longBits)), nil -} - -// Reads a []byte (via readBinary), and then UTF-8 decodes it. -func (p *TCompactProtocol) ReadString() (value string, err error) { - length, e := p.readVarint32() - if e != nil { - return "", NewTProtocolException(e) - } - if length < 0 { - return "", invalidDataLength - } - if uint64(length) > p.trans.RemainingBytes() { - return "", invalidDataLength - } - - if length == 0 { - return "", nil - } - var buf []byte - if length <= int32(len(p.buffer)) { - buf = p.buffer[0:length] - } else { - buf = make([]byte, length) - } - _, e = io.ReadFull(p.trans, buf) - return string(buf), NewTProtocolException(e) -} - -// Read a []byte from the wire. -func (p *TCompactProtocol) ReadBinary() (value []byte, err error) { - length, e := p.readVarint32() - if e != nil { - return nil, NewTProtocolException(e) - } - if length == 0 { - return []byte{}, nil - } - if length < 0 { - return nil, invalidDataLength - } - if uint64(length) > p.trans.RemainingBytes() { - return nil, invalidDataLength - } - - buf := make([]byte, length) - _, e = io.ReadFull(p.trans, buf) - return buf, NewTProtocolException(e) -} - -func (p *TCompactProtocol) Flush() (err error) { - return NewTProtocolException(p.trans.Flush()) -} - -func (p *TCompactProtocol) Skip(fieldType TType) (err error) { - return SkipDefaultDepth(p, fieldType) -} - -func (p *TCompactProtocol) Transport() TTransport { - return p.origTransport -} - -// -// Internal writing methods -// - -// Abstract method for writing the start of lists and sets. List and sets on -// the wire differ only by the type indicator. -func (p *TCompactProtocol) writeCollectionBegin(elemType TType, size int) (int, error) { - if size <= 14 { - return 1, p.writeByteDirect(byte(int32(size<<4) | int32(p.getCompactType(elemType)))) - } - err := p.writeByteDirect(0xf0 | byte(p.getCompactType(elemType))) - if err != nil { - return 0, err - } - m, err := p.writeVarint32(int32(size)) - return 1 + m, err -} - -// Write an i32 as a varint. Results in 1-5 bytes on the wire. -// TODO(pomack): make a permanent buffer like writeVarint64? -func (p *TCompactProtocol) writeVarint32(n int32) (int, error) { - i32buf := p.buffer[0:5] - idx := 0 - for { - if (n & ^0x7F) == 0 { - i32buf[idx] = byte(n) - idx++ - // p.writeByteDirect(byte(n)); - break - // return; - } else { - i32buf[idx] = byte((n & 0x7F) | 0x80) - idx++ - // p.writeByteDirect(byte(((n & 0x7F) | 0x80))); - u := uint32(n) - n = int32(u >> 7) - } - } - return p.trans.Write(i32buf[0:idx]) -} - -// Write an i64 as a varint. Results in 1-10 bytes on the wire. -func (p *TCompactProtocol) writeVarint64(n int64) (int, error) { - varint64out := p.buffer[0:10] - idx := 0 - for { - if (n & ^0x7F) == 0 { - varint64out[idx] = byte(n) - idx++ - break - } else { - varint64out[idx] = byte((n & 0x7F) | 0x80) - idx++ - u := uint64(n) - n = int64(u >> 7) - } - } - return p.trans.Write(varint64out[0:idx]) -} - -// Convert l into a zigzag long. This allows negative numbers to be -// represented compactly as a varint. -func (p *TCompactProtocol) int64ToZigzag(l int64) int64 { - return (l << 1) ^ (l >> 63) -} - -// Convert l into a zigzag long. This allows negative numbers to be -// represented compactly as a varint. 
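// Illustrative aside (not part of the vendored file): a hedged round trip
// through int64ToZigzag above and the 32-bit variants nearby. Zigzag
// interleaves signed values so small magnitudes stay small on the wire:
// 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, and so on.
//
//	// encode: (-3 << 1) ^ (-3 >> 31) == (-6) ^ (-1) == 5
//	// decode: int32(uint32(5) >> 1) ^ -(5 & 1) == 2 ^ -1 == -3
//
// As a varint, -3 therefore costs one byte (0x05) instead of the five bytes a
// sign-extended two's-complement encoding would need.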
-func (p *TCompactProtocol) int32ToZigzag(n int32) int32 { - return (n << 1) ^ (n >> 31) -} - -func (p *TCompactProtocol) fixedUint64ToBytes(n uint64, buf []byte) { - binary.LittleEndian.PutUint64(buf, n) -} - -func (p *TCompactProtocol) fixedInt64ToBytes(n int64, buf []byte) { - binary.LittleEndian.PutUint64(buf, uint64(n)) -} - -// Writes a byte without any possibility of all that field header nonsense. -// Used internally by other writing methods that know they need to write a byte. -func (p *TCompactProtocol) writeByteDirect(b byte) error { - return p.trans.WriteByte(b) -} - -// Writes a byte without any possibility of all that field header nonsense. -func (p *TCompactProtocol) writeIntAsByteDirect(n int) (int, error) { - return 1, p.writeByteDirect(byte(n)) -} - -// -// Internal reading methods -// - -// Read an i32 from the wire as a varint. The MSB of each byte is set -// if there is another byte to follow. This can read up to 5 bytes. -func (p *TCompactProtocol) readVarint32() (int32, error) { - // if the wire contains the right stuff, this will just truncate the i64 we - // read and get us the right sign. - v, err := p.readVarint64() - return int32(v), err -} - -// Read an i64 from the wire as a proper varint. The MSB of each byte is set -// if there is another byte to follow. This can read up to 10 bytes. -func (p *TCompactProtocol) readVarint64() (int64, error) { - shift := uint(0) - result := int64(0) - for { - b, err := p.readByteDirect() - if err != nil { - return 0, err - } - result |= int64(b&0x7f) << shift - if (b & 0x80) != 0x80 { - break - } - shift += 7 - } - return result, nil -} - -// Read a byte, unlike ReadByte that reads Thrift-byte that is i8. -func (p *TCompactProtocol) readByteDirect() (byte, error) { - return p.trans.ReadByte() -} - -// -// encoding helpers -// - -// Convert from zigzag int to int. -func (p *TCompactProtocol) zigzagToInt32(n int32) int32 { - u := uint32(n) - return int32(u>>1) ^ -(n & 1) -} - -// Convert from zigzag long to long. -func (p *TCompactProtocol) zigzagToInt64(n int64) int64 { - u := uint64(n) - return int64(u>>1) ^ -(n & 1) -} - -// Note that it's important that the mask bytes are long literals, -// otherwise they'll default to ints, and when you shift an int left 56 bits, -// you just get a messed up int. -func (p *TCompactProtocol) bytesToInt64(b []byte) int64 { - return int64(binary.LittleEndian.Uint64(b)) -} - -// Note that it's important that the mask bytes are long literals, -// otherwise they'll default to ints, and when you shift an int left 56 bits, -// you just get a messed up int. -func (p *TCompactProtocol) bytesToUint64(b []byte) uint64 { - return binary.LittleEndian.Uint64(b) -} - -// -// type testing and converting -// - -func (p *TCompactProtocol) isBoolType(b byte) bool { - return (b&0x0f) == COMPACT_BOOLEAN_TRUE || (b&0x0f) == COMPACT_BOOLEAN_FALSE -} - -// Given a tCompactType constant, convert it to its corresponding -// TType value. 
-func (p *TCompactProtocol) getTType(t tCompactType) (TType, error) {
-	switch byte(t) & 0x0f {
-	case STOP:
-		return STOP, nil
-	case COMPACT_BOOLEAN_FALSE, COMPACT_BOOLEAN_TRUE:
-		return BOOL, nil
-	case COMPACT_BYTE:
-		return BYTE, nil
-	case COMPACT_I16:
-		return I16, nil
-	case COMPACT_I32:
-		return I32, nil
-	case COMPACT_I64:
-		return I64, nil
-	case COMPACT_DOUBLE:
-		return DOUBLE, nil
-	case COMPACT_BINARY:
-		return STRING, nil
-	case COMPACT_LIST:
-		return LIST, nil
-	case COMPACT_SET:
-		return SET, nil
-	case COMPACT_MAP:
-		return MAP, nil
-	case COMPACT_STRUCT:
-		return STRUCT, nil
-	}
-	return STOP, TException(fmt.Errorf("don't know what type: %d", t&0x0f))
-}
-
-// Given a TType value, find the appropriate TCompactProtocol.Types constant.
-func (p *TCompactProtocol) getCompactType(t TType) tCompactType {
-	return ttypeToCompactType[t]
-}
diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/debug_protocol.go b/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/debug_protocol.go
deleted file mode 100644
index d37252cc6..000000000
--- a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/debug_protocol.go
+++ /dev/null
@@ -1,269 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"log"
-)
-
-type TDebugProtocol struct {
-	Delegate  TProtocol
-	LogPrefix string
-}
-
-type TDebugProtocolFactory struct {
-	Underlying TProtocolFactory
-	LogPrefix  string
-}
-
-func NewTDebugProtocolFactory(underlying TProtocolFactory, logPrefix string) *TDebugProtocolFactory {
-	return &TDebugProtocolFactory{
-		Underlying: underlying,
-		LogPrefix:  logPrefix,
-	}
-}
-
-func (t *TDebugProtocolFactory) GetProtocol(trans TTransport) TProtocol {
-	return &TDebugProtocol{
-		Delegate:  t.Underlying.GetProtocol(trans),
-		LogPrefix: t.LogPrefix,
-	}
-}
-
-func (tdp *TDebugProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error {
-	err := tdp.Delegate.WriteMessageBegin(name, typeId, seqid)
-	log.Printf("%sWriteMessageBegin(name=%#v, typeId=%#v, seqid=%#v) => %#v", tdp.LogPrefix, name, typeId, seqid, err)
-	return err
-}
-func (tdp *TDebugProtocol) WriteMessageEnd() error {
-	err := tdp.Delegate.WriteMessageEnd()
-	log.Printf("%sWriteMessageEnd() => %#v", tdp.LogPrefix, err)
-	return err
-}
-func (tdp *TDebugProtocol) WriteStructBegin(name string) error {
-	err := tdp.Delegate.WriteStructBegin(name)
-	log.Printf("%sWriteStructBegin(name=%#v) => %#v", tdp.LogPrefix, name, err)
-	return err
-}
-func (tdp *TDebugProtocol) WriteStructEnd() error {
-	err := tdp.Delegate.WriteStructEnd()
-	log.Printf("%sWriteStructEnd() => %#v", tdp.LogPrefix, err)
-	return err
-}
-func (tdp *TDebugProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
-	err := tdp.Delegate.WriteFieldBegin(name, typeId, id)
-	log.Printf("%sWriteFieldBegin(name=%#v, typeId=%#v, id=%#v) => %#v", tdp.LogPrefix, name, typeId, id, err)
-	return err
-}
-func (tdp *TDebugProtocol) WriteFieldEnd() error {
-	err := tdp.Delegate.WriteFieldEnd()
-	log.Printf("%sWriteFieldEnd() => %#v", tdp.LogPrefix, err)
-	return err
-}
-func (tdp *TDebugProtocol) WriteFieldStop() error {
-	err := tdp.Delegate.WriteFieldStop()
-	log.Printf("%sWriteFieldStop() => %#v", tdp.LogPrefix, err)
-	return err
-}
-func (tdp *TDebugProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
-	err := tdp.Delegate.WriteMapBegin(keyType, valueType, size)
-	log.Printf("%sWriteMapBegin(keyType=%#v, valueType=%#v, size=%#v) => %#v", tdp.LogPrefix, keyType, valueType, size, err)
-	return err
-}
-func (tdp *TDebugProtocol) WriteMapEnd() error {
-	err := tdp.Delegate.WriteMapEnd()
-	log.Printf("%sWriteMapEnd() => %#v", tdp.LogPrefix, err)
-	return err
-}
-func (tdp *TDebugProtocol) WriteListBegin(elemType TType, size int) error {
-	err := tdp.Delegate.WriteListBegin(elemType, size)
-	log.Printf("%sWriteListBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err)
-	return err
-}
-func (tdp *TDebugProtocol) WriteListEnd() error {
-	err := tdp.Delegate.WriteListEnd()
-	log.Printf("%sWriteListEnd() => %#v", tdp.LogPrefix, err)
-	return err
-}
-func (tdp *TDebugProtocol) WriteSetBegin(elemType TType, size int) error {
-	err := tdp.Delegate.WriteSetBegin(elemType, size)
-	log.Printf("%sWriteSetBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err)
-	return err
-}
-func (tdp *TDebugProtocol) WriteSetEnd() error {
-	err := tdp.Delegate.WriteSetEnd()
-	log.Printf("%sWriteSetEnd() => %#v", tdp.LogPrefix, err)
-	return err
-}
-func (tdp *TDebugProtocol) WriteBool(value bool) error {
-	err := tdp.Delegate.WriteBool(value)
-	log.Printf("%sWriteBool(value=%#v) => %#v", tdp.LogPrefix, value, err)
-	return err
-}
-func (tdp *TDebugProtocol) WriteByte(value int8) error {
-	err := tdp.Delegate.WriteByte(value)
-	log.Printf("%sWriteByte(value=%#v) => %#v", tdp.LogPrefix, value, err)
-	return err
-}
-func (tdp *TDebugProtocol) WriteI16(value int16) error {
-	err := tdp.Delegate.WriteI16(value)
-	log.Printf("%sWriteI16(value=%#v) => %#v", tdp.LogPrefix, value, err)
-	return err
-}
-func (tdp *TDebugProtocol) WriteI32(value int32) error {
-	err := tdp.Delegate.WriteI32(value)
-	log.Printf("%sWriteI32(value=%#v) => %#v", tdp.LogPrefix, value, err)
-	return err
-}
-func (tdp *TDebugProtocol) WriteI64(value int64) error {
-	err := tdp.Delegate.WriteI64(value)
-	log.Printf("%sWriteI64(value=%#v) => %#v", tdp.LogPrefix, value, err)
-	return err
-}
-func (tdp *TDebugProtocol) WriteDouble(value float64) error {
-	err := tdp.Delegate.WriteDouble(value)
-	log.Printf("%sWriteDouble(value=%#v) => %#v", tdp.LogPrefix, value, err)
-	return err
-}
-func (tdp *TDebugProtocol) WriteString(value string) error {
-	err := tdp.Delegate.WriteString(value)
-	log.Printf("%sWriteString(value=%#v) => %#v", tdp.LogPrefix, value, err)
-	return err
-}
-func (tdp *TDebugProtocol) WriteBinary(value []byte) error {
-	err := tdp.Delegate.WriteBinary(value)
-	log.Printf("%sWriteBinary(value=%#v) => %#v", tdp.LogPrefix, value, err)
-	return err
-}
-
-func (tdp *TDebugProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error) {
-	name, typeId, seqid, err = tdp.Delegate.ReadMessageBegin()
-	log.Printf("%sReadMessageBegin() (name=%#v, typeId=%#v, seqid=%#v, err=%#v)", tdp.LogPrefix, name, typeId, seqid, err)
-	return
-}
-func (tdp *TDebugProtocol) ReadMessageEnd() (err error) {
-	err = tdp.Delegate.ReadMessageEnd()
-	log.Printf("%sReadMessageEnd() err=%#v", tdp.LogPrefix, err)
-	return
-}
-func (tdp *TDebugProtocol) ReadStructBegin() (name string, err error) {
-	name, err = tdp.Delegate.ReadStructBegin()
-	log.Printf("%sReadStructBegin() (name=%#v, err=%#v)", tdp.LogPrefix, name, err)
-	return
-}
-func (tdp *TDebugProtocol) ReadStructEnd() (err error) {
-	err = tdp.Delegate.ReadStructEnd()
-	log.Printf("%sReadStructEnd() err=%#v", tdp.LogPrefix, err)
-	return
-}
-func (tdp *TDebugProtocol) ReadFieldBegin() (name string, typeId TType, id int16, err error) {
-	name, typeId, id, err = tdp.Delegate.ReadFieldBegin()
-	log.Printf("%sReadFieldBegin() (name=%#v, typeId=%#v, id=%#v, err=%#v)", tdp.LogPrefix, name, typeId, id, err)
-	return
-}
-func (tdp *TDebugProtocol) ReadFieldEnd() (err error) {
-	err = tdp.Delegate.ReadFieldEnd()
-	log.Printf("%sReadFieldEnd() err=%#v", tdp.LogPrefix, err)
-	return
-}
-func (tdp *TDebugProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, err error) {
-	keyType, valueType, size, err = tdp.Delegate.ReadMapBegin()
-	log.Printf("%sReadMapBegin() (keyType=%#v, valueType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, keyType, valueType, size, err)
-	return
-}
-func (tdp *TDebugProtocol) ReadMapEnd() (err error) {
-	err = tdp.Delegate.ReadMapEnd()
-	log.Printf("%sReadMapEnd() err=%#v", tdp.LogPrefix, err)
-	return
-}
-func (tdp *TDebugProtocol) ReadListBegin() (elemType TType, size int, err error) {
-	elemType, size, err = tdp.Delegate.ReadListBegin()
-	log.Printf("%sReadListBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err)
-	return
-}
-func (tdp *TDebugProtocol) ReadListEnd() (err error) {
-	err = tdp.Delegate.ReadListEnd()
-	log.Printf("%sReadListEnd() err=%#v", tdp.LogPrefix, err)
-	return
-}
-func (tdp *TDebugProtocol) ReadSetBegin() (elemType TType, size int, err error) {
-	elemType, size, err =
tdp.Delegate.ReadSetBegin() - log.Printf("%sReadSetBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err) - return -} -func (tdp *TDebugProtocol) ReadSetEnd() (err error) { - err = tdp.Delegate.ReadSetEnd() - log.Printf("%sReadSetEnd() err=%#v", tdp.LogPrefix, err) - return -} -func (tdp *TDebugProtocol) ReadBool() (value bool, err error) { - value, err = tdp.Delegate.ReadBool() - log.Printf("%sReadBool() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - return -} -func (tdp *TDebugProtocol) ReadByte() (value int8, err error) { - value, err = tdp.Delegate.ReadByte() - log.Printf("%sReadByte() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - return -} -func (tdp *TDebugProtocol) ReadI16() (value int16, err error) { - value, err = tdp.Delegate.ReadI16() - log.Printf("%sReadI16() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - return -} -func (tdp *TDebugProtocol) ReadI32() (value int32, err error) { - value, err = tdp.Delegate.ReadI32() - log.Printf("%sReadI32() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - return -} -func (tdp *TDebugProtocol) ReadI64() (value int64, err error) { - value, err = tdp.Delegate.ReadI64() - log.Printf("%sReadI64() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - return -} -func (tdp *TDebugProtocol) ReadDouble() (value float64, err error) { - value, err = tdp.Delegate.ReadDouble() - log.Printf("%sReadDouble() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - return -} -func (tdp *TDebugProtocol) ReadString() (value string, err error) { - value, err = tdp.Delegate.ReadString() - log.Printf("%sReadString() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - return -} -func (tdp *TDebugProtocol) ReadBinary() (value []byte, err error) { - value, err = tdp.Delegate.ReadBinary() - log.Printf("%sReadBinary() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - return -} -func (tdp *TDebugProtocol) Skip(fieldType TType) (err error) { - err = tdp.Delegate.Skip(fieldType) - log.Printf("%sSkip(fieldType=%#v) (err=%#v)", tdp.LogPrefix, fieldType, err) - return -} -func (tdp *TDebugProtocol) Flush() (err error) { - err = tdp.Delegate.Flush() - log.Printf("%sFlush() (err=%#v)", tdp.LogPrefix, err) - return -} - -func (tdp *TDebugProtocol) Transport() TTransport { - return tdp.Delegate.Transport() -} diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/deserializer.go b/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/deserializer.go deleted file mode 100644 index 91a0983a4..000000000 --- a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/deserializer.go +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -type TDeserializer struct { - Transport TTransport - Protocol TProtocol -} - -func NewTDeserializer() *TDeserializer { - var transport TTransport - transport = NewTMemoryBufferLen(1024) - - protocol := NewTBinaryProtocolFactoryDefault().GetProtocol(transport) - - return &TDeserializer{ - transport, - protocol} -} - -func (t *TDeserializer) ReadString(msg TStruct, s string) (err error) { - err = nil - if _, err = t.Transport.Write([]byte(s)); err != nil { - return - } - if err = msg.Read(t.Protocol); err != nil { - return - } - return -} - -func (t *TDeserializer) Read(msg TStruct, b []byte) (err error) { - err = nil - if _, err = t.Transport.Write(b); err != nil { - return - } - if err = msg.Read(t.Protocol); err != nil { - return - } - return -} diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/exception.go b/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/exception.go deleted file mode 100644 index ea8d6f661..000000000 --- a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/exception.go +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "errors" -) - -// Generic Thrift exception -type TException interface { - error -} - -// Prepends additional information to an error without losing the Thrift exception interface -func PrependError(prepend string, err error) error { - if t, ok := err.(TTransportException); ok { - return NewTTransportException(t.TypeId(), prepend+t.Error()) - } - if t, ok := err.(TProtocolException); ok { - return NewTProtocolExceptionWithType(t.TypeId(), errors.New(prepend+err.Error())) - } - if t, ok := err.(TApplicationException); ok { - return NewTApplicationException(t.TypeId(), prepend+t.Error()) - } - - return errors.New(prepend + err.Error()) -} diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/field.go b/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/field.go deleted file mode 100644 index 9d6652550..000000000 --- a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/field.go +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -// Helper class that encapsulates field metadata. -type field struct { - name string - typeId TType - id int -} - -func newField(n string, t TType, i int) *field { - return &field{name: n, typeId: t, id: i} -} - -func (p *field) Name() string { - if p == nil { - return "" - } - return p.name -} - -func (p *field) TypeId() TType { - if p == nil { - return TType(VOID) - } - return p.typeId -} - -func (p *field) Id() int { - if p == nil { - return -1 - } - return p.id -} - -func (p *field) String() string { - if p == nil { - return "" - } - return "" -} - -var ANONYMOUS_FIELD *field - -type fieldSlice []field - -func (p fieldSlice) Len() int { - return len(p) -} - -func (p fieldSlice) Less(i, j int) bool { - return p[i].Id() < p[j].Id() -} - -func (p fieldSlice) Swap(i, j int) { - p[i], p[j] = p[j], p[i] -} - -func init() { - ANONYMOUS_FIELD = newField("", STOP, 0) -} diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/framed_transport.go b/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/framed_transport.go deleted file mode 100644 index d0bae21bc..000000000 --- a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/framed_transport.go +++ /dev/null @@ -1,167 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "bufio" - "bytes" - "encoding/binary" - "fmt" - "io" -) - -const DEFAULT_MAX_LENGTH = 16384000 - -type TFramedTransport struct { - transport TTransport - buf bytes.Buffer - reader *bufio.Reader - frameSize uint32 //Current remaining size of the frame. 
if ==0 read next frame header - buffer [4]byte - maxLength uint32 -} - -type tFramedTransportFactory struct { - factory TTransportFactory - maxLength uint32 -} - -func NewTFramedTransportFactory(factory TTransportFactory) TTransportFactory { - return &tFramedTransportFactory{factory: factory, maxLength: DEFAULT_MAX_LENGTH} -} - -func NewTFramedTransportFactoryMaxLength(factory TTransportFactory, maxLength uint32) TTransportFactory { - return &tFramedTransportFactory{factory: factory, maxLength: maxLength} -} - -func (p *tFramedTransportFactory) GetTransport(base TTransport) TTransport { - return NewTFramedTransportMaxLength(p.factory.GetTransport(base), p.maxLength) -} - -func NewTFramedTransport(transport TTransport) *TFramedTransport { - return &TFramedTransport{transport: transport, reader: bufio.NewReader(transport), maxLength: DEFAULT_MAX_LENGTH} -} - -func NewTFramedTransportMaxLength(transport TTransport, maxLength uint32) *TFramedTransport { - return &TFramedTransport{transport: transport, reader: bufio.NewReader(transport), maxLength: maxLength} -} - -func (p *TFramedTransport) Open() error { - return p.transport.Open() -} - -func (p *TFramedTransport) IsOpen() bool { - return p.transport.IsOpen() -} - -func (p *TFramedTransport) Close() error { - return p.transport.Close() -} - -func (p *TFramedTransport) Read(buf []byte) (l int, err error) { - if p.frameSize == 0 { - p.frameSize, err = p.readFrameHeader() - if err != nil { - return - } - } - if p.frameSize < uint32(len(buf)) { - frameSize := p.frameSize - tmp := make([]byte, p.frameSize) - l, err = p.Read(tmp) - copy(buf, tmp) - if err == nil { - err = NewTTransportExceptionFromError(fmt.Errorf("Not enough frame size %d to read %d bytes", frameSize, len(buf))) - return - } - } - got, err := p.reader.Read(buf) - p.frameSize = p.frameSize - uint32(got) - //sanity check - if p.frameSize < 0 { - return 0, NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, "Negative frame size") - } - return got, NewTTransportExceptionFromError(err) -} - -func (p *TFramedTransport) ReadByte() (c byte, err error) { - if p.frameSize == 0 { - p.frameSize, err = p.readFrameHeader() - if err != nil { - return - } - } - if p.frameSize < 1 { - return 0, NewTTransportExceptionFromError(fmt.Errorf("Not enough frame size %d to read %d bytes", p.frameSize, 1)) - } - c, err = p.reader.ReadByte() - if err == nil { - p.frameSize-- - } - return -} - -func (p *TFramedTransport) Write(buf []byte) (int, error) { - n, err := p.buf.Write(buf) - return n, NewTTransportExceptionFromError(err) -} - -func (p *TFramedTransport) WriteByte(c byte) error { - return p.buf.WriteByte(c) -} - -func (p *TFramedTransport) WriteString(s string) (n int, err error) { - return p.buf.WriteString(s) -} - -func (p *TFramedTransport) Flush() error { - size := p.buf.Len() - buf := p.buffer[:4] - binary.BigEndian.PutUint32(buf, uint32(size)) - _, err := p.transport.Write(buf) - if err != nil { - return NewTTransportExceptionFromError(err) - } - if size > 0 { - if n, err := p.buf.WriteTo(p.transport); err != nil { - print("Error while flushing write buffer of size ", size, " to transport, only wrote ", n, " bytes: ", err.Error(), "\n") - return NewTTransportExceptionFromError(err) - } - } - err = p.transport.Flush() - return NewTTransportExceptionFromError(err) -} - -func (p *TFramedTransport) readFrameHeader() (uint32, error) { - buf := p.buffer[:4] - if _, err := io.ReadFull(p.reader, buf); err != nil { - return 0, err - } - size := binary.BigEndian.Uint32(buf) - if size < 0 || size > 
p.maxLength { - return 0, NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, fmt.Sprintf("Incorrect frame size (%d)", size)) - } - return size, nil -} - -func (p *TFramedTransport) RemainingBytes() (num_bytes uint64) { - return uint64(p.frameSize) -} - diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/http_client.go b/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/http_client.go deleted file mode 100644 index 88eb2c128..000000000 --- a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/http_client.go +++ /dev/null @@ -1,258 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "bytes" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" -) - -// Default to using the shared http client. Library users are -// free to change this global client or specify one through -// THttpClientOptions. -var DefaultHttpClient *http.Client = http.DefaultClient - -type THttpClient struct { - client *http.Client - response *http.Response - url *url.URL - requestBuffer *bytes.Buffer - header http.Header - nsecConnectTimeout int64 - nsecReadTimeout int64 -} - -type THttpClientTransportFactory struct { - options THttpClientOptions - url string - isPost bool -} - -func (p *THttpClientTransportFactory) GetTransport(trans TTransport) TTransport { - if trans != nil { - t, ok := trans.(*THttpClient) - if ok && t.url != nil { - if t.requestBuffer != nil { - t2, _ := NewTHttpPostClientWithOptions(t.url.String(), p.options) - return t2 - } - t2, _ := NewTHttpClientWithOptions(t.url.String(), p.options) - return t2 - } - } - if p.isPost { - s, _ := NewTHttpPostClientWithOptions(p.url, p.options) - return s - } - s, _ := NewTHttpClientWithOptions(p.url, p.options) - return s -} - -type THttpClientOptions struct { - // If nil, DefaultHttpClient is used - Client *http.Client -} - -func NewTHttpClientTransportFactory(url string) *THttpClientTransportFactory { - return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{}) -} - -func NewTHttpClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory { - return &THttpClientTransportFactory{url: url, isPost: false, options: options} -} - -func NewTHttpPostClientTransportFactory(url string) *THttpClientTransportFactory { - return NewTHttpPostClientTransportFactoryWithOptions(url, THttpClientOptions{}) -} - -func NewTHttpPostClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory { - return &THttpClientTransportFactory{url: url, isPost: true, options: options} -} - -func NewTHttpClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) { - parsedURL, err := url.Parse(urlstr) - if err != nil { - return 
nil, err - } - response, err := http.Get(urlstr) - if err != nil { - return nil, err - } - client := options.Client - if client == nil { - client = DefaultHttpClient - } - httpHeader := map[string][]string{"Content-Type": []string{"application/x-thrift"}} - return &THttpClient{client: client, response: response, url: parsedURL, header: httpHeader}, nil -} - -func NewTHttpClient(urlstr string) (TTransport, error) { - return NewTHttpClientWithOptions(urlstr, THttpClientOptions{}) -} - -func NewTHttpPostClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) { - parsedURL, err := url.Parse(urlstr) - if err != nil { - return nil, err - } - buf := make([]byte, 0, 1024) - client := options.Client - if client == nil { - client = DefaultHttpClient - } - httpHeader := map[string][]string{"Content-Type": []string{"application/x-thrift"}} - return &THttpClient{client: client, url: parsedURL, requestBuffer: bytes.NewBuffer(buf), header: httpHeader}, nil -} - -func NewTHttpPostClient(urlstr string) (TTransport, error) { - return NewTHttpPostClientWithOptions(urlstr, THttpClientOptions{}) -} - -// Set the HTTP Header for this specific Thrift Transport -// It is important that you first assert the TTransport as a THttpClient type -// like so: -// -// httpTrans := trans.(THttpClient) -// httpTrans.SetHeader("User-Agent","Thrift Client 1.0") -func (p *THttpClient) SetHeader(key string, value string) { - p.header.Add(key, value) -} - -// Get the HTTP Header represented by the supplied Header Key for this specific Thrift Transport -// It is important that you first assert the TTransport as a THttpClient type -// like so: -// -// httpTrans := trans.(THttpClient) -// hdrValue := httpTrans.GetHeader("User-Agent") -func (p *THttpClient) GetHeader(key string) string { - return p.header.Get(key) -} - -// Deletes the HTTP Header given a Header Key for this specific Thrift Transport -// It is important that you first assert the TTransport as a THttpClient type -// like so: -// -// httpTrans := trans.(THttpClient) -// httpTrans.DelHeader("User-Agent") -func (p *THttpClient) DelHeader(key string) { - p.header.Del(key) -} - -func (p *THttpClient) Open() error { - // do nothing - return nil -} - -func (p *THttpClient) IsOpen() bool { - return p.response != nil || p.requestBuffer != nil -} - -func (p *THttpClient) closeResponse() error { - var err error - if p.response != nil && p.response.Body != nil { - // The docs specify that if keepalive is enabled and the response body is not - // read to completion the connection will never be returned to the pool and - // reused. Errors are being ignored here because if the connection is invalid - // and this fails for some reason, the Close() method will do any remaining - // cleanup. 
-		io.Copy(ioutil.Discard, p.response.Body)
-
-		err = p.response.Body.Close()
-	}
-
-	p.response = nil
-	return err
-}
-
-func (p *THttpClient) Close() error {
-	if p.requestBuffer != nil {
-		p.requestBuffer.Reset()
-		p.requestBuffer = nil
-	}
-	return p.closeResponse()
-}
-
-func (p *THttpClient) Read(buf []byte) (int, error) {
-	if p.response == nil {
-		return 0, NewTTransportException(NOT_OPEN, "Response buffer is empty, no request.")
-	}
-	n, err := p.response.Body.Read(buf)
-	if n > 0 && (err == nil || err == io.EOF) {
-		return n, nil
-	}
-	return n, NewTTransportExceptionFromError(err)
-}
-
-func (p *THttpClient) ReadByte() (c byte, err error) {
-	return readByte(p.response.Body)
-}
-
-func (p *THttpClient) Write(buf []byte) (int, error) {
-	n, err := p.requestBuffer.Write(buf)
-	return n, err
-}
-
-func (p *THttpClient) WriteByte(c byte) error {
-	return p.requestBuffer.WriteByte(c)
-}
-
-func (p *THttpClient) WriteString(s string) (n int, err error) {
-	return p.requestBuffer.WriteString(s)
-}
-
-func (p *THttpClient) Flush() error {
-	// Close any previous response body to avoid leaking connections.
-	p.closeResponse()
-
-	req, err := http.NewRequest("POST", p.url.String(), p.requestBuffer)
-	if err != nil {
-		return NewTTransportExceptionFromError(err)
-	}
-	req.Header = p.header
-	response, err := p.client.Do(req)
-	if err != nil {
-		return NewTTransportExceptionFromError(err)
-	}
-	if response.StatusCode != http.StatusOK {
-		// Close the response to avoid leaking file descriptors. closeResponse does
-		// more than just call Close(), so temporarily assign it and reuse the logic.
-		p.response = response
-		p.closeResponse()
-
-		// TODO(pomack) log bad response
-		return NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, "HTTP Response code: "+strconv.Itoa(response.StatusCode))
-	}
-	p.response = response
-	return nil
-}
-
-func (p *THttpClient) RemainingBytes() (num_bytes uint64) {
-	length := p.response.ContentLength
-	if length >= 0 {
-		return uint64(length)
-	}
-
-	const maxSize = ^uint64(0)
-	return maxSize // the truth is, we just don't know unless framed is used
-}
diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/http_transport.go b/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/http_transport.go
deleted file mode 100644
index f6d7458db..000000000
--- a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/http_transport.go
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */ - -package thrift - -import "net/http" - -// NewThriftHandlerFunc is a function that create a ready to use Apache Thrift Handler function -func NewThriftHandlerFunc(processor TProcessor, - inPfactory, outPfactory TProtocolFactory) func(w http.ResponseWriter, r *http.Request) { - - return func(w http.ResponseWriter, r *http.Request) { - w.Header().Add("Content-Type", "application/x-thrift") - - transport := NewStreamTransport(r.Body, w) - processor.Process(inPfactory.GetProtocol(transport), outPfactory.GetProtocol(transport)) - } -} diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/iostream_transport.go b/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/iostream_transport.go deleted file mode 100644 index 794872ff1..000000000 --- a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/iostream_transport.go +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "bufio" - "io" -) - -// StreamTransport is a Transport made of an io.Reader and/or an io.Writer -type StreamTransport struct { - io.Reader - io.Writer - isReadWriter bool - closed bool -} - -type StreamTransportFactory struct { - Reader io.Reader - Writer io.Writer - isReadWriter bool -} - -func (p *StreamTransportFactory) GetTransport(trans TTransport) TTransport { - if trans != nil { - t, ok := trans.(*StreamTransport) - if ok { - if t.isReadWriter { - return NewStreamTransportRW(t.Reader.(io.ReadWriter)) - } - if t.Reader != nil && t.Writer != nil { - return NewStreamTransport(t.Reader, t.Writer) - } - if t.Reader != nil && t.Writer == nil { - return NewStreamTransportR(t.Reader) - } - if t.Reader == nil && t.Writer != nil { - return NewStreamTransportW(t.Writer) - } - return &StreamTransport{} - } - } - if p.isReadWriter { - return NewStreamTransportRW(p.Reader.(io.ReadWriter)) - } - if p.Reader != nil && p.Writer != nil { - return NewStreamTransport(p.Reader, p.Writer) - } - if p.Reader != nil && p.Writer == nil { - return NewStreamTransportR(p.Reader) - } - if p.Reader == nil && p.Writer != nil { - return NewStreamTransportW(p.Writer) - } - return &StreamTransport{} -} - -func NewStreamTransportFactory(reader io.Reader, writer io.Writer, isReadWriter bool) *StreamTransportFactory { - return &StreamTransportFactory{Reader: reader, Writer: writer, isReadWriter: isReadWriter} -} - -func NewStreamTransport(r io.Reader, w io.Writer) *StreamTransport { - return &StreamTransport{Reader: bufio.NewReader(r), Writer: bufio.NewWriter(w)} -} - -func NewStreamTransportR(r io.Reader) *StreamTransport { - return &StreamTransport{Reader: bufio.NewReader(r)} -} - -func NewStreamTransportW(w io.Writer) *StreamTransport { - return &StreamTransport{Writer: bufio.NewWriter(w)} -} - 
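// Illustrative aside (not part of the vendored file): a hedged server-side
// sketch for NewThriftHandlerFunc above. serveThrift, the route, and the port
// are hypothetical; `processor` stands in for any generated TProcessor, and
// the imports "log" and "net/http" are assumed.
func serveThrift(processor TProcessor) {
	inF := NewTBinaryProtocolFactoryDefault()
	outF := NewTBinaryProtocolFactoryDefault()
	// Each request body is wrapped in a StreamTransport and handed to the
	// processor, exactly as the handler above does.
	http.HandleFunc("/thrift", NewThriftHandlerFunc(processor, inF, outF))
	log.Fatal(http.ListenAndServe(":9090", nil))
}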
-func NewStreamTransportRW(rw io.ReadWriter) *StreamTransport { - bufrw := bufio.NewReadWriter(bufio.NewReader(rw), bufio.NewWriter(rw)) - return &StreamTransport{Reader: bufrw, Writer: bufrw, isReadWriter: true} -} - -func (p *StreamTransport) IsOpen() bool { - return !p.closed -} - -// implicitly opened on creation, can't be reopened once closed -func (p *StreamTransport) Open() error { - if !p.closed { - return NewTTransportException(ALREADY_OPEN, "StreamTransport already open.") - } else { - return NewTTransportException(NOT_OPEN, "cannot reopen StreamTransport.") - } -} - -// Closes both the input and output streams. -func (p *StreamTransport) Close() error { - if p.closed { - return NewTTransportException(NOT_OPEN, "StreamTransport already closed.") - } - p.closed = true - closedReader := false - if p.Reader != nil { - c, ok := p.Reader.(io.Closer) - if ok { - e := c.Close() - closedReader = true - if e != nil { - return e - } - } - p.Reader = nil - } - if p.Writer != nil && (!closedReader || !p.isReadWriter) { - c, ok := p.Writer.(io.Closer) - if ok { - e := c.Close() - if e != nil { - return e - } - } - p.Writer = nil - } - return nil -} - -// Flushes the underlying output stream if not null. -func (p *StreamTransport) Flush() error { - if p.Writer == nil { - return NewTTransportException(NOT_OPEN, "Cannot flush null outputStream") - } - f, ok := p.Writer.(Flusher) - if ok { - err := f.Flush() - if err != nil { - return NewTTransportExceptionFromError(err) - } - } - return nil -} - -func (p *StreamTransport) Read(c []byte) (n int, err error) { - n, err = p.Reader.Read(c) - if err != nil { - err = NewTTransportExceptionFromError(err) - } - return -} - -func (p *StreamTransport) ReadByte() (c byte, err error) { - f, ok := p.Reader.(io.ByteReader) - if ok { - c, err = f.ReadByte() - } else { - c, err = readByte(p.Reader) - } - if err != nil { - err = NewTTransportExceptionFromError(err) - } - return -} - -func (p *StreamTransport) Write(c []byte) (n int, err error) { - n, err = p.Writer.Write(c) - if err != nil { - err = NewTTransportExceptionFromError(err) - } - return -} - -func (p *StreamTransport) WriteByte(c byte) (err error) { - f, ok := p.Writer.(io.ByteWriter) - if ok { - err = f.WriteByte(c) - } else { - err = writeByte(p.Writer, c) - } - if err != nil { - err = NewTTransportExceptionFromError(err) - } - return -} - -func (p *StreamTransport) WriteString(s string) (n int, err error) { - f, ok := p.Writer.(stringWriter) - if ok { - n, err = f.WriteString(s) - } else { - n, err = p.Writer.Write([]byte(s)) - } - if err != nil { - err = NewTTransportExceptionFromError(err) - } - return -} - -func (p *StreamTransport) RemainingBytes() (num_bytes uint64) { - const maxSize = ^uint64(0) - return maxSize // the truth is, we just don't know unless a framed transport is used -} - diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/json_protocol.go b/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/json_protocol.go deleted file mode 100644 index 442fa9144..000000000 --- a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/json_protocol.go +++ /dev/null @@ -1,583 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "encoding/base64" - "fmt" -) - -const ( - THRIFT_JSON_PROTOCOL_VERSION = 1 -) - -// for references to _ParseContext see tsimplejson_protocol.go - -// JSON protocol implementation for thrift. -// -// This protocol produces/consumes a simple output format -// suitable for parsing by scripting languages. It should not be -// confused with the full-featured TJSONProtocol. -// -type TJSONProtocol struct { - *TSimpleJSONProtocol -} - -// Constructor -func NewTJSONProtocol(t TTransport) *TJSONProtocol { - v := &TJSONProtocol{TSimpleJSONProtocol: NewTSimpleJSONProtocol(t)} - v.parseContextStack = append(v.parseContextStack, int(_CONTEXT_IN_TOPLEVEL)) - v.dumpContext = append(v.dumpContext, int(_CONTEXT_IN_TOPLEVEL)) - return v -} - -// Factory -type TJSONProtocolFactory struct{} - -func (p *TJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol { - return NewTJSONProtocol(trans) -} - -func NewTJSONProtocolFactory() *TJSONProtocolFactory { - return &TJSONProtocolFactory{} -} - -func (p *TJSONProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error { - p.resetContextStack() // THRIFT-3735 - if e := p.OutputListBegin(); e != nil { - return e - } - if e := p.WriteI32(THRIFT_JSON_PROTOCOL_VERSION); e != nil { - return e - } - if e := p.WriteString(name); e != nil { - return e - } - if e := p.WriteByte(int8(typeId)); e != nil { - return e - } - if e := p.WriteI32(seqId); e != nil { - return e - } - return nil -} - -func (p *TJSONProtocol) WriteMessageEnd() error { - return p.OutputListEnd() -} - -func (p *TJSONProtocol) WriteStructBegin(name string) error { - if e := p.OutputObjectBegin(); e != nil { - return e - } - return nil -} - -func (p *TJSONProtocol) WriteStructEnd() error { - return p.OutputObjectEnd() -} - -func (p *TJSONProtocol) WriteFieldBegin(name string, typeId TType, id int16) error { - if e := p.WriteI16(id); e != nil { - return e - } - if e := p.OutputObjectBegin(); e != nil { - return e - } - s, e1 := p.TypeIdToString(typeId) - if e1 != nil { - return e1 - } - if e := p.WriteString(s); e != nil { - return e - } - return nil -} - -func (p *TJSONProtocol) WriteFieldEnd() error { - return p.OutputObjectEnd() -} - -func (p *TJSONProtocol) WriteFieldStop() error { return nil } - -func (p *TJSONProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error { - if e := p.OutputListBegin(); e != nil { - return e - } - s, e1 := p.TypeIdToString(keyType) - if e1 != nil { - return e1 - } - if e := p.WriteString(s); e != nil { - return e - } - s, e1 = p.TypeIdToString(valueType) - if e1 != nil { - return e1 - } - if e := p.WriteString(s); e != nil { - return e - } - if e := p.WriteI64(int64(size)); e != nil { - return e - } - return p.OutputObjectBegin() -} - -func (p *TJSONProtocol) WriteMapEnd() error { - if e := p.OutputObjectEnd(); e != nil { - return e - } - return p.OutputListEnd() -} - -func (p *TJSONProtocol) WriteListBegin(elemType 
TType, size int) error { - return p.OutputElemListBegin(elemType, size) -} - -func (p *TJSONProtocol) WriteListEnd() error { - return p.OutputListEnd() -} - -func (p *TJSONProtocol) WriteSetBegin(elemType TType, size int) error { - return p.OutputElemListBegin(elemType, size) -} - -func (p *TJSONProtocol) WriteSetEnd() error { - return p.OutputListEnd() -} - -func (p *TJSONProtocol) WriteBool(b bool) error { - if b { - return p.WriteI32(1) - } - return p.WriteI32(0) -} - -func (p *TJSONProtocol) WriteByte(b int8) error { - return p.WriteI32(int32(b)) -} - -func (p *TJSONProtocol) WriteI16(v int16) error { - return p.WriteI32(int32(v)) -} - -func (p *TJSONProtocol) WriteI32(v int32) error { - return p.OutputI64(int64(v)) -} - -func (p *TJSONProtocol) WriteI64(v int64) error { - return p.OutputI64(int64(v)) -} - -func (p *TJSONProtocol) WriteDouble(v float64) error { - return p.OutputF64(v) -} - -func (p *TJSONProtocol) WriteString(v string) error { - return p.OutputString(v) -} - -func (p *TJSONProtocol) WriteBinary(v []byte) error { - // The JSON library only accepts strings, not arbitrary byte arrays. To - // transmit bytes efficiently inside a valid JSON string, we base64-encode - // them, which avoids excessive escaping/quoting. - if e := p.OutputPreValue(); e != nil { - return e - } - if _, e := p.write(JSON_QUOTE_BYTES); e != nil { - return NewTProtocolException(e) - } - writer := base64.NewEncoder(base64.StdEncoding, p.writer) - if _, e := writer.Write(v); e != nil { - p.writer.Reset(p.trans) // THRIFT-3735 - return NewTProtocolException(e) - } - if e := writer.Close(); e != nil { - return NewTProtocolException(e) - } - if _, e := p.write(JSON_QUOTE_BYTES); e != nil { - return NewTProtocolException(e) - } - return p.OutputPostValue() -} - -// Reading methods.
-func (p *TJSONProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) { - p.resetContextStack() // THRIFT-3735 - if isNull, err := p.ParseListBegin(); isNull || err != nil { - return name, typeId, seqId, err - } - version, err := p.ReadI32() - if err != nil { - return name, typeId, seqId, err - } - if version != THRIFT_JSON_PROTOCOL_VERSION { - e := fmt.Errorf("Unknown Protocol version %d, expected version %d", version, THRIFT_JSON_PROTOCOL_VERSION) - return name, typeId, seqId, NewTProtocolExceptionWithType(INVALID_DATA, e) - - } - if name, err = p.ReadString(); err != nil { - return name, typeId, seqId, err - } - bTypeId, err := p.ReadByte() - typeId = TMessageType(bTypeId) - if err != nil { - return name, typeId, seqId, err - } - if seqId, err = p.ReadI32(); err != nil { - return name, typeId, seqId, err - } - return name, typeId, seqId, nil -} - -func (p *TJSONProtocol) ReadMessageEnd() error { - err := p.ParseListEnd() - return err -} - -func (p *TJSONProtocol) ReadStructBegin() (name string, err error) { - _, err = p.ParseObjectStart() - return "", err -} - -func (p *TJSONProtocol) ReadStructEnd() error { - return p.ParseObjectEnd() -} - -func (p *TJSONProtocol) ReadFieldBegin() (string, TType, int16, error) { - b, _ := p.reader.Peek(1) - if len(b) < 1 || b[0] == JSON_RBRACE[0] || b[0] == JSON_RBRACKET[0] { - return "", STOP, -1, nil - } - fieldId, err := p.ReadI16() - if err != nil { - return "", STOP, fieldId, err - } - if _, err = p.ParseObjectStart(); err != nil { - return "", STOP, fieldId, err - } - sType, err := p.ReadString() - if err != nil { - return "", STOP, fieldId, err - } - fType, err := p.StringToTypeId(sType) - return "", fType, fieldId, err -} - -func (p *TJSONProtocol) ReadFieldEnd() error { - return p.ParseObjectEnd() -} - -func (p *TJSONProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, e error) { - if isNull, e := p.ParseListBegin(); isNull || e != nil { - return VOID, VOID, 0, e - } - - // read keyType - sKeyType, e := p.ReadString() - if e != nil { - return keyType, valueType, size, e - } - keyType, e = p.StringToTypeId(sKeyType) - if e != nil { - return keyType, valueType, size, e - } - - // read valueType - sValueType, e := p.ReadString() - if e != nil { - return keyType, valueType, size, e - } - valueType, e = p.StringToTypeId(sValueType) - if e != nil { - return keyType, valueType, size, e - } - - // read size - iSize, e := p.ReadI64() - if e != nil { - return keyType, valueType, size, e - } - size = int(iSize) - - _, e = p.ParseObjectStart() - return keyType, valueType, size, e -} - -func (p *TJSONProtocol) ReadMapEnd() error { - e := p.ParseObjectEnd() - if e != nil { - return e - } - return p.ParseListEnd() -} - -func (p *TJSONProtocol) ReadListBegin() (elemType TType, size int, e error) { - return p.ParseElemListBegin() -} - -func (p *TJSONProtocol) ReadListEnd() error { - return p.ParseListEnd() -} - -func (p *TJSONProtocol) ReadSetBegin() (elemType TType, size int, e error) { - return p.ParseElemListBegin() -} - -func (p *TJSONProtocol) ReadSetEnd() error { - return p.ParseListEnd() -} - -func (p *TJSONProtocol) ReadBool() (bool, error) { - value, err := p.ReadI32() - return (value != 0), err -} - -func (p *TJSONProtocol) ReadByte() (int8, error) { - v, err := p.ReadI64() - return int8(v), err -} - -func (p *TJSONProtocol) ReadI16() (int16, error) { - v, err := p.ReadI64() - return int16(v), err -} - -func (p *TJSONProtocol) ReadI32() (int32, error) { - v, err := p.ReadI64() - return int32(v), err -} 
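ReadMessageBegin above is the inverse of WriteMessageBegin: every message travels as a JSON array prefixed with THRIFT_JSON_PROTOCOL_VERSION. A quick way to see that envelope on the wire, sketched with TMemoryBuffer and the thrift.CALL constant from elsewhere in this vendored package:

package main

import (
	"fmt"

	"github.com/apache/thrift/lib/go/thrift"
)

func main() {
	mem := thrift.NewTMemoryBufferLen(1024)
	proto := thrift.NewTJSONProtocol(mem)

	// A CALL message named "ping" with sequence id 1.
	if err := proto.WriteMessageBegin("ping", thrift.CALL, 1); err != nil {
		panic(err)
	}
	if err := proto.WriteMessageEnd(); err != nil {
		panic(err)
	}
	if err := proto.Flush(); err != nil {
		panic(err)
	}

	// Prints the version-prefixed envelope: [1,"ping",1,1]
	fmt.Println(mem.String())
}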
- -func (p *TJSONProtocol) ReadI64() (int64, error) { - v, _, err := p.ParseI64() - return v, err -} - -func (p *TJSONProtocol) ReadDouble() (float64, error) { - v, _, err := p.ParseF64() - return v, err -} - -func (p *TJSONProtocol) ReadString() (string, error) { - var v string - if err := p.ParsePreValue(); err != nil { - return v, err - } - f, _ := p.reader.Peek(1) - if len(f) > 0 && f[0] == JSON_QUOTE { - p.reader.ReadByte() - value, err := p.ParseStringBody() - v = value - if err != nil { - return v, err - } - } else if len(f) > 0 && f[0] == JSON_NULL[0] { - b := make([]byte, len(JSON_NULL)) - _, err := p.reader.Read(b) - if err != nil { - return v, NewTProtocolException(err) - } - if string(b) != string(JSON_NULL) { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) - return v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } else { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) - return v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - return v, p.ParsePostValue() -} - -func (p *TJSONProtocol) ReadBinary() ([]byte, error) { - var v []byte - if err := p.ParsePreValue(); err != nil { - return nil, err - } - f, _ := p.reader.Peek(1) - if len(f) > 0 && f[0] == JSON_QUOTE { - p.reader.ReadByte() - value, err := p.ParseBase64EncodedBody() - v = value - if err != nil { - return v, err - } - } else if len(f) > 0 && f[0] == JSON_NULL[0] { - b := make([]byte, len(JSON_NULL)) - _, err := p.reader.Read(b) - if err != nil { - return v, NewTProtocolException(err) - } - if string(b) != string(JSON_NULL) { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) - return v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } else { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) - return v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - - return v, p.ParsePostValue() -} - -func (p *TJSONProtocol) Flush() (err error) { - err = p.writer.Flush() - if err == nil { - err = p.trans.Flush() - } - return NewTProtocolException(err) -} - -func (p *TJSONProtocol) Skip(fieldType TType) (err error) { - return SkipDefaultDepth(p, fieldType) -} - -func (p *TJSONProtocol) Transport() TTransport { - return p.trans -} - -func (p *TJSONProtocol) OutputElemListBegin(elemType TType, size int) error { - if e := p.OutputListBegin(); e != nil { - return e - } - s, e1 := p.TypeIdToString(elemType) - if e1 != nil { - return e1 - } - if e := p.WriteString(s); e != nil { - return e - } - if e := p.WriteI64(int64(size)); e != nil { - return e - } - return nil -} - -func (p *TJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) { - if isNull, e := p.ParseListBegin(); isNull || e != nil { - return VOID, 0, e - } - sElemType, err := p.ReadString() - if err != nil { - return VOID, size, err - } - elemType, err = p.StringToTypeId(sElemType) - if err != nil { - return elemType, size, err - } - nSize, err2 := p.ReadI64() - size = int(nSize) - return elemType, size, err2 -} - -func (p *TJSONProtocol) readElemListBegin() (elemType TType, size int, e error) { - if isNull, e := p.ParseListBegin(); isNull || e != nil { - return VOID, 0, e - } - sElemType, err := p.ReadString() - if err != nil { - return VOID, size, err - } - elemType, err = p.StringToTypeId(sElemType) - if err != nil { - return elemType, size, err - } - nSize, err2 := p.ReadI64() - size = int(nSize) - return elemType, size, err2 -} - -func (p *TJSONProtocol) 
writeElemListBegin(elemType TType, size int) error { - if e := p.OutputListBegin(); e != nil { - return e - } - s, e1 := p.TypeIdToString(elemType) - if e1 != nil { - return e1 - } - if e := p.OutputString(s); e != nil { - return e - } - if e := p.OutputI64(int64(size)); e != nil { - return e - } - return nil -} - -func (p *TJSONProtocol) TypeIdToString(fieldType TType) (string, error) { - switch byte(fieldType) { - case BOOL: - return "tf", nil - case BYTE: - return "i8", nil - case I16: - return "i16", nil - case I32: - return "i32", nil - case I64: - return "i64", nil - case DOUBLE: - return "dbl", nil - case STRING: - return "str", nil - case STRUCT: - return "rec", nil - case MAP: - return "map", nil - case SET: - return "set", nil - case LIST: - return "lst", nil - } - - e := fmt.Errorf("Unknown fieldType: %d", int(fieldType)) - return "", NewTProtocolExceptionWithType(INVALID_DATA, e) -} - -func (p *TJSONProtocol) StringToTypeId(fieldType string) (TType, error) { - switch fieldType { - case "tf": - return TType(BOOL), nil - case "i8": - return TType(BYTE), nil - case "i16": - return TType(I16), nil - case "i32": - return TType(I32), nil - case "i64": - return TType(I64), nil - case "dbl": - return TType(DOUBLE), nil - case "str": - return TType(STRING), nil - case "rec": - return TType(STRUCT), nil - case "map": - return TType(MAP), nil - case "set": - return TType(SET), nil - case "lst": - return TType(LIST), nil - } - - e := fmt.Errorf("Unknown type identifier: %s", fieldType) - return TType(STOP), NewTProtocolExceptionWithType(INVALID_DATA, e) -} diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol.go b/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol.go deleted file mode 100644 index 3157e0d5d..000000000 --- a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol.go +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "fmt" - "strings" -) - -/* -TMultiplexedProtocol is a protocol-independent concrete decorator -that allows a Thrift client to communicate with a multiplexing Thrift server, -by prepending the service name to the function name during function calls. - -NOTE: THIS IS NOT USED BY SERVERS. On the server, use TMultiplexedProcessor to handle request -from a multiplexing client. 
- -This example uses a single socket transport to invoke two services: - -socket := thrift.NewTSocketFromAddrTimeout(addr, TIMEOUT) -transport := thrift.NewTFramedTransport(socket) -protocol := thrift.NewTBinaryProtocolTransport(transport) - -mp := thrift.NewTMultiplexedProtocol(protocol, "Calculator") -service := Calculator.NewCalculatorClient(mp) - -mp2 := thrift.NewTMultiplexedProtocol(protocol, "WeatherReport") -service2 := WeatherReport.NewWeatherReportClient(mp2) - -err := transport.Open() -if err != nil { - t.Fatal("Unable to open client socket", err) -} - -fmt.Println(service.Add(2,2)) -fmt.Println(service2.GetTemperature()) -*/ - -type TMultiplexedProtocol struct { - TProtocol - serviceName string -} - -const MULTIPLEXED_SEPARATOR = ":" - -func NewTMultiplexedProtocol(protocol TProtocol, serviceName string) *TMultiplexedProtocol { - return &TMultiplexedProtocol{ - TProtocol: protocol, - serviceName: serviceName, - } -} - -func (t *TMultiplexedProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error { - if typeId == CALL || typeId == ONEWAY { - return t.TProtocol.WriteMessageBegin(t.serviceName+MULTIPLEXED_SEPARATOR+name, typeId, seqid) - } else { - return t.TProtocol.WriteMessageBegin(name, typeId, seqid) - } -} - -/* -TMultiplexedProcessor is a TProcessor allowing -a single TServer to provide multiple services. - -To do so, you instantiate the processor and then register additional -processors with it, as shown in the following example: - -var processor = thrift.NewTMultiplexedProcessor() - -firstProcessor := -processor.RegisterProcessor("FirstService", firstProcessor) - -processor.RegisterProcessor( - "Calculator", - Calculator.NewCalculatorProcessor(&CalculatorHandler{}), -) - -processor.RegisterProcessor( - "WeatherReport", - WeatherReport.NewWeatherReportProcessor(&WeatherReportHandler{}), -) - -serverTransport, err := thrift.NewTServerSocketTimeout(addr, TIMEOUT) -if err != nil { - t.Fatal("Unable to create server socket", err) -} -server := thrift.NewTSimpleServer2(processor, serverTransport) -server.Serve() -*/ - -type TMultiplexedProcessor struct { - serviceProcessorMap map[string]TProcessor - DefaultProcessor TProcessor -} - -func NewTMultiplexedProcessor() *TMultiplexedProcessor { - return &TMultiplexedProcessor{ - serviceProcessorMap: make(map[string]TProcessor), - } -} - -func (t *TMultiplexedProcessor) RegisterDefault(processor TProcessor) { - t.DefaultProcessor = processor -} - -func (t *TMultiplexedProcessor) RegisterProcessor(name string, processor TProcessor) { - if t.serviceProcessorMap == nil { - t.serviceProcessorMap = make(map[string]TProcessor) - } - t.serviceProcessorMap[name] = processor -} - -func (t *TMultiplexedProcessor) Process(in, out TProtocol) (bool, TException) { - name, typeId, seqid, err := in.ReadMessageBegin() - if err != nil { - return false, err - } - if typeId != CALL && typeId != ONEWAY { - return false, fmt.Errorf("Unexpected message type %v", typeId) - } - // extract the service name - v := strings.SplitN(name, MULTIPLEXED_SEPARATOR, 2) - if len(v) != 2 { - if t.DefaultProcessor != nil { - smb := NewStoredMessageProtocol(in, name, typeId, seqid) - return t.DefaultProcessor.Process(smb, out) - } - return false, fmt.Errorf("Service name not found in message name: %s. Did you forget to use a TMultiplexedProtocol in your client?", name) - } - actualProcessor, ok := t.serviceProcessorMap[v[0]] - if !ok { - return false, fmt.Errorf("Service name not found: %s.
Did you forget to call RegisterProcessor()?", v[0]) - } - smb := NewStoredMessageProtocol(in, v[1], typeId, seqid) - return actualProcessor.Process(smb, out) -} - -// Protocol that uses a stored message for ReadMessageBegin -type storedMessageProtocol struct { - TProtocol - name string - typeId TMessageType - seqid int32 -} - -func NewStoredMessageProtocol(protocol TProtocol, name string, typeId TMessageType, seqid int32) *storedMessageProtocol { - return &storedMessageProtocol{protocol, name, typeId, seqid} -} - -func (s *storedMessageProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error) { - return s.name, s.typeId, s.seqid, nil -} diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/numeric.go b/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/numeric.go deleted file mode 100644 index aa8daa9b5..000000000 --- a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/numeric.go +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License.
- */ - -package thrift - -import ( - "math" - "strconv" -) - -type Numeric interface { - Int64() int64 - Int32() int32 - Int16() int16 - Byte() byte - Int() int - Float64() float64 - Float32() float32 - String() string - isNull() bool -} - -type numeric struct { - iValue int64 - dValue float64 - sValue string - isNil bool -} - -var ( - INFINITY Numeric - NEGATIVE_INFINITY Numeric - NAN Numeric - ZERO Numeric - NUMERIC_NULL Numeric -) - -func NewNumericFromDouble(dValue float64) Numeric { - if math.IsInf(dValue, 1) { - return INFINITY - } - if math.IsInf(dValue, -1) { - return NEGATIVE_INFINITY - } - if math.IsNaN(dValue) { - return NAN - } - iValue := int64(dValue) - sValue := strconv.FormatFloat(dValue, 'g', 10, 64) - isNil := false - return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} -} - -func NewNumericFromI64(iValue int64) Numeric { - dValue := float64(iValue) - sValue := strconv.FormatInt(iValue, 10) // decimal text; string(int64) would yield a rune - isNil := false - return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} -} - -func NewNumericFromI32(iValue int32) Numeric { - dValue := float64(iValue) - sValue := strconv.FormatInt(int64(iValue), 10) - isNil := false - return &numeric{iValue: int64(iValue), dValue: dValue, sValue: sValue, isNil: isNil} -} - -func NewNumericFromString(sValue string) Numeric { - if sValue == INFINITY.String() { - return INFINITY - } - if sValue == NEGATIVE_INFINITY.String() { - return NEGATIVE_INFINITY - } - if sValue == NAN.String() { - return NAN - } - iValue, _ := strconv.ParseInt(sValue, 10, 64) - dValue, _ := strconv.ParseFloat(sValue, 64) - isNil := len(sValue) == 0 - return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} -} - -func NewNumericFromJSONString(sValue string, isNull bool) Numeric { - if isNull { - return NewNullNumeric() - } - if sValue == JSON_INFINITY { - return INFINITY - } - if sValue == JSON_NEGATIVE_INFINITY { - return NEGATIVE_INFINITY - } - if sValue == JSON_NAN { - return NAN - } - iValue, _ := strconv.ParseInt(sValue, 10, 64) - dValue, _ := strconv.ParseFloat(sValue, 64) - return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNull} -} - -func NewNullNumeric() Numeric { - return &numeric{iValue: 0, dValue: 0.0, sValue: "", isNil: true} -} - -func (p *numeric) Int64() int64 { - return p.iValue -} - -func (p *numeric) Int32() int32 { - return int32(p.iValue) -} - -func (p *numeric) Int16() int16 { - return int16(p.iValue) -} - -func (p *numeric) Byte() byte { - return byte(p.iValue) -} - -func (p *numeric) Int() int { - return int(p.iValue) -} - -func (p *numeric) Float64() float64 { - return p.dValue -} - -func (p *numeric) Float32() float32 { - return float32(p.dValue) -} - -func (p *numeric) String() string { - return p.sValue -} - -func (p *numeric) isNull() bool { - return p.isNil -} - -func init() { - INFINITY = &numeric{iValue: 0, dValue: math.Inf(1), sValue: "Infinity", isNil: false} - NEGATIVE_INFINITY = &numeric{iValue: 0, dValue: math.Inf(-1), sValue: "-Infinity", isNil: false} - NAN = &numeric{iValue: 0, dValue: math.NaN(), sValue: "NaN", isNil: false} - ZERO = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: false} - NUMERIC_NULL = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: true} -} diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/pointerize.go b/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/pointerize.go deleted file mode 100644 index 8d6b2c215..000000000 --- a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/pointerize.go +++ /dev/null @@
-1,50 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -/////////////////////////////////////////////////////////////////////////////// -// This file is home to helpers that convert from various base types to -// respective pointer types. This is necessary because Go does not permit -// references to constants, nor can a pointer type to base type be allocated -// and initialized in a single expression. -// -// E.g., this is not allowed: -// -// var ip *int = &5 -// -// But this *is* allowed: -// -// func IntPtr(i int) *int { return &i } -// var ip *int = IntPtr(5) -// -// Since pointers to base types are commonplace as [optional] fields in -// exported thrift structs, we factor such helpers here. -/////////////////////////////////////////////////////////////////////////////// - -func Float32Ptr(v float32) *float32 { return &v } -func Float64Ptr(v float64) *float64 { return &v } -func IntPtr(v int) *int { return &v } -func Int32Ptr(v int32) *int32 { return &v } -func Int64Ptr(v int64) *int64 { return &v } -func StringPtr(v string) *string { return &v } -func Uint32Ptr(v uint32) *uint32 { return &v } -func Uint64Ptr(v uint64) *uint64 { return &v } -func BoolPtr(v bool) *bool { return &v } -func ByteSlicePtr(v []byte) *[]byte { return &v } diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/processor.go b/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/processor.go deleted file mode 100644 index ca0d3faf2..000000000 --- a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/processor.go +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -// A processor is a generic object which operates upon an input stream and -// writes to some output stream. 
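Because TProcessor below is a one-method interface, cross-cutting behavior is typically layered on by wrapping one processor in another. A hypothetical logging decorator, sketched against these interfaces:

package example

import (
	"log"

	"github.com/apache/thrift/lib/go/thrift"
)

// loggingProcessor is a hypothetical decorator: it satisfies
// thrift.TProcessor by delegating to an inner processor and logging
// every failed request.
type loggingProcessor struct {
	next thrift.TProcessor
}

func (lp loggingProcessor) Process(in, out thrift.TProtocol) (bool, thrift.TException) {
	ok, err := lp.next.Process(in, out)
	if err != nil {
		log.Printf("request failed: %v", err)
	}
	return ok, err
}

// Usage: server := thrift.NewTSimpleServer2(loggingProcessor{next: inner}, serverTransport)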
-type TProcessor interface { - Process(in, out TProtocol) (bool, TException) -} - -type TProcessorFunction interface { - Process(seqId int32, in, out TProtocol) (bool, TException) -} diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/processor_factory.go b/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/processor_factory.go deleted file mode 100644 index 9d645df24..000000000 --- a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/processor_factory.go +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -// The default processor factory just returns a singleton -// instance. -type TProcessorFactory interface { - GetProcessor(trans TTransport) TProcessor -} - -type tProcessorFactory struct { - processor TProcessor -} - -func NewTProcessorFactory(p TProcessor) TProcessorFactory { - return &tProcessorFactory{processor: p} -} - -func (p *tProcessorFactory) GetProcessor(trans TTransport) TProcessor { - return p.processor -} - -/** - * The default processor factory just returns a singleton - * instance. - */ -type TProcessorFunctionFactory interface { - GetProcessorFunction(trans TTransport) TProcessorFunction -} - -type tProcessorFunctionFactory struct { - processor TProcessorFunction -} - -func NewTProcessorFunctionFactory(p TProcessorFunction) TProcessorFunctionFactory { - return &tProcessorFunctionFactory{processor: p} -} - -func (p *tProcessorFunctionFactory) GetProcessorFunction(trans TTransport) TProcessorFunction { - return p.processor -} diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/protocol.go b/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/protocol.go deleted file mode 100644 index 45fa202e7..000000000 --- a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/protocol.go +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "errors" -) - -const ( - VERSION_MASK = 0xffff0000 - VERSION_1 = 0x80010000 -) - -type TProtocol interface { - WriteMessageBegin(name string, typeId TMessageType, seqid int32) error - WriteMessageEnd() error - WriteStructBegin(name string) error - WriteStructEnd() error - WriteFieldBegin(name string, typeId TType, id int16) error - WriteFieldEnd() error - WriteFieldStop() error - WriteMapBegin(keyType TType, valueType TType, size int) error - WriteMapEnd() error - WriteListBegin(elemType TType, size int) error - WriteListEnd() error - WriteSetBegin(elemType TType, size int) error - WriteSetEnd() error - WriteBool(value bool) error - WriteByte(value int8) error - WriteI16(value int16) error - WriteI32(value int32) error - WriteI64(value int64) error - WriteDouble(value float64) error - WriteString(value string) error - WriteBinary(value []byte) error - - ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error) - ReadMessageEnd() error - ReadStructBegin() (name string, err error) - ReadStructEnd() error - ReadFieldBegin() (name string, typeId TType, id int16, err error) - ReadFieldEnd() error - ReadMapBegin() (keyType TType, valueType TType, size int, err error) - ReadMapEnd() error - ReadListBegin() (elemType TType, size int, err error) - ReadListEnd() error - ReadSetBegin() (elemType TType, size int, err error) - ReadSetEnd() error - ReadBool() (value bool, err error) - ReadByte() (value int8, err error) - ReadI16() (value int16, err error) - ReadI32() (value int32, err error) - ReadI64() (value int64, err error) - ReadDouble() (value float64, err error) - ReadString() (value string, err error) - ReadBinary() (value []byte, err error) - - Skip(fieldType TType) (err error) - Flush() (err error) - - Transport() TTransport -} - -// The maximum recursive depth the skip() function will traverse -const DEFAULT_RECURSION_DEPTH = 64 - -// Skips over the next data element from the provided input TProtocol object. -func SkipDefaultDepth(prot TProtocol, typeId TType) (err error) { - return Skip(prot, typeId, DEFAULT_RECURSION_DEPTH) -} - -// Skips over the next data element from the provided input TProtocol object. 
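The depth-limited Skip that follows is what generated code calls (via SkipDefaultDepth) whenever it meets a field id it does not recognize, which is how old readers stay compatible with newer schemas. A sketch of that pattern; the struct shape and field id are hypothetical:

package example

import "github.com/apache/thrift/lib/go/thrift"

// readPoint decodes a hypothetical struct { 1: i32 x }, skipping any
// fields added by newer writers instead of failing on them.
func readPoint(iprot thrift.TProtocol) (x int32, err error) {
	if _, err = iprot.ReadStructBegin(); err != nil {
		return
	}
	for {
		_, ftype, id, ferr := iprot.ReadFieldBegin()
		if ferr != nil {
			return x, ferr
		}
		if ftype == thrift.STOP {
			break
		}
		switch id {
		case 1:
			x, err = iprot.ReadI32()
		default:
			// Unknown field: skip it, honoring the recursion depth limit.
			err = thrift.SkipDefaultDepth(iprot, ftype)
		}
		if err != nil {
			return
		}
		if err = iprot.ReadFieldEnd(); err != nil {
			return
		}
	}
	return x, iprot.ReadStructEnd()
}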
-func Skip(self TProtocol, fieldType TType, maxDepth int) (err error) { - - if maxDepth <= 0 { - return NewTProtocolExceptionWithType( DEPTH_LIMIT, errors.New("Depth limit exceeded")) - } - - switch fieldType { - case STOP: - return - case BOOL: - _, err = self.ReadBool() - return - case BYTE: - _, err = self.ReadByte() - return - case I16: - _, err = self.ReadI16() - return - case I32: - _, err = self.ReadI32() - return - case I64: - _, err = self.ReadI64() - return - case DOUBLE: - _, err = self.ReadDouble() - return - case STRING: - _, err = self.ReadString() - return - case STRUCT: - if _, err = self.ReadStructBegin(); err != nil { - return err - } - for { - _, typeId, _, _ := self.ReadFieldBegin() - if typeId == STOP { - break - } - err := Skip(self, typeId, maxDepth-1) - if err != nil { - return err - } - self.ReadFieldEnd() - } - return self.ReadStructEnd() - case MAP: - keyType, valueType, size, err := self.ReadMapBegin() - if err != nil { - return err - } - for i := 0; i < size; i++ { - err := Skip(self, keyType, maxDepth-1) - if err != nil { - return err - } - self.Skip(valueType) - } - return self.ReadMapEnd() - case SET: - elemType, size, err := self.ReadSetBegin() - if err != nil { - return err - } - for i := 0; i < size; i++ { - err := Skip(self, elemType, maxDepth-1) - if err != nil { - return err - } - } - return self.ReadSetEnd() - case LIST: - elemType, size, err := self.ReadListBegin() - if err != nil { - return err - } - for i := 0; i < size; i++ { - err := Skip(self, elemType, maxDepth-1) - if err != nil { - return err - } - } - return self.ReadListEnd() - } - return nil -} diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/protocol_exception.go b/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/protocol_exception.go deleted file mode 100644 index 6e357ee89..000000000 --- a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/protocol_exception.go +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "encoding/base64" -) - -// Thrift Protocol exception -type TProtocolException interface { - TException - TypeId() int -} - -const ( - UNKNOWN_PROTOCOL_EXCEPTION = 0 - INVALID_DATA = 1 - NEGATIVE_SIZE = 2 - SIZE_LIMIT = 3 - BAD_VERSION = 4 - NOT_IMPLEMENTED = 5 - DEPTH_LIMIT = 6 -) - -type tProtocolException struct { - typeId int - message string -} - -func (p *tProtocolException) TypeId() int { - return p.typeId -} - -func (p *tProtocolException) String() string { - return p.message -} - -func (p *tProtocolException) Error() string { - return p.message -} - -func NewTProtocolException(err error) TProtocolException { - if err == nil { - return nil - } - if e,ok := err.(TProtocolException); ok { - return e - } - if _, ok := err.(base64.CorruptInputError); ok { - return &tProtocolException{INVALID_DATA, err.Error()} - } - return &tProtocolException{UNKNOWN_PROTOCOL_EXCEPTION, err.Error()} -} - -func NewTProtocolExceptionWithType(errType int, err error) TProtocolException { - if err == nil { - return nil - } - return &tProtocolException{errType, err.Error()} -} - diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/serializer.go b/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/serializer.go deleted file mode 100644 index 771222999..000000000 --- a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/serializer.go +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -type TSerializer struct { - Transport *TMemoryBuffer - Protocol TProtocol -} - -type TStruct interface { - Write(p TProtocol) error - Read(p TProtocol) error -} - -func NewTSerializer() *TSerializer { - transport := NewTMemoryBufferLen(1024) - protocol := NewTBinaryProtocolFactoryDefault().GetProtocol(transport) - - return &TSerializer{ - transport, - protocol} -} - -func (t *TSerializer) WriteString(msg TStruct) (s string, err error) { - t.Transport.Reset() - - if err = msg.Write(t.Protocol); err != nil { - return - } - - if err = t.Protocol.Flush(); err != nil { - return - } - if err = t.Transport.Flush(); err != nil { - return - } - - return t.Transport.String(), nil -} - -func (t *TSerializer) Write(msg TStruct) (b []byte, err error) { - t.Transport.Reset() - - if err = msg.Write(t.Protocol); err != nil { - return - } - - if err = t.Protocol.Flush(); err != nil { - return - } - - if err = t.Transport.Flush(); err != nil { - return - } - - b = append(b, t.Transport.Bytes()...) 
- return -} diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/server.go b/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/server.go deleted file mode 100644 index f813fa353..000000000 --- a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/server.go +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -type TServer interface { - ProcessorFactory() TProcessorFactory - ServerTransport() TServerTransport - InputTransportFactory() TTransportFactory - OutputTransportFactory() TTransportFactory - InputProtocolFactory() TProtocolFactory - OutputProtocolFactory() TProtocolFactory - - // Starts the server - Serve() error - // Stops the server. This is optional on a per-implementation basis. Not - // all servers are required to be cleanly stoppable. - Stop() error -} diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/server_socket.go b/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/server_socket.go deleted file mode 100644 index d6e9495d2..000000000 --- a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/server_socket.go +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "net" - "sync" - "time" -) - -type TServerSocket struct { - listener net.Listener - addr net.Addr - clientTimeout time.Duration - - // Protects the interrupted value to make it thread safe. 
- mu sync.RWMutex - interrupted bool -} - -func NewTServerSocket(listenAddr string) (*TServerSocket, error) { - return NewTServerSocketTimeout(listenAddr, 0) -} - -func NewTServerSocketTimeout(listenAddr string, clientTimeout time.Duration) (*TServerSocket, error) { - addr, err := net.ResolveTCPAddr("tcp", listenAddr) - if err != nil { - return nil, err - } - return &TServerSocket{addr: addr, clientTimeout: clientTimeout}, nil -} - -func (p *TServerSocket) Listen() error { - if p.IsListening() { - return nil - } - l, err := net.Listen(p.addr.Network(), p.addr.String()) - if err != nil { - return err - } - p.listener = l - return nil -} - -func (p *TServerSocket) Accept() (TTransport, error) { - p.mu.RLock() - interrupted := p.interrupted - p.mu.RUnlock() - - if interrupted { - return nil, errTransportInterrupted - } - if p.listener == nil { - return nil, NewTTransportException(NOT_OPEN, "No underlying server socket") - } - conn, err := p.listener.Accept() - if err != nil { - return nil, NewTTransportExceptionFromError(err) - } - return NewTSocketFromConnTimeout(conn, p.clientTimeout), nil -} - -// Checks whether the socket is listening. -func (p *TServerSocket) IsListening() bool { - return p.listener != nil -} - -// Connects the socket, creating a new socket object if necessary. -func (p *TServerSocket) Open() error { - if p.IsListening() { - return NewTTransportException(ALREADY_OPEN, "Server socket already open") - } - if l, err := net.Listen(p.addr.Network(), p.addr.String()); err != nil { - return err - } else { - p.listener = l - } - return nil -} - -func (p *TServerSocket) Addr() net.Addr { - if p.listener != nil { - return p.listener.Addr() - } - return p.addr -} - -func (p *TServerSocket) Close() error { - defer func() { - p.listener = nil - }() - if p.IsListening() { - return p.listener.Close() - } - return nil -} - -func (p *TServerSocket) Interrupt() error { - p.mu.Lock() - p.interrupted = true - p.Close() - p.mu.Unlock() - - return nil -} diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/simple_server.go b/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/simple_server.go deleted file mode 100644 index 4097c4aea..000000000 --- a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/simple_server.go +++ /dev/null @@ -1,196 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "log" - "runtime/debug" - "sync" -) - -// Simple, non-concurrent server for testing. 
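The TSimpleServer defined below only needs a server transport, a processor, and transport/protocol factories, all of which appear earlier in this diff. A minimal wiring sketch; the processor is assumed to come from thrift-generated code:

package main

import (
	"log"

	"github.com/apache/thrift/lib/go/thrift"
)

func main() {
	serverTransport, err := thrift.NewTServerSocket("localhost:9090")
	if err != nil {
		log.Fatal(err)
	}

	// processor would come from thrift-generated code; any
	// thrift.TProcessor implementation works here.
	var processor thrift.TProcessor // assumed, e.g. NewCalculatorProcessor(handler)

	server := thrift.NewTSimpleServer4(
		processor,
		serverTransport,
		thrift.NewTTransportFactory(),
		thrift.NewTBinaryProtocolFactoryDefault(),
	)
	log.Fatal(server.Serve())
}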
-type TSimpleServer struct { - quit chan struct{} - - processorFactory TProcessorFactory - serverTransport TServerTransport - inputTransportFactory TTransportFactory - outputTransportFactory TTransportFactory - inputProtocolFactory TProtocolFactory - outputProtocolFactory TProtocolFactory -} - -func NewTSimpleServer2(processor TProcessor, serverTransport TServerTransport) *TSimpleServer { - return NewTSimpleServerFactory2(NewTProcessorFactory(processor), serverTransport) -} - -func NewTSimpleServer4(processor TProcessor, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer { - return NewTSimpleServerFactory4(NewTProcessorFactory(processor), - serverTransport, - transportFactory, - protocolFactory, - ) -} - -func NewTSimpleServer6(processor TProcessor, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer { - return NewTSimpleServerFactory6(NewTProcessorFactory(processor), - serverTransport, - inputTransportFactory, - outputTransportFactory, - inputProtocolFactory, - outputProtocolFactory, - ) -} - -func NewTSimpleServerFactory2(processorFactory TProcessorFactory, serverTransport TServerTransport) *TSimpleServer { - return NewTSimpleServerFactory6(processorFactory, - serverTransport, - NewTTransportFactory(), - NewTTransportFactory(), - NewTBinaryProtocolFactoryDefault(), - NewTBinaryProtocolFactoryDefault(), - ) -} - -func NewTSimpleServerFactory4(processorFactory TProcessorFactory, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer { - return NewTSimpleServerFactory6(processorFactory, - serverTransport, - transportFactory, - transportFactory, - protocolFactory, - protocolFactory, - ) -} - -func NewTSimpleServerFactory6(processorFactory TProcessorFactory, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer { - return &TSimpleServer{ - processorFactory: processorFactory, - serverTransport: serverTransport, - inputTransportFactory: inputTransportFactory, - outputTransportFactory: outputTransportFactory, - inputProtocolFactory: inputProtocolFactory, - outputProtocolFactory: outputProtocolFactory, - quit: make(chan struct{}, 1), - } -} - -func (p *TSimpleServer) ProcessorFactory() TProcessorFactory { - return p.processorFactory -} - -func (p *TSimpleServer) ServerTransport() TServerTransport { - return p.serverTransport -} - -func (p *TSimpleServer) InputTransportFactory() TTransportFactory { - return p.inputTransportFactory -} - -func (p *TSimpleServer) OutputTransportFactory() TTransportFactory { - return p.outputTransportFactory -} - -func (p *TSimpleServer) InputProtocolFactory() TProtocolFactory { - return p.inputProtocolFactory -} - -func (p *TSimpleServer) OutputProtocolFactory() TProtocolFactory { - return p.outputProtocolFactory -} - -func (p *TSimpleServer) Listen() error { - return p.serverTransport.Listen() -} - -func (p *TSimpleServer) AcceptLoop() error { - for { - client, err := p.serverTransport.Accept() - if err != nil { - select { - case <-p.quit: - return nil - default: - } - return err - } - if client != nil { - go func() { - if err := p.processRequests(client); err != nil { - log.Println("error processing request:", err) - } - }() 
- } - } -} - -func (p *TSimpleServer) Serve() error { - err := p.Listen() - if err != nil { - return err - } - p.AcceptLoop() - return nil -} - -var once sync.Once - -func (p *TSimpleServer) Stop() error { - q := func() { - p.quit <- struct{}{} - p.serverTransport.Interrupt() - } - once.Do(q) - return nil -} - -func (p *TSimpleServer) processRequests(client TTransport) error { - processor := p.processorFactory.GetProcessor(client) - inputTransport := p.inputTransportFactory.GetTransport(client) - outputTransport := p.outputTransportFactory.GetTransport(client) - inputProtocol := p.inputProtocolFactory.GetProtocol(inputTransport) - outputProtocol := p.outputProtocolFactory.GetProtocol(outputTransport) - defer func() { - if e := recover(); e != nil { - log.Printf("panic in processor: %s: %s", e, debug.Stack()) - } - }() - if inputTransport != nil { - defer inputTransport.Close() - } - if outputTransport != nil { - defer outputTransport.Close() - } - for { - ok, err := processor.Process(inputProtocol, outputProtocol) - if err, ok := err.(TTransportException); ok && err.TypeId() == END_OF_FILE { - return nil - } else if err != nil { - log.Printf("error processing request: %s", err) - return err - } - if err, ok := err.(TApplicationException); ok && err.TypeId() == UNKNOWN_METHOD { - continue - } - if !ok { - break - } - } - return nil -} diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/socket.go b/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/socket.go deleted file mode 100644 index 82e28b4b1..000000000 --- a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/socket.go +++ /dev/null @@ -1,166 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "net" - "time" -) - -type TSocket struct { - conn net.Conn - addr net.Addr - timeout time.Duration -} - -// NewTSocket creates a net.Conn-backed TTransport, given a host and port -// -// Example: -// trans, err := thrift.NewTSocket("localhost:9090") -func NewTSocket(hostPort string) (*TSocket, error) { - return NewTSocketTimeout(hostPort, 0) -} - -// NewTSocketTimeout creates a net.Conn-backed TTransport, given a host and port -// it also accepts a timeout as a time.Duration -func NewTSocketTimeout(hostPort string, timeout time.Duration) (*TSocket, error) { - //conn, err := net.DialTimeout(network, address, timeout) - addr, err := net.ResolveTCPAddr("tcp", hostPort) - if err != nil { - return nil, err - } - return NewTSocketFromAddrTimeout(addr, timeout), nil -} - -// Creates a TSocket from a net.Addr -func NewTSocketFromAddrTimeout(addr net.Addr, timeout time.Duration) *TSocket { - return &TSocket{addr: addr, timeout: timeout} -} - -// Creates a TSocket from an existing net.Conn -func NewTSocketFromConnTimeout(conn net.Conn, timeout time.Duration) *TSocket { - return &TSocket{conn: conn, addr: conn.RemoteAddr(), timeout: timeout} -} - -// Sets the socket timeout -func (p *TSocket) SetTimeout(timeout time.Duration) error { - p.timeout = timeout - return nil -} - -func (p *TSocket) pushDeadline(read, write bool) { - var t time.Time - if p.timeout > 0 { - t = time.Now().Add(time.Duration(p.timeout)) - } - if read && write { - p.conn.SetDeadline(t) - } else if read { - p.conn.SetReadDeadline(t) - } else if write { - p.conn.SetWriteDeadline(t) - } -} - -// Connects the socket, creating a new socket object if necessary. -func (p *TSocket) Open() error { - if p.IsOpen() { - return NewTTransportException(ALREADY_OPEN, "Socket already connected.") - } - if p.addr == nil { - return NewTTransportException(NOT_OPEN, "Cannot open nil address.") - } - if len(p.addr.Network()) == 0 { - return NewTTransportException(NOT_OPEN, "Cannot open bad network name.") - } - if len(p.addr.String()) == 0 { - return NewTTransportException(NOT_OPEN, "Cannot open bad address.") - } - var err error - if p.conn, err = net.DialTimeout(p.addr.Network(), p.addr.String(), p.timeout); err != nil { - return NewTTransportException(NOT_OPEN, err.Error()) - } - return nil -} - -// Retrieve the underlying net.Conn -func (p *TSocket) Conn() net.Conn { - return p.conn -} - -// Returns true if the connection is open -func (p *TSocket) IsOpen() bool { - if p.conn == nil { - return false - } - return true -} - -// Closes the socket. -func (p *TSocket) Close() error { - // Close the socket - if p.conn != nil { - err := p.conn.Close() - if err != nil { - return err - } - p.conn = nil - } - return nil -} - -//Returns the remote address of the socket. 
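One point worth noting before the remaining TSocket methods: pushDeadline re-arms the deadline ahead of every Read and Write, so the timeout given at construction bounds each I/O call rather than the connection as a whole. A client-side sketch with a hypothetical host and port:

package main

import (
	"log"
	"time"

	"github.com/apache/thrift/lib/go/thrift"
)

func main() {
	// The 5s timeout applies to the connect and, via pushDeadline,
	// to each subsequent Read and Write.
	sock, err := thrift.NewTSocketTimeout("localhost:9090", 5*time.Second)
	if err != nil {
		log.Fatal(err)
	}
	if err := sock.Open(); err != nil {
		log.Fatal(err)
	}
	defer sock.Close()

	if _, err := sock.Write([]byte("hello")); err != nil {
		log.Fatal(err)
	}
}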
-func (p *TSocket) Addr() net.Addr { - return p.addr -} - -func (p *TSocket) Read(buf []byte) (int, error) { - if !p.IsOpen() { - return 0, NewTTransportException(NOT_OPEN, "Connection not open") - } - p.pushDeadline(true, false) - n, err := p.conn.Read(buf) - return n, NewTTransportExceptionFromError(err) -} - -func (p *TSocket) Write(buf []byte) (int, error) { - if !p.IsOpen() { - return 0, NewTTransportException(NOT_OPEN, "Connection not open") - } - p.pushDeadline(false, true) - return p.conn.Write(buf) -} - -func (p *TSocket) Flush() error { - return nil -} - -func (p *TSocket) Interrupt() error { - if !p.IsOpen() { - return nil - } - return p.conn.Close() -} - -func (p *TSocket) RemainingBytes() (num_bytes uint64) { - const maxSize = ^uint64(0) - return maxSize // the thruth is, we just don't know unless framed is used -} - diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/ssl_server_socket.go b/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/ssl_server_socket.go deleted file mode 100644 index 58f859b0e..000000000 --- a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/ssl_server_socket.go +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "net" - "time" - "crypto/tls" -) - -type TSSLServerSocket struct { - listener net.Listener - addr net.Addr - clientTimeout time.Duration - interrupted bool - cfg *tls.Config -} - -func NewTSSLServerSocket(listenAddr string, cfg *tls.Config) (*TSSLServerSocket, error) { - return NewTSSLServerSocketTimeout(listenAddr, cfg, 0) -} - -func NewTSSLServerSocketTimeout(listenAddr string, cfg *tls.Config, clientTimeout time.Duration) (*TSSLServerSocket, error) { - addr, err := net.ResolveTCPAddr("tcp", listenAddr) - if err != nil { - return nil, err - } - return &TSSLServerSocket{addr: addr, clientTimeout: clientTimeout, cfg: cfg}, nil -} - -func (p *TSSLServerSocket) Listen() error { - if p.IsListening() { - return nil - } - l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg) - if err != nil { - return err - } - p.listener = l - return nil -} - -func (p *TSSLServerSocket) Accept() (TTransport, error) { - if p.interrupted { - return nil, errTransportInterrupted - } - if p.listener == nil { - return nil, NewTTransportException(NOT_OPEN, "No underlying server socket") - } - conn, err := p.listener.Accept() - if err != nil { - return nil, NewTTransportExceptionFromError(err) - } - return NewTSSLSocketFromConnTimeout(conn, p.cfg, p.clientTimeout), nil -} - -// Checks whether the socket is listening. -func (p *TSSLServerSocket) IsListening() bool { - return p.listener != nil -} - -// Connects the socket, creating a new socket object if necessary. 
-func (p *TSSLServerSocket) Open() error { - if p.IsListening() { - return NewTTransportException(ALREADY_OPEN, "Server socket already open") - } - if l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg); err != nil { - return err - } else { - p.listener = l - } - return nil -} - -func (p *TSSLServerSocket) Addr() net.Addr { - return p.addr -} - -func (p *TSSLServerSocket) Close() error { - defer func() { - p.listener = nil - }() - if p.IsListening() { - return p.listener.Close() - } - return nil -} - -func (p *TSSLServerSocket) Interrupt() error { - p.interrupted = true - return nil -} diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/ssl_socket.go b/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/ssl_socket.go deleted file mode 100644 index 04d385085..000000000 --- a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/ssl_socket.go +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "crypto/tls" - "net" - "time" -) - -type TSSLSocket struct { - conn net.Conn - // hostPort contains host:port (e.g. "asdf.com:12345"). The field is - // only valid if addr is nil. - hostPort string - // addr is nil when hostPort is not "", and is only used when the - // TSSLSocket is constructed from a net.Addr. 
- addr net.Addr - timeout time.Duration - cfg *tls.Config -} - -// NewTSSLSocket creates a net.Conn-backed TTransport, given a host and port and tls Configuration -// -// Example: -// trans, err := thrift.NewTSSLSocket("localhost:9090", nil) -func NewTSSLSocket(hostPort string, cfg *tls.Config) (*TSSLSocket, error) { - return NewTSSLSocketTimeout(hostPort, cfg, 0) -} - -// NewTSSLSocketTimeout creates a net.Conn-backed TTransport, given a host and port -// it also accepts a tls Configuration and a timeout as a time.Duration -func NewTSSLSocketTimeout(hostPort string, cfg *tls.Config, timeout time.Duration) (*TSSLSocket, error) { - return &TSSLSocket{hostPort: hostPort, timeout: timeout, cfg: cfg}, nil -} - -// Creates a TSSLSocket from a net.Addr -func NewTSSLSocketFromAddrTimeout(addr net.Addr, cfg *tls.Config, timeout time.Duration) *TSSLSocket { - return &TSSLSocket{addr: addr, timeout: timeout, cfg: cfg} -} - -// Creates a TSSLSocket from an existing net.Conn -func NewTSSLSocketFromConnTimeout(conn net.Conn, cfg *tls.Config, timeout time.Duration) *TSSLSocket { - return &TSSLSocket{conn: conn, addr: conn.RemoteAddr(), timeout: timeout, cfg: cfg} -} - -// Sets the socket timeout -func (p *TSSLSocket) SetTimeout(timeout time.Duration) error { - p.timeout = timeout - return nil -} - -func (p *TSSLSocket) pushDeadline(read, write bool) { - var t time.Time - if p.timeout > 0 { - t = time.Now().Add(time.Duration(p.timeout)) - } - if read && write { - p.conn.SetDeadline(t) - } else if read { - p.conn.SetReadDeadline(t) - } else if write { - p.conn.SetWriteDeadline(t) - } -} - -// Connects the socket, creating a new socket object if necessary. -func (p *TSSLSocket) Open() error { - var err error - // If we have a hostname, we need to pass the hostname to tls.Dial for - // certificate hostname checks. - if p.hostPort != "" { - if p.conn, err = tls.Dial("tcp", p.hostPort, p.cfg); err != nil { - return NewTTransportException(NOT_OPEN, err.Error()) - } - } else { - if p.IsOpen() { - return NewTTransportException(ALREADY_OPEN, "Socket already connected.") - } - if p.addr == nil { - return NewTTransportException(NOT_OPEN, "Cannot open nil address.") - } - if len(p.addr.Network()) == 0 { - return NewTTransportException(NOT_OPEN, "Cannot open bad network name.") - } - if len(p.addr.String()) == 0 { - return NewTTransportException(NOT_OPEN, "Cannot open bad address.") - } - if p.conn, err = tls.Dial(p.addr.Network(), p.addr.String(), p.cfg); err != nil { - return NewTTransportException(NOT_OPEN, err.Error()) - } - } - return nil -} - -// Retrieve the underlying net.Conn -func (p *TSSLSocket) Conn() net.Conn { - return p.conn -} - -// Returns true if the connection is open -func (p *TSSLSocket) IsOpen() bool { - if p.conn == nil { - return false - } - return true -} - -// Closes the socket. 
-func (p *TSSLSocket) Close() error { - // Close the socket - if p.conn != nil { - err := p.conn.Close() - if err != nil { - return err - } - p.conn = nil - } - return nil -} - -func (p *TSSLSocket) Read(buf []byte) (int, error) { - if !p.IsOpen() { - return 0, NewTTransportException(NOT_OPEN, "Connection not open") - } - p.pushDeadline(true, false) - n, err := p.conn.Read(buf) - return n, NewTTransportExceptionFromError(err) -} - -func (p *TSSLSocket) Write(buf []byte) (int, error) { - if !p.IsOpen() { - return 0, NewTTransportException(NOT_OPEN, "Connection not open") - } - p.pushDeadline(false, true) - return p.conn.Write(buf) -} - -func (p *TSSLSocket) Flush() error { - return nil -} - -func (p *TSSLSocket) Interrupt() error { - if !p.IsOpen() { - return nil - } - return p.conn.Close() -} - -func (p *TSSLSocket) RemainingBytes() (num_bytes uint64) { - const maxSize = ^uint64(0) - return maxSize // the thruth is, we just don't know unless framed is used -} - diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/transport_exception.go b/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/transport_exception.go deleted file mode 100644 index 9505b4461..000000000 --- a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/transport_exception.go +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "errors" - "io" -) - -type timeoutable interface { - Timeout() bool -} - -// Thrift Transport exception -type TTransportException interface { - TException - TypeId() int - Err() error -} - -const ( - UNKNOWN_TRANSPORT_EXCEPTION = 0 - NOT_OPEN = 1 - ALREADY_OPEN = 2 - TIMED_OUT = 3 - END_OF_FILE = 4 -) - -type tTransportException struct { - typeId int - err error -} - -func (p *tTransportException) TypeId() int { - return p.typeId -} - -func (p *tTransportException) Error() string { - return p.err.Error() -} - -func (p *tTransportException) Err() error { - return p.err -} - -func NewTTransportException(t int, e string) TTransportException { - return &tTransportException{typeId: t, err: errors.New(e)} -} - -func NewTTransportExceptionFromError(e error) TTransportException { - if e == nil { - return nil - } - - if t, ok := e.(TTransportException); ok { - return t - } - - switch v := e.(type) { - case TTransportException: - return v - case timeoutable: - if v.Timeout() { - return &tTransportException{typeId: TIMED_OUT, err: e} - } - } - - if e == io.EOF { - return &tTransportException{typeId: END_OF_FILE, err: e} - } - - return &tTransportException{typeId: UNKNOWN_TRANSPORT_EXCEPTION, err: e} -} diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/zlib_transport.go b/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/zlib_transport.go deleted file mode 100644 index e47455fe1..000000000 --- a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/zlib_transport.go +++ /dev/null @@ -1,117 +0,0 @@ -/* -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, -* software distributed under the License is distributed on an -* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -* KIND, either express or implied. See the License for the -* specific language governing permissions and limitations -* under the License. - */ - -package thrift - -import ( - "compress/zlib" - "io" - "log" -) - -// TZlibTransportFactory is a factory for TZlibTransport instances -type TZlibTransportFactory struct { - level int -} - -// TZlibTransport is a TTransport implementation that makes use of zlib compression. 
-type TZlibTransport struct { - reader io.ReadCloser - transport TTransport - writer *zlib.Writer -} - -// GetTransport constructs a new instance of NewTZlibTransport -func (p *TZlibTransportFactory) GetTransport(trans TTransport) TTransport { - t, _ := NewTZlibTransport(trans, p.level) - return t -} - -// NewTZlibTransportFactory constructs a new instance of NewTZlibTransportFactory -func NewTZlibTransportFactory(level int) *TZlibTransportFactory { - return &TZlibTransportFactory{level: level} -} - -// NewTZlibTransport constructs a new instance of TZlibTransport -func NewTZlibTransport(trans TTransport, level int) (*TZlibTransport, error) { - w, err := zlib.NewWriterLevel(trans, level) - if err != nil { - log.Println(err) - return nil, err - } - - return &TZlibTransport{ - writer: w, - transport: trans, - }, nil -} - -// Close closes the reader and writer (flushing any unwritten data) and closes -// the underlying transport. -func (z *TZlibTransport) Close() error { - if z.reader != nil { - if err := z.reader.Close(); err != nil { - return err - } - } - if err := z.writer.Close(); err != nil { - return err - } - return z.transport.Close() -} - -// Flush flushes the writer and its underlying transport. -func (z *TZlibTransport) Flush() error { - if err := z.writer.Flush(); err != nil { - return err - } - return z.transport.Flush() -} - -// IsOpen returns true if the transport is open -func (z *TZlibTransport) IsOpen() bool { - return z.transport.IsOpen() -} - -// Open opens the transport for communication -func (z *TZlibTransport) Open() error { - return z.transport.Open() -} - -func (z *TZlibTransport) Read(p []byte) (int, error) { - if z.reader == nil { - r, err := zlib.NewReader(z.transport) - if err != nil { - return 0, NewTTransportExceptionFromError(err) - } - z.reader = r - } - - return z.reader.Read(p) -} - -// RemainingBytes returns the size in bytes of the data that is still to be -// read. 
-func (z *TZlibTransport) RemainingBytes() uint64 { - return z.transport.RemainingBytes() -} - -func (z *TZlibTransport) Write(p []byte) (int, error) { - return z.writer.Write(p) -} diff --git a/hotelReservation/vendor/github.com/armon/go-metrics/.gitignore b/hotelReservation/vendor/github.com/armon/go-metrics/.gitignore new file mode 100644 index 000000000..8c03ec112 --- /dev/null +++ b/hotelReservation/vendor/github.com/armon/go-metrics/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +/metrics.out diff --git a/hotelReservation/vendor/github.com/armon/go-metrics/LICENSE b/hotelReservation/vendor/github.com/armon/go-metrics/LICENSE new file mode 100644 index 000000000..106569e54 --- /dev/null +++ b/hotelReservation/vendor/github.com/armon/go-metrics/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Armon Dadgar + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/hotelReservation/vendor/github.com/armon/go-metrics/README.md b/hotelReservation/vendor/github.com/armon/go-metrics/README.md new file mode 100644 index 000000000..aa73348c0 --- /dev/null +++ b/hotelReservation/vendor/github.com/armon/go-metrics/README.md @@ -0,0 +1,91 @@ +go-metrics +========== + +This library provides a `metrics` package which can be used to instrument code, +expose application metrics, and profile runtime performance in a flexible manner. + +Current API: [![GoDoc](https://godoc.org/github.com/armon/go-metrics?status.svg)](https://godoc.org/github.com/armon/go-metrics) + +Sinks +----- + +The `metrics` package makes use of a `MetricSink` interface to support delivery +to any type of backend. Currently the following sinks are provided: + +* StatsiteSink : Sinks to a [statsite](https://github.com/armon/statsite/) instance (TCP) +* StatsdSink: Sinks to a [StatsD](https://github.com/etsy/statsd/) / statsite instance (UDP) +* PrometheusSink: Sinks to a [Prometheus](http://prometheus.io/) metrics endpoint (exposed via HTTP for scrapes) +* InmemSink : Provides in-memory aggregation, can be used to export stats +* FanoutSink : Sinks to multiple sinks. Enables writing to multiple statsite instances for example. +* BlackholeSink : Sinks to nowhere + +In addition to the sinks, the `InmemSignal` can be used to catch a signal, +and dump a formatted output of recent metrics. 
For example, when a process gets
+a SIGUSR1, it can dump recent performance metrics to stderr for debugging.
+
+Labels
+------
+
+Most metrics have an equivalent ending in `WithLabels`; such methods
+allow pushing metrics with labels and using features of the underlying sinks
+(e.g. translation into Prometheus labels).
+
+Since some of these labels may greatly increase the cardinality of metrics, the
+library allows filtering labels using a blacklist/whitelist system
+which is global to all metrics.
+
+* If `Config.AllowedLabels` is not nil, then only the labels specified in this value are sent to the underlying sink; otherwise, all labels are sent by default.
+* If `Config.BlockedLabels` is not nil, any label specified in this value is not sent to the underlying sinks.
+
+By default, both `Config.AllowedLabels` and `Config.BlockedLabels` are nil, meaning that
+no labels are filtered at all, but a user can globally block certain high-cardinality
+labels at the application level.
+
+Examples
+--------
+
+Here is an example of using the package:
+
+```go
+func SlowMethod() {
+    // Profiling the runtime of a method
+    defer metrics.MeasureSince([]string{"SlowMethod"}, time.Now())
+}
+
+// Configure a statsite sink as the global metrics sink
+sink, _ := metrics.NewStatsiteSink("statsite:8125")
+metrics.NewGlobal(metrics.DefaultConfig("service-name"), sink)
+
+// Emit a Key/Value pair
+metrics.EmitKey([]string{"questions", "meaning of life"}, 42)
+```
+
+Here is an example of setting up a signal handler:
+
+```go
+// Setup the inmem sink and signal handler
+inm := metrics.NewInmemSink(10*time.Second, time.Minute)
+sig := metrics.DefaultInmemSignal(inm)
+metrics.NewGlobal(metrics.DefaultConfig("service-name"), inm)
+
+// Run some code
+inm.SetGauge([]string{"foo"}, 42)
+inm.EmitKey([]string{"bar"}, 30)
+
+inm.IncrCounter([]string{"baz"}, 42)
+inm.IncrCounter([]string{"baz"}, 1)
+inm.IncrCounter([]string{"baz"}, 80)
+
+inm.AddSample([]string{"method", "wow"}, 42)
+inm.AddSample([]string{"method", "wow"}, 100)
+inm.AddSample([]string{"method", "wow"}, 22)
+
+....
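+
+// A further sketch (assumption: the `inm` sink from above is in scope; this
+// is not part of the upstream README). Aggregated data can also be read back
+// programmatically rather than waiting for a signal dump:
+intervals := inm.Data() // []*IntervalMetrics, one entry per retained interval
+_ = intervals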
+``` + +When a signal comes in, output like the following will be dumped to stderr: + + [2014-01-28 14:57:33.04 -0800 PST][G] 'foo': 42.000 + [2014-01-28 14:57:33.04 -0800 PST][P] 'bar': 30.000 + [2014-01-28 14:57:33.04 -0800 PST][C] 'baz': Count: 3 Min: 1.000 Mean: 41.000 Max: 80.000 Stddev: 39.509 + [2014-01-28 14:57:33.04 -0800 PST][S] 'method.wow': Count: 3 Min: 22.000 Mean: 54.667 Max: 100.000 Stddev: 40.513 \ No newline at end of file diff --git a/hotelReservation/vendor/github.com/armon/go-metrics/const_unix.go b/hotelReservation/vendor/github.com/armon/go-metrics/const_unix.go new file mode 100644 index 000000000..31098dd57 --- /dev/null +++ b/hotelReservation/vendor/github.com/armon/go-metrics/const_unix.go @@ -0,0 +1,12 @@ +// +build !windows + +package metrics + +import ( + "syscall" +) + +const ( + // DefaultSignal is used with DefaultInmemSignal + DefaultSignal = syscall.SIGUSR1 +) diff --git a/hotelReservation/vendor/github.com/armon/go-metrics/const_windows.go b/hotelReservation/vendor/github.com/armon/go-metrics/const_windows.go new file mode 100644 index 000000000..38136af3e --- /dev/null +++ b/hotelReservation/vendor/github.com/armon/go-metrics/const_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package metrics + +import ( + "syscall" +) + +const ( + // DefaultSignal is used with DefaultInmemSignal + // Windows has no SIGUSR1, use SIGBREAK + DefaultSignal = syscall.Signal(21) +) diff --git a/hotelReservation/vendor/github.com/armon/go-metrics/inmem.go b/hotelReservation/vendor/github.com/armon/go-metrics/inmem.go new file mode 100644 index 000000000..4e2d6a709 --- /dev/null +++ b/hotelReservation/vendor/github.com/armon/go-metrics/inmem.go @@ -0,0 +1,348 @@ +package metrics + +import ( + "bytes" + "fmt" + "math" + "net/url" + "strings" + "sync" + "time" +) + +// InmemSink provides a MetricSink that does in-memory aggregation +// without sending metrics over a network. It can be embedded within +// an application to provide profiling information. +type InmemSink struct { + // How long is each aggregation interval + interval time.Duration + + // Retain controls how many metrics interval we keep + retain time.Duration + + // maxIntervals is the maximum length of intervals. + // It is retain / interval. 
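+	// For example, retain=1m with interval=10s yields maxIntervals=6.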
+ maxIntervals int + + // intervals is a slice of the retained intervals + intervals []*IntervalMetrics + intervalLock sync.RWMutex + + rateDenom float64 +} + +// IntervalMetrics stores the aggregated metrics +// for a specific interval +type IntervalMetrics struct { + sync.RWMutex + + // The start time of the interval + Interval time.Time + + // Gauges maps the key to the last set value + Gauges map[string]GaugeValue + + // Points maps the string to the list of emitted values + // from EmitKey + Points map[string][]float32 + + // Counters maps the string key to a sum of the counter + // values + Counters map[string]SampledValue + + // Samples maps the key to an AggregateSample, + // which has the rolled up view of a sample + Samples map[string]SampledValue +} + +// NewIntervalMetrics creates a new IntervalMetrics for a given interval +func NewIntervalMetrics(intv time.Time) *IntervalMetrics { + return &IntervalMetrics{ + Interval: intv, + Gauges: make(map[string]GaugeValue), + Points: make(map[string][]float32), + Counters: make(map[string]SampledValue), + Samples: make(map[string]SampledValue), + } +} + +// AggregateSample is used to hold aggregate metrics +// about a sample +type AggregateSample struct { + Count int // The count of emitted pairs + Rate float64 // The values rate per time unit (usually 1 second) + Sum float64 // The sum of values + SumSq float64 `json:"-"` // The sum of squared values + Min float64 // Minimum value + Max float64 // Maximum value + LastUpdated time.Time `json:"-"` // When value was last updated +} + +// Computes a Stddev of the values +func (a *AggregateSample) Stddev() float64 { + num := (float64(a.Count) * a.SumSq) - math.Pow(a.Sum, 2) + div := float64(a.Count * (a.Count - 1)) + if div == 0 { + return 0 + } + return math.Sqrt(num / div) +} + +// Computes a mean of the values +func (a *AggregateSample) Mean() float64 { + if a.Count == 0 { + return 0 + } + return a.Sum / float64(a.Count) +} + +// Ingest is used to update a sample +func (a *AggregateSample) Ingest(v float64, rateDenom float64) { + a.Count++ + a.Sum += v + a.SumSq += (v * v) + if v < a.Min || a.Count == 1 { + a.Min = v + } + if v > a.Max || a.Count == 1 { + a.Max = v + } + a.Rate = float64(a.Sum) / rateDenom + a.LastUpdated = time.Now() +} + +func (a *AggregateSample) String() string { + if a.Count == 0 { + return "Count: 0" + } else if a.Stddev() == 0 { + return fmt.Sprintf("Count: %d Sum: %0.3f LastUpdated: %s", a.Count, a.Sum, a.LastUpdated) + } else { + return fmt.Sprintf("Count: %d Min: %0.3f Mean: %0.3f Max: %0.3f Stddev: %0.3f Sum: %0.3f LastUpdated: %s", + a.Count, a.Min, a.Mean(), a.Max, a.Stddev(), a.Sum, a.LastUpdated) + } +} + +// NewInmemSinkFromURL creates an InmemSink from a URL. It is used +// (and tested) from NewMetricSinkFromURL. +func NewInmemSinkFromURL(u *url.URL) (MetricSink, error) { + params := u.Query() + + interval, err := time.ParseDuration(params.Get("interval")) + if err != nil { + return nil, fmt.Errorf("Bad 'interval' param: %s", err) + } + + retain, err := time.ParseDuration(params.Get("retain")) + if err != nil { + return nil, fmt.Errorf("Bad 'retain' param: %s", err) + } + + return NewInmemSink(interval, retain), nil +} + +// NewInmemSink is used to construct a new in-memory sink. +// Uses an aggregation interval and maximum retention period. 
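+//
+// A hypothetical usage sketch (editor's assumption, mirroring the README):
+// with interval=10s and retain=1m the sink keeps 6 interval buckets.
+//
+//	inm := NewInmemSink(10*time.Second, time.Minute)
+//	inm.IncrCounter([]string{"requests"}, 1)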
+func NewInmemSink(interval, retain time.Duration) *InmemSink { + rateTimeUnit := time.Second + i := &InmemSink{ + interval: interval, + retain: retain, + maxIntervals: int(retain / interval), + rateDenom: float64(interval.Nanoseconds()) / float64(rateTimeUnit.Nanoseconds()), + } + i.intervals = make([]*IntervalMetrics, 0, i.maxIntervals) + return i +} + +func (i *InmemSink) SetGauge(key []string, val float32) { + i.SetGaugeWithLabels(key, val, nil) +} + +func (i *InmemSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { + k, name := i.flattenKeyLabels(key, labels) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + intv.Gauges[k] = GaugeValue{Name: name, Value: val, Labels: labels} +} + +func (i *InmemSink) EmitKey(key []string, val float32) { + k := i.flattenKey(key) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + vals := intv.Points[k] + intv.Points[k] = append(vals, val) +} + +func (i *InmemSink) IncrCounter(key []string, val float32) { + i.IncrCounterWithLabels(key, val, nil) +} + +func (i *InmemSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { + k, name := i.flattenKeyLabels(key, labels) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + + agg, ok := intv.Counters[k] + if !ok { + agg = SampledValue{ + Name: name, + AggregateSample: &AggregateSample{}, + Labels: labels, + } + intv.Counters[k] = agg + } + agg.Ingest(float64(val), i.rateDenom) +} + +func (i *InmemSink) AddSample(key []string, val float32) { + i.AddSampleWithLabels(key, val, nil) +} + +func (i *InmemSink) AddSampleWithLabels(key []string, val float32, labels []Label) { + k, name := i.flattenKeyLabels(key, labels) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + + agg, ok := intv.Samples[k] + if !ok { + agg = SampledValue{ + Name: name, + AggregateSample: &AggregateSample{}, + Labels: labels, + } + intv.Samples[k] = agg + } + agg.Ingest(float64(val), i.rateDenom) +} + +// Data is used to retrieve all the aggregated metrics +// Intervals may be in use, and a read lock should be acquired +func (i *InmemSink) Data() []*IntervalMetrics { + // Get the current interval, forces creation + i.getInterval() + + i.intervalLock.RLock() + defer i.intervalLock.RUnlock() + + n := len(i.intervals) + intervals := make([]*IntervalMetrics, n) + + copy(intervals[:n-1], i.intervals[:n-1]) + current := i.intervals[n-1] + + // make its own copy for current interval + intervals[n-1] = &IntervalMetrics{} + copyCurrent := intervals[n-1] + current.RLock() + *copyCurrent = *current + + copyCurrent.Gauges = make(map[string]GaugeValue, len(current.Gauges)) + for k, v := range current.Gauges { + copyCurrent.Gauges[k] = v + } + // saved values will be not change, just copy its link + copyCurrent.Points = make(map[string][]float32, len(current.Points)) + for k, v := range current.Points { + copyCurrent.Points[k] = v + } + copyCurrent.Counters = make(map[string]SampledValue, len(current.Counters)) + for k, v := range current.Counters { + copyCurrent.Counters[k] = v + } + copyCurrent.Samples = make(map[string]SampledValue, len(current.Samples)) + for k, v := range current.Samples { + copyCurrent.Samples[k] = v + } + current.RUnlock() + + return intervals +} + +func (i *InmemSink) getExistingInterval(intv time.Time) *IntervalMetrics { + i.intervalLock.RLock() + defer i.intervalLock.RUnlock() + + n := len(i.intervals) + if n > 0 && i.intervals[n-1].Interval == intv { + return i.intervals[n-1] + } + return nil +} + +func (i *InmemSink) createInterval(intv 
time.Time) *IntervalMetrics { + i.intervalLock.Lock() + defer i.intervalLock.Unlock() + + // Check for an existing interval + n := len(i.intervals) + if n > 0 && i.intervals[n-1].Interval == intv { + return i.intervals[n-1] + } + + // Add the current interval + current := NewIntervalMetrics(intv) + i.intervals = append(i.intervals, current) + n++ + + // Truncate the intervals if they are too long + if n >= i.maxIntervals { + copy(i.intervals[0:], i.intervals[n-i.maxIntervals:]) + i.intervals = i.intervals[:i.maxIntervals] + } + return current +} + +// getInterval returns the current interval to write to +func (i *InmemSink) getInterval() *IntervalMetrics { + intv := time.Now().Truncate(i.interval) + if m := i.getExistingInterval(intv); m != nil { + return m + } + return i.createInterval(intv) +} + +// Flattens the key for formatting, removes spaces +func (i *InmemSink) flattenKey(parts []string) string { + buf := &bytes.Buffer{} + replacer := strings.NewReplacer(" ", "_") + + if len(parts) > 0 { + replacer.WriteString(buf, parts[0]) + } + for _, part := range parts[1:] { + replacer.WriteString(buf, ".") + replacer.WriteString(buf, part) + } + + return buf.String() +} + +// Flattens the key for formatting along with its labels, removes spaces +func (i *InmemSink) flattenKeyLabels(parts []string, labels []Label) (string, string) { + buf := &bytes.Buffer{} + replacer := strings.NewReplacer(" ", "_") + + if len(parts) > 0 { + replacer.WriteString(buf, parts[0]) + } + for _, part := range parts[1:] { + replacer.WriteString(buf, ".") + replacer.WriteString(buf, part) + } + + key := buf.String() + + for _, label := range labels { + replacer.WriteString(buf, fmt.Sprintf(";%s=%s", label.Name, label.Value)) + } + + return buf.String(), key +} diff --git a/hotelReservation/vendor/github.com/armon/go-metrics/inmem_endpoint.go b/hotelReservation/vendor/github.com/armon/go-metrics/inmem_endpoint.go new file mode 100644 index 000000000..504f1b374 --- /dev/null +++ b/hotelReservation/vendor/github.com/armon/go-metrics/inmem_endpoint.go @@ -0,0 +1,118 @@ +package metrics + +import ( + "fmt" + "net/http" + "sort" + "time" +) + +// MetricsSummary holds a roll-up of metrics info for a given interval +type MetricsSummary struct { + Timestamp string + Gauges []GaugeValue + Points []PointValue + Counters []SampledValue + Samples []SampledValue +} + +type GaugeValue struct { + Name string + Hash string `json:"-"` + Value float32 + + Labels []Label `json:"-"` + DisplayLabels map[string]string `json:"Labels"` +} + +type PointValue struct { + Name string + Points []float32 +} + +type SampledValue struct { + Name string + Hash string `json:"-"` + *AggregateSample + Mean float64 + Stddev float64 + + Labels []Label `json:"-"` + DisplayLabels map[string]string `json:"Labels"` +} + +// DisplayMetrics returns a summary of the metrics from the most recent finished interval. 
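+//
+// A hedged wiring sketch (editor's assumption: the handler path, the `inm`
+// sink, and the JSON encoding are illustrative, not part of this file):
+//
+//	http.HandleFunc("/debug/metrics", func(w http.ResponseWriter, r *http.Request) {
+//		summary, err := inm.DisplayMetrics(w, r)
+//		if err != nil {
+//			http.Error(w, err.Error(), http.StatusInternalServerError)
+//			return
+//		}
+//		_ = json.NewEncoder(w).Encode(summary)
+//	})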
+func (i *InmemSink) DisplayMetrics(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
+	data := i.Data()
+
+	var interval *IntervalMetrics
+	n := len(data)
+	switch {
+	case n == 0:
+		return nil, fmt.Errorf("no metric intervals have been initialized yet")
+	case n == 1:
+		// Show the current interval if it's all we have
+		interval = data[0]
+	default:
+		// Show the most recent finished interval if we have one
+		interval = data[n-2]
+	}
+
+	summary := MetricsSummary{
+		Timestamp: interval.Interval.Round(time.Second).UTC().String(),
+		Gauges:    make([]GaugeValue, 0, len(interval.Gauges)),
+		Points:    make([]PointValue, 0, len(interval.Points)),
+	}
+
+	// Format and sort the output of each metric type, so it gets displayed in a
+	// deterministic order.
+	for name, points := range interval.Points {
+		summary.Points = append(summary.Points, PointValue{name, points})
+	}
+	sort.Slice(summary.Points, func(i, j int) bool {
+		return summary.Points[i].Name < summary.Points[j].Name
+	})
+
+	for hash, value := range interval.Gauges {
+		value.Hash = hash
+		value.DisplayLabels = make(map[string]string)
+		for _, label := range value.Labels {
+			value.DisplayLabels[label.Name] = label.Value
+		}
+		value.Labels = nil
+
+		summary.Gauges = append(summary.Gauges, value)
+	}
+	sort.Slice(summary.Gauges, func(i, j int) bool {
+		return summary.Gauges[i].Hash < summary.Gauges[j].Hash
+	})
+
+	summary.Counters = formatSamples(interval.Counters)
+	summary.Samples = formatSamples(interval.Samples)
+
+	return summary, nil
+}
+
+func formatSamples(source map[string]SampledValue) []SampledValue {
+	output := make([]SampledValue, 0, len(source))
+	for hash, sample := range source {
+		displayLabels := make(map[string]string)
+		for _, label := range sample.Labels {
+			displayLabels[label.Name] = label.Value
+		}
+
+		output = append(output, SampledValue{
+			Name:            sample.Name,
+			Hash:            hash,
+			AggregateSample: sample.AggregateSample,
+			Mean:            sample.AggregateSample.Mean(),
+			Stddev:          sample.AggregateSample.Stddev(),
+			DisplayLabels:   displayLabels,
+		})
+	}
+	sort.Slice(output, func(i, j int) bool {
+		return output[i].Hash < output[j].Hash
+	})
+
+	return output
+}
diff --git a/hotelReservation/vendor/github.com/armon/go-metrics/inmem_signal.go b/hotelReservation/vendor/github.com/armon/go-metrics/inmem_signal.go
new file mode 100644
index 000000000..0937f4aed
--- /dev/null
+++ b/hotelReservation/vendor/github.com/armon/go-metrics/inmem_signal.go
@@ -0,0 +1,117 @@
+package metrics
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"os/signal"
+	"strings"
+	"sync"
+	"syscall"
+)
+
+// InmemSignal is used to listen for a given signal, and when received,
+// to dump the current metrics from the InmemSink to an io.Writer
+type InmemSignal struct {
+	signal syscall.Signal
+	inm    *InmemSink
+	w      io.Writer
+	sigCh  chan os.Signal
+
+	stop     bool
+	stopCh   chan struct{}
+	stopLock sync.Mutex
+}
+
+// NewInmemSignal creates a new InmemSignal which listens for a given signal,
+// and dumps the current metrics out to a writer
+func NewInmemSignal(inmem *InmemSink, sig syscall.Signal, w io.Writer) *InmemSignal {
+	i := &InmemSignal{
+		signal: sig,
+		inm:    inmem,
+		w:      w,
+		sigCh:  make(chan os.Signal, 1),
+		stopCh: make(chan struct{}),
+	}
+	signal.Notify(i.sigCh, sig)
+	go i.run()
+	return i
+}
+
+// DefaultInmemSignal returns a new InmemSignal that responds to SIGUSR1
+// and writes output to stderr.
Windows uses SIGBREAK +func DefaultInmemSignal(inmem *InmemSink) *InmemSignal { + return NewInmemSignal(inmem, DefaultSignal, os.Stderr) +} + +// Stop is used to stop the InmemSignal from listening +func (i *InmemSignal) Stop() { + i.stopLock.Lock() + defer i.stopLock.Unlock() + + if i.stop { + return + } + i.stop = true + close(i.stopCh) + signal.Stop(i.sigCh) +} + +// run is a long running routine that handles signals +func (i *InmemSignal) run() { + for { + select { + case <-i.sigCh: + i.dumpStats() + case <-i.stopCh: + return + } + } +} + +// dumpStats is used to dump the data to output writer +func (i *InmemSignal) dumpStats() { + buf := bytes.NewBuffer(nil) + + data := i.inm.Data() + // Skip the last period which is still being aggregated + for j := 0; j < len(data)-1; j++ { + intv := data[j] + intv.RLock() + for _, val := range intv.Gauges { + name := i.flattenLabels(val.Name, val.Labels) + fmt.Fprintf(buf, "[%v][G] '%s': %0.3f\n", intv.Interval, name, val.Value) + } + for name, vals := range intv.Points { + for _, val := range vals { + fmt.Fprintf(buf, "[%v][P] '%s': %0.3f\n", intv.Interval, name, val) + } + } + for _, agg := range intv.Counters { + name := i.flattenLabels(agg.Name, agg.Labels) + fmt.Fprintf(buf, "[%v][C] '%s': %s\n", intv.Interval, name, agg.AggregateSample) + } + for _, agg := range intv.Samples { + name := i.flattenLabels(agg.Name, agg.Labels) + fmt.Fprintf(buf, "[%v][S] '%s': %s\n", intv.Interval, name, agg.AggregateSample) + } + intv.RUnlock() + } + + // Write out the bytes + i.w.Write(buf.Bytes()) +} + +// Flattens the key for formatting along with its labels, removes spaces +func (i *InmemSignal) flattenLabels(name string, labels []Label) string { + buf := bytes.NewBufferString(name) + replacer := strings.NewReplacer(" ", "_", ":", "_") + + for _, label := range labels { + replacer.WriteString(buf, ".") + replacer.WriteString(buf, label.Value) + } + + return buf.String() +} diff --git a/hotelReservation/vendor/github.com/armon/go-metrics/metrics.go b/hotelReservation/vendor/github.com/armon/go-metrics/metrics.go new file mode 100644 index 000000000..cf9def748 --- /dev/null +++ b/hotelReservation/vendor/github.com/armon/go-metrics/metrics.go @@ -0,0 +1,278 @@ +package metrics + +import ( + "runtime" + "strings" + "time" + + "github.com/hashicorp/go-immutable-radix" +) + +type Label struct { + Name string + Value string +} + +func (m *Metrics) SetGauge(key []string, val float32) { + m.SetGaugeWithLabels(key, val, nil) +} + +func (m *Metrics) SetGaugeWithLabels(key []string, val float32, labels []Label) { + if m.HostName != "" { + if m.EnableHostnameLabel { + labels = append(labels, Label{"host", m.HostName}) + } else if m.EnableHostname { + key = insert(0, m.HostName, key) + } + } + if m.EnableTypePrefix { + key = insert(0, "gauge", key) + } + if m.ServiceName != "" { + if m.EnableServiceLabel { + labels = append(labels, Label{"service", m.ServiceName}) + } else { + key = insert(0, m.ServiceName, key) + } + } + allowed, labelsFiltered := m.allowMetric(key, labels) + if !allowed { + return + } + m.sink.SetGaugeWithLabels(key, val, labelsFiltered) +} + +func (m *Metrics) EmitKey(key []string, val float32) { + if m.EnableTypePrefix { + key = insert(0, "kv", key) + } + if m.ServiceName != "" { + key = insert(0, m.ServiceName, key) + } + allowed, _ := m.allowMetric(key, nil) + if !allowed { + return + } + m.sink.EmitKey(key, val) +} + +func (m *Metrics) IncrCounter(key []string, val float32) { + m.IncrCounterWithLabels(key, val, nil) +} + +func (m *Metrics) 
IncrCounterWithLabels(key []string, val float32, labels []Label) { + if m.HostName != "" && m.EnableHostnameLabel { + labels = append(labels, Label{"host", m.HostName}) + } + if m.EnableTypePrefix { + key = insert(0, "counter", key) + } + if m.ServiceName != "" { + if m.EnableServiceLabel { + labels = append(labels, Label{"service", m.ServiceName}) + } else { + key = insert(0, m.ServiceName, key) + } + } + allowed, labelsFiltered := m.allowMetric(key, labels) + if !allowed { + return + } + m.sink.IncrCounterWithLabels(key, val, labelsFiltered) +} + +func (m *Metrics) AddSample(key []string, val float32) { + m.AddSampleWithLabels(key, val, nil) +} + +func (m *Metrics) AddSampleWithLabels(key []string, val float32, labels []Label) { + if m.HostName != "" && m.EnableHostnameLabel { + labels = append(labels, Label{"host", m.HostName}) + } + if m.EnableTypePrefix { + key = insert(0, "sample", key) + } + if m.ServiceName != "" { + if m.EnableServiceLabel { + labels = append(labels, Label{"service", m.ServiceName}) + } else { + key = insert(0, m.ServiceName, key) + } + } + allowed, labelsFiltered := m.allowMetric(key, labels) + if !allowed { + return + } + m.sink.AddSampleWithLabels(key, val, labelsFiltered) +} + +func (m *Metrics) MeasureSince(key []string, start time.Time) { + m.MeasureSinceWithLabels(key, start, nil) +} + +func (m *Metrics) MeasureSinceWithLabels(key []string, start time.Time, labels []Label) { + if m.HostName != "" && m.EnableHostnameLabel { + labels = append(labels, Label{"host", m.HostName}) + } + if m.EnableTypePrefix { + key = insert(0, "timer", key) + } + if m.ServiceName != "" { + if m.EnableServiceLabel { + labels = append(labels, Label{"service", m.ServiceName}) + } else { + key = insert(0, m.ServiceName, key) + } + } + allowed, labelsFiltered := m.allowMetric(key, labels) + if !allowed { + return + } + now := time.Now() + elapsed := now.Sub(start) + msec := float32(elapsed.Nanoseconds()) / float32(m.TimerGranularity) + m.sink.AddSampleWithLabels(key, msec, labelsFiltered) +} + +// UpdateFilter overwrites the existing filter with the given rules. +func (m *Metrics) UpdateFilter(allow, block []string) { + m.UpdateFilterAndLabels(allow, block, m.AllowedLabels, m.BlockedLabels) +} + +// UpdateFilterAndLabels overwrites the existing filter with the given rules. 
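+//
+// A usage sketch (editor's assumption: the prefixes and label name are
+// illustrative): allow only "api."-prefixed metrics, block the "api.debug"
+// subtree, and drop the high-cardinality "host" label:
+//
+//	m.UpdateFilterAndLabels([]string{"api"}, []string{"api.debug"}, nil, []string{"host"})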
+func (m *Metrics) UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels []string) { + m.filterLock.Lock() + defer m.filterLock.Unlock() + + m.AllowedPrefixes = allow + m.BlockedPrefixes = block + + if allowedLabels == nil { + // Having a white list means we take only elements from it + m.allowedLabels = nil + } else { + m.allowedLabels = make(map[string]bool) + for _, v := range allowedLabels { + m.allowedLabels[v] = true + } + } + m.blockedLabels = make(map[string]bool) + for _, v := range blockedLabels { + m.blockedLabels[v] = true + } + m.AllowedLabels = allowedLabels + m.BlockedLabels = blockedLabels + + m.filter = iradix.New() + for _, prefix := range m.AllowedPrefixes { + m.filter, _, _ = m.filter.Insert([]byte(prefix), true) + } + for _, prefix := range m.BlockedPrefixes { + m.filter, _, _ = m.filter.Insert([]byte(prefix), false) + } +} + +// labelIsAllowed return true if a should be included in metric +// the caller should lock m.filterLock while calling this method +func (m *Metrics) labelIsAllowed(label *Label) bool { + labelName := (*label).Name + if m.blockedLabels != nil { + _, ok := m.blockedLabels[labelName] + if ok { + // If present, let's remove this label + return false + } + } + if m.allowedLabels != nil { + _, ok := m.allowedLabels[labelName] + return ok + } + // Allow by default + return true +} + +// filterLabels return only allowed labels +// the caller should lock m.filterLock while calling this method +func (m *Metrics) filterLabels(labels []Label) []Label { + if labels == nil { + return nil + } + toReturn := labels[:0] + for _, label := range labels { + if m.labelIsAllowed(&label) { + toReturn = append(toReturn, label) + } + } + return toReturn +} + +// Returns whether the metric should be allowed based on configured prefix filters +// Also return the applicable labels +func (m *Metrics) allowMetric(key []string, labels []Label) (bool, []Label) { + m.filterLock.RLock() + defer m.filterLock.RUnlock() + + if m.filter == nil || m.filter.Len() == 0 { + return m.Config.FilterDefault, m.filterLabels(labels) + } + + _, allowed, ok := m.filter.Root().LongestPrefix([]byte(strings.Join(key, "."))) + if !ok { + return m.Config.FilterDefault, m.filterLabels(labels) + } + + return allowed.(bool), m.filterLabels(labels) +} + +// Periodically collects runtime stats to publish +func (m *Metrics) collectStats() { + for { + time.Sleep(m.ProfileInterval) + m.emitRuntimeStats() + } +} + +// Emits various runtime statsitics +func (m *Metrics) emitRuntimeStats() { + // Export number of Goroutines + numRoutines := runtime.NumGoroutine() + m.SetGauge([]string{"runtime", "num_goroutines"}, float32(numRoutines)) + + // Export memory stats + var stats runtime.MemStats + runtime.ReadMemStats(&stats) + m.SetGauge([]string{"runtime", "alloc_bytes"}, float32(stats.Alloc)) + m.SetGauge([]string{"runtime", "sys_bytes"}, float32(stats.Sys)) + m.SetGauge([]string{"runtime", "malloc_count"}, float32(stats.Mallocs)) + m.SetGauge([]string{"runtime", "free_count"}, float32(stats.Frees)) + m.SetGauge([]string{"runtime", "heap_objects"}, float32(stats.HeapObjects)) + m.SetGauge([]string{"runtime", "total_gc_pause_ns"}, float32(stats.PauseTotalNs)) + m.SetGauge([]string{"runtime", "total_gc_runs"}, float32(stats.NumGC)) + + // Export info about the last few GC runs + num := stats.NumGC + + // Handle wrap around + if num < m.lastNumGC { + m.lastNumGC = 0 + } + + // Ensure we don't scan more than 256 + if num-m.lastNumGC >= 256 { + m.lastNumGC = num - 255 + } + + for i := m.lastNumGC; i < num; 
i++ { + pause := stats.PauseNs[i%256] + m.AddSample([]string{"runtime", "gc_pause_ns"}, float32(pause)) + } + m.lastNumGC = num +} + +// Inserts a string value at an index into the slice +func insert(i int, v string, s []string) []string { + s = append(s, "") + copy(s[i+1:], s[i:]) + s[i] = v + return s +} diff --git a/hotelReservation/vendor/github.com/armon/go-metrics/sink.go b/hotelReservation/vendor/github.com/armon/go-metrics/sink.go new file mode 100644 index 000000000..0b7d6e4be --- /dev/null +++ b/hotelReservation/vendor/github.com/armon/go-metrics/sink.go @@ -0,0 +1,115 @@ +package metrics + +import ( + "fmt" + "net/url" +) + +// The MetricSink interface is used to transmit metrics information +// to an external system +type MetricSink interface { + // A Gauge should retain the last value it is set to + SetGauge(key []string, val float32) + SetGaugeWithLabels(key []string, val float32, labels []Label) + + // Should emit a Key/Value pair for each call + EmitKey(key []string, val float32) + + // Counters should accumulate values + IncrCounter(key []string, val float32) + IncrCounterWithLabels(key []string, val float32, labels []Label) + + // Samples are for timing information, where quantiles are used + AddSample(key []string, val float32) + AddSampleWithLabels(key []string, val float32, labels []Label) +} + +// BlackholeSink is used to just blackhole messages +type BlackholeSink struct{} + +func (*BlackholeSink) SetGauge(key []string, val float32) {} +func (*BlackholeSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {} +func (*BlackholeSink) EmitKey(key []string, val float32) {} +func (*BlackholeSink) IncrCounter(key []string, val float32) {} +func (*BlackholeSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {} +func (*BlackholeSink) AddSample(key []string, val float32) {} +func (*BlackholeSink) AddSampleWithLabels(key []string, val float32, labels []Label) {} + +// FanoutSink is used to sink to fanout values to multiple sinks +type FanoutSink []MetricSink + +func (fh FanoutSink) SetGauge(key []string, val float32) { + fh.SetGaugeWithLabels(key, val, nil) +} + +func (fh FanoutSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { + for _, s := range fh { + s.SetGaugeWithLabels(key, val, labels) + } +} + +func (fh FanoutSink) EmitKey(key []string, val float32) { + for _, s := range fh { + s.EmitKey(key, val) + } +} + +func (fh FanoutSink) IncrCounter(key []string, val float32) { + fh.IncrCounterWithLabels(key, val, nil) +} + +func (fh FanoutSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { + for _, s := range fh { + s.IncrCounterWithLabels(key, val, labels) + } +} + +func (fh FanoutSink) AddSample(key []string, val float32) { + fh.AddSampleWithLabels(key, val, nil) +} + +func (fh FanoutSink) AddSampleWithLabels(key []string, val float32, labels []Label) { + for _, s := range fh { + s.AddSampleWithLabels(key, val, labels) + } +} + +// sinkURLFactoryFunc is an generic interface around the *SinkFromURL() function provided +// by each sink type +type sinkURLFactoryFunc func(*url.URL) (MetricSink, error) + +// sinkRegistry supports the generic NewMetricSink function by mapping URL +// schemes to metric sink factory functions +var sinkRegistry = map[string]sinkURLFactoryFunc{ + "statsd": NewStatsdSinkFromURL, + "statsite": NewStatsiteSinkFromURL, + "inmem": NewInmemSinkFromURL, +} + +// NewMetricSinkFromURL allows a generic URL input to configure any of the +// supported sinks. 
The scheme of the URL identifies the type of the sink, the +// and query parameters are used to set options. +// +// "statsd://" - Initializes a StatsdSink. The host and port are passed through +// as the "addr" of the sink +// +// "statsite://" - Initializes a StatsiteSink. The host and port become the +// "addr" of the sink +// +// "inmem://" - Initializes an InmemSink. The host and port are ignored. The +// "interval" and "duration" query parameters must be specified with valid +// durations, see NewInmemSink for details. +func NewMetricSinkFromURL(urlStr string) (MetricSink, error) { + u, err := url.Parse(urlStr) + if err != nil { + return nil, err + } + + sinkURLFactoryFunc := sinkRegistry[u.Scheme] + if sinkURLFactoryFunc == nil { + return nil, fmt.Errorf( + "cannot create metric sink, unrecognized sink name: %q", u.Scheme) + } + + return sinkURLFactoryFunc(u) +} diff --git a/hotelReservation/vendor/github.com/armon/go-metrics/start.go b/hotelReservation/vendor/github.com/armon/go-metrics/start.go new file mode 100644 index 000000000..32a28c483 --- /dev/null +++ b/hotelReservation/vendor/github.com/armon/go-metrics/start.go @@ -0,0 +1,141 @@ +package metrics + +import ( + "os" + "sync" + "sync/atomic" + "time" + + "github.com/hashicorp/go-immutable-radix" +) + +// Config is used to configure metrics settings +type Config struct { + ServiceName string // Prefixed with keys to separate services + HostName string // Hostname to use. If not provided and EnableHostname, it will be os.Hostname + EnableHostname bool // Enable prefixing gauge values with hostname + EnableHostnameLabel bool // Enable adding hostname to labels + EnableServiceLabel bool // Enable adding service to labels + EnableRuntimeMetrics bool // Enables profiling of runtime metrics (GC, Goroutines, Memory) + EnableTypePrefix bool // Prefixes key with a type ("counter", "gauge", "timer") + TimerGranularity time.Duration // Granularity of timers. + ProfileInterval time.Duration // Interval to profile runtime metrics + + AllowedPrefixes []string // A list of metric prefixes to allow, with '.' as the separator + BlockedPrefixes []string // A list of metric prefixes to block, with '.' as the separator + AllowedLabels []string // A list of metric labels to allow, with '.' as the separator + BlockedLabels []string // A list of metric labels to block, with '.' 
as the separator + FilterDefault bool // Whether to allow metrics by default +} + +// Metrics represents an instance of a metrics sink that can +// be used to emit +type Metrics struct { + Config + lastNumGC uint32 + sink MetricSink + filter *iradix.Tree + allowedLabels map[string]bool + blockedLabels map[string]bool + filterLock sync.RWMutex // Lock filters and allowedLabels/blockedLabels access +} + +// Shared global metrics instance +var globalMetrics atomic.Value // *Metrics + +func init() { + // Initialize to a blackhole sink to avoid errors + globalMetrics.Store(&Metrics{sink: &BlackholeSink{}}) +} + +// DefaultConfig provides a sane default configuration +func DefaultConfig(serviceName string) *Config { + c := &Config{ + ServiceName: serviceName, // Use client provided service + HostName: "", + EnableHostname: true, // Enable hostname prefix + EnableRuntimeMetrics: true, // Enable runtime profiling + EnableTypePrefix: false, // Disable type prefix + TimerGranularity: time.Millisecond, // Timers are in milliseconds + ProfileInterval: time.Second, // Poll runtime every second + FilterDefault: true, // Don't filter metrics by default + } + + // Try to get the hostname + name, _ := os.Hostname() + c.HostName = name + return c +} + +// New is used to create a new instance of Metrics +func New(conf *Config, sink MetricSink) (*Metrics, error) { + met := &Metrics{} + met.Config = *conf + met.sink = sink + met.UpdateFilterAndLabels(conf.AllowedPrefixes, conf.BlockedPrefixes, conf.AllowedLabels, conf.BlockedLabels) + + // Start the runtime collector + if conf.EnableRuntimeMetrics { + go met.collectStats() + } + return met, nil +} + +// NewGlobal is the same as New, but it assigns the metrics object to be +// used globally as well as returning it. +func NewGlobal(conf *Config, sink MetricSink) (*Metrics, error) { + metrics, err := New(conf, sink) + if err == nil { + globalMetrics.Store(metrics) + } + return metrics, err +} + +// Proxy all the methods to the globalMetrics instance +func SetGauge(key []string, val float32) { + globalMetrics.Load().(*Metrics).SetGauge(key, val) +} + +func SetGaugeWithLabels(key []string, val float32, labels []Label) { + globalMetrics.Load().(*Metrics).SetGaugeWithLabels(key, val, labels) +} + +func EmitKey(key []string, val float32) { + globalMetrics.Load().(*Metrics).EmitKey(key, val) +} + +func IncrCounter(key []string, val float32) { + globalMetrics.Load().(*Metrics).IncrCounter(key, val) +} + +func IncrCounterWithLabels(key []string, val float32, labels []Label) { + globalMetrics.Load().(*Metrics).IncrCounterWithLabels(key, val, labels) +} + +func AddSample(key []string, val float32) { + globalMetrics.Load().(*Metrics).AddSample(key, val) +} + +func AddSampleWithLabels(key []string, val float32, labels []Label) { + globalMetrics.Load().(*Metrics).AddSampleWithLabels(key, val, labels) +} + +func MeasureSince(key []string, start time.Time) { + globalMetrics.Load().(*Metrics).MeasureSince(key, start) +} + +func MeasureSinceWithLabels(key []string, start time.Time, labels []Label) { + globalMetrics.Load().(*Metrics).MeasureSinceWithLabels(key, start, labels) +} + +func UpdateFilter(allow, block []string) { + globalMetrics.Load().(*Metrics).UpdateFilter(allow, block) +} + +// UpdateFilterAndLabels set allow/block prefixes of metrics while allowedLabels +// and blockedLabels - when not nil - allow filtering of labels in order to +// block/allow globally labels (especially useful when having large number of +// values for a given label). 
See README.md for more information about usage. +func UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels []string) { + globalMetrics.Load().(*Metrics).UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels) +} diff --git a/hotelReservation/vendor/github.com/armon/go-metrics/statsd.go b/hotelReservation/vendor/github.com/armon/go-metrics/statsd.go new file mode 100644 index 000000000..1bfffce46 --- /dev/null +++ b/hotelReservation/vendor/github.com/armon/go-metrics/statsd.go @@ -0,0 +1,184 @@ +package metrics + +import ( + "bytes" + "fmt" + "log" + "net" + "net/url" + "strings" + "time" +) + +const ( + // statsdMaxLen is the maximum size of a packet + // to send to statsd + statsdMaxLen = 1400 +) + +// StatsdSink provides a MetricSink that can be used +// with a statsite or statsd metrics server. It uses +// only UDP packets, while StatsiteSink uses TCP. +type StatsdSink struct { + addr string + metricQueue chan string +} + +// NewStatsdSinkFromURL creates an StatsdSink from a URL. It is used +// (and tested) from NewMetricSinkFromURL. +func NewStatsdSinkFromURL(u *url.URL) (MetricSink, error) { + return NewStatsdSink(u.Host) +} + +// NewStatsdSink is used to create a new StatsdSink +func NewStatsdSink(addr string) (*StatsdSink, error) { + s := &StatsdSink{ + addr: addr, + metricQueue: make(chan string, 4096), + } + go s.flushMetrics() + return s, nil +} + +// Close is used to stop flushing to statsd +func (s *StatsdSink) Shutdown() { + close(s.metricQueue) +} + +func (s *StatsdSink) SetGauge(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsdSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsdSink) EmitKey(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val)) +} + +func (s *StatsdSink) IncrCounter(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsdSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsdSink) AddSample(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +func (s *StatsdSink) AddSampleWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +// Flattens the key for formatting, removes spaces +func (s *StatsdSink) flattenKey(parts []string) string { + joined := strings.Join(parts, ".") + return strings.Map(func(r rune) rune { + switch r { + case ':': + fallthrough + case ' ': + return '_' + default: + return r + } + }, joined) +} + +// Flattens the key along with labels for formatting, removes spaces +func (s *StatsdSink) flattenKeyLabels(parts []string, labels []Label) string { + for _, label := range labels { + parts = append(parts, label.Value) + } + return s.flattenKey(parts) +} + +// Does a non-blocking push to the metrics queue +func (s *StatsdSink) pushMetric(m string) { + select { + case s.metricQueue <- m: + default: + } +} + +// Flushes metrics +func (s *StatsdSink) flushMetrics() { + var sock net.Conn + var err error + var wait <-chan time.Time + ticker := 
+ +// Flushes metrics +func (s *StatsdSink) flushMetrics() { + var sock net.Conn + var err error + var wait <-chan time.Time + ticker := time.NewTicker(flushInterval) + defer ticker.Stop() + +CONNECT: + // Create a buffer + buf := bytes.NewBuffer(nil) + + // Attempt to connect + sock, err = net.Dial("udp", s.addr) + if err != nil { + log.Printf("[ERR] Error connecting to statsd! Err: %s", err) + goto WAIT + } + + for { + select { + case metric, ok := <-s.metricQueue: + // Get a metric from the queue + if !ok { + goto QUIT + } + + // Check if this would overflow the packet size + if len(metric)+buf.Len() > statsdMaxLen { + _, err := sock.Write(buf.Bytes()) + buf.Reset() + if err != nil { + log.Printf("[ERR] Error writing to statsd! Err: %s", err) + goto WAIT + } + } + + // Append to the buffer + buf.WriteString(metric) + + case <-ticker.C: + if buf.Len() == 0 { + continue + } + + _, err := sock.Write(buf.Bytes()) + buf.Reset() + if err != nil { + log.Printf("[ERR] Error flushing to statsd! Err: %s", err) + goto WAIT + } + } + } + +WAIT: + // Wait for a while + wait = time.After(time.Duration(5) * time.Second) + for { + select { + // Dequeue the messages to avoid backlog + case _, ok := <-s.metricQueue: + if !ok { + goto QUIT + } + case <-wait: + goto CONNECT + } + } +QUIT: + s.metricQueue = nil +} diff --git a/hotelReservation/vendor/github.com/armon/go-metrics/statsite.go b/hotelReservation/vendor/github.com/armon/go-metrics/statsite.go new file mode 100644 index 000000000..6c0d284d2 --- /dev/null +++ b/hotelReservation/vendor/github.com/armon/go-metrics/statsite.go @@ -0,0 +1,172 @@ +package metrics + +import ( + "bufio" + "fmt" + "log" + "net" + "net/url" + "strings" + "time" +) + +const ( + // We force flush the statsite metrics after this period of + // inactivity. Prevents stats from getting stuck in a buffer + // forever. + flushInterval = 100 * time.Millisecond +) + +// NewStatsiteSinkFromURL creates a StatsiteSink from a URL. It is used +// (and tested) from NewMetricSinkFromURL.
+func NewStatsiteSinkFromURL(u *url.URL) (MetricSink, error) { + return NewStatsiteSink(u.Host) +} + +// StatsiteSink provides a MetricSink that can be used with a +// statsite metrics server +type StatsiteSink struct { + addr string + metricQueue chan string +} + +// NewStatsiteSink is used to create a new StatsiteSink +func NewStatsiteSink(addr string) (*StatsiteSink, error) { + s := &StatsiteSink{ + addr: addr, + metricQueue: make(chan string, 4096), + } + go s.flushMetrics() + return s, nil +} + +// Shutdown is used to stop flushing to statsite +func (s *StatsiteSink) Shutdown() { + close(s.metricQueue) +} + +func (s *StatsiteSink) SetGauge(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsiteSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsiteSink) EmitKey(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val)) +} + +func (s *StatsiteSink) IncrCounter(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsiteSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsiteSink) AddSample(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +func (s *StatsiteSink) AddSampleWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +// Flattens the key for formatting, removes spaces +func (s *StatsiteSink) flattenKey(parts []string) string { + joined := strings.Join(parts, ".") + return strings.Map(func(r rune) rune { + switch r { + case ':': + fallthrough + case ' ': + return '_' + default: + return r + } + }, joined) +} + +// Flattens the key along with labels for formatting, removes spaces +func (s *StatsiteSink) flattenKeyLabels(parts []string, labels []Label) string { + for _, label := range labels { + parts = append(parts, label.Value) + } + return s.flattenKey(parts) +} + +// Does a non-blocking push to the metrics queue +func (s *StatsiteSink) pushMetric(m string) { + select { + case s.metricQueue <- m: + default: + } +} + +// Flushes metrics +func (s *StatsiteSink) flushMetrics() { + var sock net.Conn + var err error + var wait <-chan time.Time + var buffered *bufio.Writer + ticker := time.NewTicker(flushInterval) + defer ticker.Stop() + +CONNECT: + // Attempt to connect + sock, err = net.Dial("tcp", s.addr) + if err != nil { + log.Printf("[ERR] Error connecting to statsite! Err: %s", err) + goto WAIT + } + + // Create a buffered writer + buffered = bufio.NewWriter(sock) + + for { + select { + case metric, ok := <-s.metricQueue: + // Get a metric from the queue + if !ok { + goto QUIT + } + + // Try to send to statsite + _, err := buffered.Write([]byte(metric)) + if err != nil { + log.Printf("[ERR] Error writing to statsite! Err: %s", err) + goto WAIT + } + case <-ticker.C: + if err := buffered.Flush(); err != nil { + log.Printf("[ERR] Error flushing to statsite! 
Err: %s", err) + goto WAIT + } + } + } + +WAIT: + // Wait for a while + wait = time.After(time.Duration(5) * time.Second) + for { + select { + // Dequeue the messages to avoid backlog + case _, ok := <-s.metricQueue: + if !ok { + goto QUIT + } + case <-wait: + goto CONNECT + } + } +QUIT: + s.metricQueue = nil +} diff --git a/hotelReservation/vendor/github.com/bradfitz/gomemcache/AUTHORS b/hotelReservation/vendor/github.com/bradfitz/gomemcache/AUTHORS new file mode 100644 index 000000000..86ca62074 --- /dev/null +++ b/hotelReservation/vendor/github.com/bradfitz/gomemcache/AUTHORS @@ -0,0 +1,9 @@ +The following people & companies are the copyright holders of this +package. Feel free to add to this list if you or your employer cares, +otherwise it's implicit from the git log. + +Authors: + +- Brad Fitzpatrick +- Google, Inc. (from Googlers contributing) +- Anybody else in the git log. diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/hs/LICENSE b/hotelReservation/vendor/github.com/bradfitz/gomemcache/LICENSE similarity index 100% rename from hotelReservation/vendor/github.com/apache/thrift/lib/hs/LICENSE rename to hotelReservation/vendor/github.com/bradfitz/gomemcache/LICENSE diff --git a/hotelReservation/vendor/github.com/bradfitz/gomemcache/memcache/memcache.go b/hotelReservation/vendor/github.com/bradfitz/gomemcache/memcache/memcache.go new file mode 100644 index 000000000..b2ebacd2a --- /dev/null +++ b/hotelReservation/vendor/github.com/bradfitz/gomemcache/memcache/memcache.go @@ -0,0 +1,776 @@ +/* +Copyright 2011 The gomemcache AUTHORS + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package memcache provides a client for the memcached cache server. +package memcache + +import ( + "bufio" + "bytes" + "context" + "errors" + "fmt" + "io" + "net" + "strconv" + "strings" + "sync" + "time" +) + +// Similar to: +// https://godoc.org/google.golang.org/appengine/memcache + +var ( + // ErrCacheMiss means that a Get failed because the item wasn't present. + ErrCacheMiss = errors.New("memcache: cache miss") + + // ErrCASConflict means that a CompareAndSwap call failed due to the + // cached value being modified between the Get and the CompareAndSwap. + // If the cached value was simply evicted rather than replaced, + // ErrNotStored will be returned instead. + ErrCASConflict = errors.New("memcache: compare-and-swap conflict") + + // ErrNotStored means that a conditional write operation (i.e. Add or + // CompareAndSwap) failed because the condition was not satisfied. + ErrNotStored = errors.New("memcache: item not stored") + + // ErrServerError means that a server error occurred. + ErrServerError = errors.New("memcache: server error") + + // ErrNoStats means that no statistics were available. + ErrNoStats = errors.New("memcache: no statistics available") + + // ErrMalformedKey is returned when an invalid key is used. + // Keys must be at maximum 250 bytes long and not + // contain whitespace or control characters.
+ ErrMalformedKey = errors.New("malformed: key is too long or contains invalid characters") + + // ErrNoServers is returned when no servers are configured or available. + ErrNoServers = errors.New("memcache: no servers configured or available") +) + +const ( + // DefaultTimeout is the default socket read/write timeout. + DefaultTimeout = 500 * time.Millisecond + + // DefaultMaxIdleConns is the default maximum number of idle connections + // kept for any single address. + DefaultMaxIdleConns = 2 +) + +const buffered = 8 // arbitrary buffered channel size, for readability + +// resumableError returns true if err is only a protocol-level cache error. +// This is used to determine whether or not a server connection should +// be re-used or not. If an error occurs, by default we don't reuse the +// connection, unless it was just a cache error. +func resumableError(err error) bool { + switch err { + case ErrCacheMiss, ErrCASConflict, ErrNotStored, ErrMalformedKey: + return true + } + return false +} + +func legalKey(key string) bool { + if len(key) > 250 { + return false + } + for i := 0; i < len(key); i++ { + if key[i] <= ' ' || key[i] == 0x7f { + return false + } + } + return true +} + +var ( + crlf = []byte("\r\n") + space = []byte(" ") + resultOK = []byte("OK\r\n") + resultStored = []byte("STORED\r\n") + resultNotStored = []byte("NOT_STORED\r\n") + resultExists = []byte("EXISTS\r\n") + resultNotFound = []byte("NOT_FOUND\r\n") + resultDeleted = []byte("DELETED\r\n") + resultEnd = []byte("END\r\n") + resultOk = []byte("OK\r\n") + resultTouched = []byte("TOUCHED\r\n") + + resultClientErrorPrefix = []byte("CLIENT_ERROR ") + versionPrefix = []byte("VERSION") +) + +// New returns a memcache client using the provided server(s) +// with equal weight. If a server is listed multiple times, +// it gets a proportional amount of weight. +func New(server ...string) *Client { + ss := new(ServerList) + ss.SetServers(server...) + return NewFromSelector(ss) +} + +// NewFromSelector returns a new Client using the provided ServerSelector. +func NewFromSelector(ss ServerSelector) *Client { + return &Client{selector: ss} +} + +// Client is a memcache client. +// It is safe for unlocked use by multiple concurrent goroutines. +type Client struct { + // DialContext connects to the address on the named network using the + // provided context. + // + // To connect to servers using TLS (memcached running with "--enable-ssl"), + // use a DialContext func that uses tls.Dialer.DialContext. See this + // package's tests as an example. + DialContext func(ctx context.Context, network, address string) (net.Conn, error) + + // Timeout specifies the socket read/write timeout. + // If zero, DefaultTimeout is used. + Timeout time.Duration + + // MaxIdleConns specifies the maximum number of idle connections that will + // be maintained per address. If less than one, DefaultMaxIdleConns will be + // used. + // + // Consider your expected traffic rates and latency carefully. This should + // be set to a number higher than your peak parallel requests. + MaxIdleConns int + + selector ServerSelector + + lk sync.Mutex + freeconn map[string][]*conn +} + +// Item is an item to be got or stored in a memcached server. +type Item struct { + // Key is the Item's key (250 bytes maximum). + Key string + + // Value is the Item's value. + Value []byte + + // Flags are server-opaque flags whose semantics are entirely + // up to the app. 
+ Flags uint32 + + // Expiration is the cache expiration time, in seconds: either a relative + // time from now (up to 1 month), or an absolute Unix epoch time. + // Zero means the Item has no expiration time. + Expiration int32 + + // CasID is the compare and swap ID. + // + // It's populated by get requests and then the same value is + // required for a CompareAndSwap request to succeed. + CasID uint64 +} + +// conn is a connection to a server. +type conn struct { + nc net.Conn + rw *bufio.ReadWriter + addr net.Addr + c *Client +} + +// release returns this connection back to the client's free pool +func (cn *conn) release() { + cn.c.putFreeConn(cn.addr, cn) +} + +func (cn *conn) extendDeadline() { + cn.nc.SetDeadline(time.Now().Add(cn.c.netTimeout())) +} + +// condRelease releases this connection if the error pointed to by err +// is nil (not an error) or is only a protocol level error (e.g. a +// cache miss). The purpose is to not recycle TCP connections that +// are bad. +func (cn *conn) condRelease(err *error) { + if *err == nil || resumableError(*err) { + cn.release() + } else { + cn.nc.Close() + } +} + +func (c *Client) putFreeConn(addr net.Addr, cn *conn) { + c.lk.Lock() + defer c.lk.Unlock() + if c.freeconn == nil { + c.freeconn = make(map[string][]*conn) + } + freelist := c.freeconn[addr.String()] + if len(freelist) >= c.maxIdleConns() { + cn.nc.Close() + return + } + c.freeconn[addr.String()] = append(freelist, cn) +} + +func (c *Client) getFreeConn(addr net.Addr) (cn *conn, ok bool) { + c.lk.Lock() + defer c.lk.Unlock() + if c.freeconn == nil { + return nil, false + } + freelist, ok := c.freeconn[addr.String()] + if !ok || len(freelist) == 0 { + return nil, false + } + cn = freelist[len(freelist)-1] + c.freeconn[addr.String()] = freelist[:len(freelist)-1] + return cn, true +} + +func (c *Client) netTimeout() time.Duration { + if c.Timeout != 0 { + return c.Timeout + } + return DefaultTimeout +} + +func (c *Client) maxIdleConns() int { + if c.MaxIdleConns > 0 { + return c.MaxIdleConns + } + return DefaultMaxIdleConns +} + +// ConnectTimeoutError is the error type used when it takes +// too long to connect to the desired host. This level of +// detail can generally be ignored. 
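+//
+// Callers that do want to react to it can use a type assertion
+// (editor's sketch, not part of the upstream docs):
+//
+//	if _, ok := err.(*ConnectTimeoutError); ok {
+//		// the dial timed out; a retry may succeed
+//	}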
+type ConnectTimeoutError struct { + Addr net.Addr +} + +func (cte *ConnectTimeoutError) Error() string { + return "memcache: connect timeout to " + cte.Addr.String() +} + +func (c *Client) dial(addr net.Addr) (net.Conn, error) { + ctx, cancel := context.WithTimeout(context.Background(), c.netTimeout()) + defer cancel() + + dialerContext := c.DialContext + if dialerContext == nil { + dialer := net.Dialer{ + Timeout: c.netTimeout(), + } + dialerContext = dialer.DialContext + } + + nc, err := dialerContext(ctx, addr.Network(), addr.String()) + if err == nil { + return nc, nil + } + + if ne, ok := err.(net.Error); ok && ne.Timeout() { + return nil, &ConnectTimeoutError{addr} + } + + return nil, err +} + +func (c *Client) getConn(addr net.Addr) (*conn, error) { + cn, ok := c.getFreeConn(addr) + if ok { + cn.extendDeadline() + return cn, nil + } + nc, err := c.dial(addr) + if err != nil { + return nil, err + } + cn = &conn{ + nc: nc, + addr: addr, + rw: bufio.NewReadWriter(bufio.NewReader(nc), bufio.NewWriter(nc)), + c: c, + } + cn.extendDeadline() + return cn, nil +} + +func (c *Client) onItem(item *Item, fn func(*Client, *bufio.ReadWriter, *Item) error) error { + addr, err := c.selector.PickServer(item.Key) + if err != nil { + return err + } + cn, err := c.getConn(addr) + if err != nil { + return err + } + defer cn.condRelease(&err) + if err = fn(c, cn.rw, item); err != nil { + return err + } + return nil +} + +func (c *Client) FlushAll() error { + return c.selector.Each(c.flushAllFromAddr) +} + +// Get gets the item for the given key. ErrCacheMiss is returned for a +// memcache cache miss. The key must be at most 250 bytes in length. +func (c *Client) Get(key string) (item *Item, err error) { + err = c.withKeyAddr(key, func(addr net.Addr) error { + return c.getFromAddr(addr, []string{key}, func(it *Item) { item = it }) + }) + if err == nil && item == nil { + err = ErrCacheMiss + } + return +} + +// Touch updates the expiry for the given key. The seconds parameter is either +// a Unix timestamp or, if seconds is less than 1 month, the number of seconds +// into the future at which time the item will expire. Zero means the item has +// no expiration time. ErrCacheMiss is returned if the key is not in the cache. +// The key must be at most 250 bytes in length. 
+func (c *Client) Touch(key string, seconds int32) (err error) { + return c.withKeyAddr(key, func(addr net.Addr) error { + return c.touchFromAddr(addr, []string{key}, seconds) + }) +} + +func (c *Client) withKeyAddr(key string, fn func(net.Addr) error) (err error) { + if !legalKey(key) { + return ErrMalformedKey + } + addr, err := c.selector.PickServer(key) + if err != nil { + return err + } + return fn(addr) +} + +func (c *Client) withAddrRw(addr net.Addr, fn func(*bufio.ReadWriter) error) (err error) { + cn, err := c.getConn(addr) + if err != nil { + return err + } + defer cn.condRelease(&err) + return fn(cn.rw) +} + +func (c *Client) withKeyRw(key string, fn func(*bufio.ReadWriter) error) error { + return c.withKeyAddr(key, func(addr net.Addr) error { + return c.withAddrRw(addr, fn) + }) +} + +func (c *Client) getFromAddr(addr net.Addr, keys []string, cb func(*Item)) error { + return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error { + if _, err := fmt.Fprintf(rw, "gets %s\r\n", strings.Join(keys, " ")); err != nil { + return err + } + if err := rw.Flush(); err != nil { + return err + } + if err := parseGetResponse(rw.Reader, cb); err != nil { + return err + } + return nil + }) +} + +// flushAllFromAddr send the flush_all command to the given addr +func (c *Client) flushAllFromAddr(addr net.Addr) error { + return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error { + if _, err := fmt.Fprintf(rw, "flush_all\r\n"); err != nil { + return err + } + if err := rw.Flush(); err != nil { + return err + } + line, err := rw.ReadSlice('\n') + if err != nil { + return err + } + switch { + case bytes.Equal(line, resultOk): + break + default: + return fmt.Errorf("memcache: unexpected response line from flush_all: %q", string(line)) + } + return nil + }) +} + +// ping sends the version command to the given addr +func (c *Client) ping(addr net.Addr) error { + return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error { + if _, err := fmt.Fprintf(rw, "version\r\n"); err != nil { + return err + } + if err := rw.Flush(); err != nil { + return err + } + line, err := rw.ReadSlice('\n') + if err != nil { + return err + } + + switch { + case bytes.HasPrefix(line, versionPrefix): + break + default: + return fmt.Errorf("memcache: unexpected response line from ping: %q", string(line)) + } + return nil + }) +} + +func (c *Client) touchFromAddr(addr net.Addr, keys []string, expiration int32) error { + return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error { + for _, key := range keys { + if _, err := fmt.Fprintf(rw, "touch %s %d\r\n", key, expiration); err != nil { + return err + } + if err := rw.Flush(); err != nil { + return err + } + line, err := rw.ReadSlice('\n') + if err != nil { + return err + } + switch { + case bytes.Equal(line, resultTouched): + break + case bytes.Equal(line, resultNotFound): + return ErrCacheMiss + default: + return fmt.Errorf("memcache: unexpected response line from touch: %q", string(line)) + } + } + return nil + }) +} + +// GetMulti is a batch version of Get. The returned map from keys to +// items may have fewer elements than the input slice, due to memcache +// cache misses. Each key must be at most 250 bytes in length. +// If no error is returned, the returned map will also be non-nil. 
+func (c *Client) GetMulti(keys []string) (map[string]*Item, error) { + var lk sync.Mutex + m := make(map[string]*Item) + addItemToMap := func(it *Item) { + lk.Lock() + defer lk.Unlock() + m[it.Key] = it + } + + keyMap := make(map[net.Addr][]string) + for _, key := range keys { + if !legalKey(key) { + return nil, ErrMalformedKey + } + addr, err := c.selector.PickServer(key) + if err != nil { + return nil, err + } + keyMap[addr] = append(keyMap[addr], key) + } + + ch := make(chan error, buffered) + for addr, keys := range keyMap { + go func(addr net.Addr, keys []string) { + ch <- c.getFromAddr(addr, keys, addItemToMap) + }(addr, keys) + } + + var err error + for _ = range keyMap { + if ge := <-ch; ge != nil { + err = ge + } + } + return m, err +} + +// parseGetResponse reads a GET response from r and calls cb for each +// read and allocated Item +func parseGetResponse(r *bufio.Reader, cb func(*Item)) error { + for { + line, err := r.ReadSlice('\n') + if err != nil { + return err + } + if bytes.Equal(line, resultEnd) { + return nil + } + it := new(Item) + size, err := scanGetResponseLine(line, it) + if err != nil { + return err + } + it.Value = make([]byte, size+2) + _, err = io.ReadFull(r, it.Value) + if err != nil { + it.Value = nil + return err + } + if !bytes.HasSuffix(it.Value, crlf) { + it.Value = nil + return fmt.Errorf("memcache: corrupt get result read") + } + it.Value = it.Value[:size] + cb(it) + } +} + +// scanGetResponseLine populates it and returns the declared size of the item. +// It does not read the bytes of the item. +func scanGetResponseLine(line []byte, it *Item) (size int, err error) { + pattern := "VALUE %s %d %d %d\r\n" + dest := []interface{}{&it.Key, &it.Flags, &size, &it.CasID} + if bytes.Count(line, space) == 3 { + pattern = "VALUE %s %d %d\r\n" + dest = dest[:3] + } + n, err := fmt.Sscanf(string(line), pattern, dest...) + if err != nil || n != len(dest) { + return -1, fmt.Errorf("memcache: unexpected line in get response: %q", line) + } + return size, nil +} + +// Set writes the given item, unconditionally. +func (c *Client) Set(item *Item) error { + return c.onItem(item, (*Client).set) +} + +func (c *Client) set(rw *bufio.ReadWriter, item *Item) error { + return c.populateOne(rw, "set", item) +} + +// Add writes the given item, if no value already exists for its +// key. ErrNotStored is returned if that condition is not met. +func (c *Client) Add(item *Item) error { + return c.onItem(item, (*Client).add) +} + +func (c *Client) add(rw *bufio.ReadWriter, item *Item) error { + return c.populateOne(rw, "add", item) +} + +// Replace writes the given item, but only if the server *does* +// already hold data for this key +func (c *Client) Replace(item *Item) error { + return c.onItem(item, (*Client).replace) +} + +func (c *Client) replace(rw *bufio.ReadWriter, item *Item) error { + return c.populateOne(rw, "replace", item) +} + +// Append appends the given item to the existing item, if a value already +// exists for its key. ErrNotStored is returned if that condition is not met. +func (c *Client) Append(item *Item) error { + return c.onItem(item, (*Client).append) +} + +func (c *Client) append(rw *bufio.ReadWriter, item *Item) error { + return c.populateOne(rw, "append", item) +} + +// Prepend prepends the given item to the existing item, if a value already +// exists for its key. ErrNotStored is returned if that condition is not met. 
+func (c *Client) Prepend(item *Item) error { + return c.onItem(item, (*Client).prepend) +} + +func (c *Client) prepend(rw *bufio.ReadWriter, item *Item) error { + return c.populateOne(rw, "prepend", item) +} + +// CompareAndSwap writes the given item that was previously returned +// by Get, if the value was neither modified nor evicted between the +// Get and the CompareAndSwap calls. The item's Key should not change +// between calls but all other item fields may differ. ErrCASConflict +// is returned if the value was modified in between the +// calls. ErrNotStored is returned if the value was evicted in between +// the calls. +func (c *Client) CompareAndSwap(item *Item) error { + return c.onItem(item, (*Client).cas) +} + +func (c *Client) cas(rw *bufio.ReadWriter, item *Item) error { + return c.populateOne(rw, "cas", item) +} + +func (c *Client) populateOne(rw *bufio.ReadWriter, verb string, item *Item) error { + if !legalKey(item.Key) { + return ErrMalformedKey + } + var err error + if verb == "cas" { + _, err = fmt.Fprintf(rw, "%s %s %d %d %d %d\r\n", + verb, item.Key, item.Flags, item.Expiration, len(item.Value), item.CasID) + } else { + _, err = fmt.Fprintf(rw, "%s %s %d %d %d\r\n", + verb, item.Key, item.Flags, item.Expiration, len(item.Value)) + } + if err != nil { + return err + } + if _, err = rw.Write(item.Value); err != nil { + return err + } + if _, err := rw.Write(crlf); err != nil { + return err + } + if err := rw.Flush(); err != nil { + return err + } + line, err := rw.ReadSlice('\n') + if err != nil { + return err + } + switch { + case bytes.Equal(line, resultStored): + return nil + case bytes.Equal(line, resultNotStored): + return ErrNotStored + case bytes.Equal(line, resultExists): + return ErrCASConflict + case bytes.Equal(line, resultNotFound): + return ErrCacheMiss + } + return fmt.Errorf("memcache: unexpected response line from %q: %q", verb, string(line)) +} + +func writeReadLine(rw *bufio.ReadWriter, format string, args ...interface{}) ([]byte, error) { + _, err := fmt.Fprintf(rw, format, args...) + if err != nil { + return nil, err + } + if err := rw.Flush(); err != nil { + return nil, err + } + line, err := rw.ReadSlice('\n') + return line, err +} + +func writeExpectf(rw *bufio.ReadWriter, expect []byte, format string, args ...interface{}) error { + line, err := writeReadLine(rw, format, args...) + if err != nil { + return err + } + switch { + case bytes.Equal(line, resultOK): + return nil + case bytes.Equal(line, expect): + return nil + case bytes.Equal(line, resultNotStored): + return ErrNotStored + case bytes.Equal(line, resultExists): + return ErrCASConflict + case bytes.Equal(line, resultNotFound): + return ErrCacheMiss + } + return fmt.Errorf("memcache: unexpected response line: %q", string(line)) +} + +// Delete deletes the item with the provided key. The error ErrCacheMiss is +// returned if the item didn't already exist in the cache. +func (c *Client) Delete(key string) error { + return c.withKeyRw(key, func(rw *bufio.ReadWriter) error { + return writeExpectf(rw, resultDeleted, "delete %s\r\n", key) + }) +} + +// DeleteAll deletes all items in the cache. +func (c *Client) DeleteAll() error { + return c.withKeyRw("", func(rw *bufio.ReadWriter) error { + return writeExpectf(rw, resultDeleted, "flush_all\r\n") + }) +} + +// Ping checks whether all instances are alive. Returns an error if any +// of them is down. +func (c *Client) Ping() error { + return c.selector.Each(c.ping) +}
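+
+// Editor's note: a typical optimistic-update pattern with Get and
+// CompareAndSwap (the key and new value are illustrative):
+//
+//	it, err := mc.Get("counter")
+//	if err == nil {
+//		it.Value = []byte("updated")
+//		if err := mc.CompareAndSwap(it); err == ErrCASConflict {
+//			// the item changed since Get; re-read and retry
+//		}
+//	}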
+ +// Increment atomically increments key by delta. The return value is +// the new value after being incremented or an error. If the value +// didn't exist in memcached the error is ErrCacheMiss. The value in +// memcached must be a decimal number, or an error will be returned. +// On 64-bit overflow, the new value wraps around. +func (c *Client) Increment(key string, delta uint64) (newValue uint64, err error) { + return c.incrDecr("incr", key, delta) +} + +// Decrement atomically decrements key by delta. The return value is +// the new value after being decremented or an error. If the value +// didn't exist in memcached the error is ErrCacheMiss. The value in +// memcached must be a decimal number, or an error will be returned. +// On underflow, the new value is capped at zero and does not wrap +// around. +func (c *Client) Decrement(key string, delta uint64) (newValue uint64, err error) { + return c.incrDecr("decr", key, delta) +} + +func (c *Client) incrDecr(verb, key string, delta uint64) (uint64, error) { + var val uint64 + err := c.withKeyRw(key, func(rw *bufio.ReadWriter) error { + line, err := writeReadLine(rw, "%s %s %d\r\n", verb, key, delta) + if err != nil { + return err + } + switch { + case bytes.Equal(line, resultNotFound): + return ErrCacheMiss + case bytes.HasPrefix(line, resultClientErrorPrefix): + errMsg := line[len(resultClientErrorPrefix) : len(line)-2] + return errors.New("memcache: client error: " + string(errMsg)) + } + val, err = strconv.ParseUint(string(line[:len(line)-2]), 10, 64) + if err != nil { + return err + } + return nil + }) + return val, err +} + +// Close closes any open connections. +// +// It returns the first error encountered closing connections, but always +// closes all connections. +// +// After Close, the Client may still be used. +func (c *Client) Close() error { + c.lk.Lock() + defer c.lk.Unlock() + var ret error + for _, conns := range c.freeconn { + for _, c := range conns { + if err := c.nc.Close(); err != nil && ret == nil { + ret = err + } + } + } + c.freeconn = nil + return ret +} diff --git a/hotelReservation/vendor/github.com/bradfitz/gomemcache/memcache/selector.go b/hotelReservation/vendor/github.com/bradfitz/gomemcache/memcache/selector.go new file mode 100644 index 000000000..964dbdb6a --- /dev/null +++ b/hotelReservation/vendor/github.com/bradfitz/gomemcache/memcache/selector.go @@ -0,0 +1,129 @@ +/* +Copyright 2011 The gomemcache AUTHORS + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package memcache + +import ( + "hash/crc32" + "net" + "strings" + "sync" +) + +// ServerSelector is the interface that selects a memcache server +// as a function of the item's key. +// +// All ServerSelector implementations must be safe for concurrent use +// by multiple goroutines. +type ServerSelector interface { + // PickServer returns the server address that a given item + // should be sharded onto. + PickServer(key string) (net.Addr, error) + Each(func(net.Addr) error) error +} + +// ServerList is a simple ServerSelector. Its zero value is usable.
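+//
+// For example (editor's sketch):
+//
+//	var ss ServerList
+//	_ = ss.SetServers("10.0.0.1:11211", "10.0.0.2:11211")
+//	mc := NewFromSelector(&ss)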
+type ServerList struct { + mu sync.RWMutex + addrs []net.Addr +} + +// staticAddr caches the Network() and String() values from any net.Addr. +type staticAddr struct { + ntw, str string +} + +func newStaticAddr(a net.Addr) net.Addr { + return &staticAddr{ + ntw: a.Network(), + str: a.String(), + } +} + +func (s *staticAddr) Network() string { return s.ntw } +func (s *staticAddr) String() string { return s.str } + +// SetServers changes a ServerList's set of servers at runtime and is +// safe for concurrent use by multiple goroutines. +// +// Each server is given equal weight. A server is given more weight +// if it's listed multiple times. +// +// SetServers returns an error if any of the server names fail to +// resolve. No attempt is made to connect to the server. If any error +// is returned, no changes are made to the ServerList. +func (ss *ServerList) SetServers(servers ...string) error { + naddr := make([]net.Addr, len(servers)) + for i, server := range servers { + if strings.Contains(server, "/") { + addr, err := net.ResolveUnixAddr("unix", server) + if err != nil { + return err + } + naddr[i] = newStaticAddr(addr) + } else { + tcpaddr, err := net.ResolveTCPAddr("tcp", server) + if err != nil { + return err + } + naddr[i] = newStaticAddr(tcpaddr) + } + } + + ss.mu.Lock() + defer ss.mu.Unlock() + ss.addrs = naddr + return nil +} + +// Each iterates over each server calling the given function +func (ss *ServerList) Each(f func(net.Addr) error) error { + ss.mu.RLock() + defer ss.mu.RUnlock() + for _, a := range ss.addrs { + if err := f(a); nil != err { + return err + } + } + return nil +} + +// keyBufPool returns []byte buffers for use by PickServer's call to +// crc32.ChecksumIEEE to avoid allocations. (but doesn't avoid the +// copies, which at least are bounded in size and small) +var keyBufPool = sync.Pool{ + New: func() interface{} { + b := make([]byte, 256) + return &b + }, +} + +func (ss *ServerList) PickServer(key string) (net.Addr, error) { + ss.mu.RLock() + defer ss.mu.RUnlock() + if len(ss.addrs) == 0 { + return nil, ErrNoServers + } + if len(ss.addrs) == 1 { + return ss.addrs[0], nil + } + bufp := keyBufPool.Get().(*[]byte) + n := copy(*bufp, key) + cs := crc32.ChecksumIEEE((*bufp)[:n]) + keyBufPool.Put(bufp) + + return ss.addrs[cs%uint32(len(ss.addrs))], nil +} diff --git a/hotelReservation/vendor/github.com/codahale/hdrhistogram/.travis.yml b/hotelReservation/vendor/github.com/codahale/hdrhistogram/.travis.yml deleted file mode 100644 index 7960fc95b..000000000 --- a/hotelReservation/vendor/github.com/codahale/hdrhistogram/.travis.yml +++ /dev/null @@ -1,5 +0,0 @@ -language: go -go: - - 1.5 - - 1.6 - - tip diff --git a/hotelReservation/vendor/github.com/codahale/hdrhistogram/LICENSE b/hotelReservation/vendor/github.com/codahale/hdrhistogram/LICENSE deleted file mode 100644 index f9835c241..000000000 --- a/hotelReservation/vendor/github.com/codahale/hdrhistogram/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Coda Hale - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all 
copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/hotelReservation/vendor/github.com/codahale/hdrhistogram/README.md b/hotelReservation/vendor/github.com/codahale/hdrhistogram/README.md deleted file mode 100644 index 614b197c3..000000000 --- a/hotelReservation/vendor/github.com/codahale/hdrhistogram/README.md +++ /dev/null @@ -1,15 +0,0 @@ -hdrhistogram -============ - -[![Build Status](https://travis-ci.org/codahale/hdrhistogram.png?branch=master)](https://travis-ci.org/codahale/hdrhistogram) - -A pure Go implementation of the [HDR Histogram](https://github.com/HdrHistogram/HdrHistogram). - -> A Histogram that supports recording and analyzing sampled data value counts -> across a configurable integer value range with configurable value precision -> within the range. Value precision is expressed as the number of significant -> digits in the value recording, and provides control over value quantization -> behavior across the value range and the subsequent value resolution at any -> given level. - -For documentation, check [godoc](http://godoc.org/github.com/codahale/hdrhistogram). diff --git a/hotelReservation/vendor/github.com/codahale/hdrhistogram/hdr.go b/hotelReservation/vendor/github.com/codahale/hdrhistogram/hdr.go deleted file mode 100644 index c97842926..000000000 --- a/hotelReservation/vendor/github.com/codahale/hdrhistogram/hdr.go +++ /dev/null @@ -1,564 +0,0 @@ -// Package hdrhistogram provides an implementation of Gil Tene's HDR Histogram -// data structure. The HDR Histogram allows for fast and accurate analysis of -// the extreme ranges of data with non-normal distributions, like latency. -package hdrhistogram - -import ( - "fmt" - "math" -) - -// A Bracket is a part of a cumulative distribution. -type Bracket struct { - Quantile float64 - Count, ValueAt int64 -} - -// A Snapshot is an exported view of a Histogram, useful for serializing them. -// A Histogram can be constructed from it by passing it to Import. -type Snapshot struct { - LowestTrackableValue int64 - HighestTrackableValue int64 - SignificantFigures int64 - Counts []int64 -} - -// A Histogram is a lossy data structure used to record the distribution of -// non-normally distributed data (like latency) with a high degree of accuracy -// and a bounded degree of precision. -type Histogram struct { - lowestTrackableValue int64 - highestTrackableValue int64 - unitMagnitude int64 - significantFigures int64 - subBucketHalfCountMagnitude int32 - subBucketHalfCount int32 - subBucketMask int64 - subBucketCount int32 - bucketCount int32 - countsLen int32 - totalCount int64 - counts []int64 -} - -// New returns a new Histogram instance capable of tracking values in the given -// range and with the given amount of precision. 
-func New(minValue, maxValue int64, sigfigs int) *Histogram { - if sigfigs < 1 || 5 < sigfigs { - panic(fmt.Errorf("sigfigs must be [1,5] (was %d)", sigfigs)) - } - - largestValueWithSingleUnitResolution := 2 * math.Pow10(sigfigs) - subBucketCountMagnitude := int32(math.Ceil(math.Log2(float64(largestValueWithSingleUnitResolution)))) - - subBucketHalfCountMagnitude := subBucketCountMagnitude - if subBucketHalfCountMagnitude < 1 { - subBucketHalfCountMagnitude = 1 - } - subBucketHalfCountMagnitude-- - - unitMagnitude := int32(math.Floor(math.Log2(float64(minValue)))) - if unitMagnitude < 0 { - unitMagnitude = 0 - } - - subBucketCount := int32(math.Pow(2, float64(subBucketHalfCountMagnitude)+1)) - - subBucketHalfCount := subBucketCount / 2 - subBucketMask := int64(subBucketCount-1) << uint(unitMagnitude) - - // determine exponent range needed to support the trackable value with no - // overflow: - smallestUntrackableValue := int64(subBucketCount) << uint(unitMagnitude) - bucketsNeeded := int32(1) - for smallestUntrackableValue < maxValue { - smallestUntrackableValue <<= 1 - bucketsNeeded++ - } - - bucketCount := bucketsNeeded - countsLen := (bucketCount + 1) * (subBucketCount / 2) - - return &Histogram{ - lowestTrackableValue: minValue, - highestTrackableValue: maxValue, - unitMagnitude: int64(unitMagnitude), - significantFigures: int64(sigfigs), - subBucketHalfCountMagnitude: subBucketHalfCountMagnitude, - subBucketHalfCount: subBucketHalfCount, - subBucketMask: subBucketMask, - subBucketCount: subBucketCount, - bucketCount: bucketCount, - countsLen: countsLen, - totalCount: 0, - counts: make([]int64, countsLen), - } -} - -// ByteSize returns an estimate of the amount of memory allocated to the -// histogram in bytes. -// -// N.B.: This does not take into account the overhead for slices, which are -// small, constant, and specific to the compiler version. -func (h *Histogram) ByteSize() int { - return 6*8 + 5*4 + len(h.counts)*8 -} - -// Merge merges the data stored in the given histogram with the receiver, -// returning the number of recorded values which had to be dropped. -func (h *Histogram) Merge(from *Histogram) (dropped int64) { - i := from.rIterator() - for i.next() { - v := i.valueFromIdx - c := i.countAtIdx - - if h.RecordValues(v, c) != nil { - dropped += c - } - } - - return -} - -// TotalCount returns total number of values recorded. -func (h *Histogram) TotalCount() int64 { - return h.totalCount -} - -// Max returns the approximate maximum recorded value. -func (h *Histogram) Max() int64 { - var max int64 - i := h.iterator() - for i.next() { - if i.countAtIdx != 0 { - max = i.highestEquivalentValue - } - } - return h.highestEquivalentValue(max) -} - -// Min returns the approximate minimum recorded value. -func (h *Histogram) Min() int64 { - var min int64 - i := h.iterator() - for i.next() { - if i.countAtIdx != 0 && min == 0 { - min = i.highestEquivalentValue - break - } - } - return h.lowestEquivalentValue(min) -} - -// Mean returns the approximate arithmetic mean of the recorded values. -func (h *Histogram) Mean() float64 { - if h.totalCount == 0 { - return 0 - } - var total int64 - i := h.iterator() - for i.next() { - if i.countAtIdx != 0 { - total += i.countAtIdx * h.medianEquivalentValue(i.valueFromIdx) - } - } - return float64(total) / float64(h.totalCount) -} - -// StdDev returns the approximate standard deviation of the recorded values. 
-func (h *Histogram) StdDev() float64 { - if h.totalCount == 0 { - return 0 - } - - mean := h.Mean() - geometricDevTotal := 0.0 - - i := h.iterator() - for i.next() { - if i.countAtIdx != 0 { - dev := float64(h.medianEquivalentValue(i.valueFromIdx)) - mean - geometricDevTotal += (dev * dev) * float64(i.countAtIdx) - } - } - - return math.Sqrt(geometricDevTotal / float64(h.totalCount)) -} - -// Reset deletes all recorded values and restores the histogram to its original -// state. -func (h *Histogram) Reset() { - h.totalCount = 0 - for i := range h.counts { - h.counts[i] = 0 - } -} - -// RecordValue records the given value, returning an error if the value is out -// of range. -func (h *Histogram) RecordValue(v int64) error { - return h.RecordValues(v, 1) -} - -// RecordCorrectedValue records the given value, correcting for stalls in the -// recording process. This only works for processes which are recording values -// at an expected interval (e.g., doing jitter analysis). Processes which are -// recording ad-hoc values (e.g., latency for incoming requests) can't take -// advantage of this. -func (h *Histogram) RecordCorrectedValue(v, expectedInterval int64) error { - if err := h.RecordValue(v); err != nil { - return err - } - - if expectedInterval <= 0 || v <= expectedInterval { - return nil - } - - missingValue := v - expectedInterval - for missingValue >= expectedInterval { - if err := h.RecordValue(missingValue); err != nil { - return err - } - missingValue -= expectedInterval - } - - return nil -} - -// RecordValues records n occurrences of the given value, returning an error if -// the value is out of range. -func (h *Histogram) RecordValues(v, n int64) error { - idx := h.countsIndexFor(v) - if idx < 0 || int(h.countsLen) <= idx { - return fmt.Errorf("value %d is too large to be recorded", v) - } - h.counts[idx] += n - h.totalCount += n - - return nil -} - -// ValueAtQuantile returns the recorded value at the given quantile (0..100). -func (h *Histogram) ValueAtQuantile(q float64) int64 { - if q > 100 { - q = 100 - } - - total := int64(0) - countAtPercentile := int64(((q / 100) * float64(h.totalCount)) + 0.5) - - i := h.iterator() - for i.next() { - total += i.countAtIdx - if total >= countAtPercentile { - return h.highestEquivalentValue(i.valueFromIdx) - } - } - - return 0 -} - -// CumulativeDistribution returns an ordered list of brackets of the -// distribution of recorded values. 
-func (h *Histogram) CumulativeDistribution() []Bracket { - var result []Bracket - - i := h.pIterator(1) - for i.next() { - result = append(result, Bracket{ - Quantile: i.percentile, - Count: i.countToIdx, - ValueAt: i.highestEquivalentValue, - }) - } - - return result -} - -// SignificantFigures returns the significant figures used to create the -// histogram -func (h *Histogram) SignificantFigures() int64 { - return h.significantFigures -} - -// LowestTrackableValue returns the lower bound on values that will be added -// to the histogram -func (h *Histogram) LowestTrackableValue() int64 { - return h.lowestTrackableValue -} - -// HighestTrackableValue returns the upper bound on values that will be added -// to the histogram -func (h *Histogram) HighestTrackableValue() int64 { - return h.highestTrackableValue -} - -// Histogram bar for plotting -type Bar struct { - From, To, Count int64 -} - -// Pretty print as csv for easy plotting -func (b Bar) String() string { - return fmt.Sprintf("%v, %v, %v\n", b.From, b.To, b.Count) -} - -// Distribution returns an ordered list of bars of the -// distribution of recorded values, counts can be normalized to a probability -func (h *Histogram) Distribution() (result []Bar) { - i := h.iterator() - for i.next() { - result = append(result, Bar{ - Count: i.countAtIdx, - From: h.lowestEquivalentValue(i.valueFromIdx), - To: i.highestEquivalentValue, - }) - } - - return result -} - -// Equals returns true if the two Histograms are equivalent, false if not. -func (h *Histogram) Equals(other *Histogram) bool { - switch { - case - h.lowestTrackableValue != other.lowestTrackableValue, - h.highestTrackableValue != other.highestTrackableValue, - h.unitMagnitude != other.unitMagnitude, - h.significantFigures != other.significantFigures, - h.subBucketHalfCountMagnitude != other.subBucketHalfCountMagnitude, - h.subBucketHalfCount != other.subBucketHalfCount, - h.subBucketMask != other.subBucketMask, - h.subBucketCount != other.subBucketCount, - h.bucketCount != other.bucketCount, - h.countsLen != other.countsLen, - h.totalCount != other.totalCount: - return false - default: - for i, c := range h.counts { - if c != other.counts[i] { - return false - } - } - } - return true -} - -// Export returns a snapshot view of the Histogram. This can be later passed to -// Import to construct a new Histogram with the same state. -func (h *Histogram) Export() *Snapshot { - return &Snapshot{ - LowestTrackableValue: h.lowestTrackableValue, - HighestTrackableValue: h.highestTrackableValue, - SignificantFigures: h.significantFigures, - Counts: append([]int64(nil), h.counts...), // copy - } -} - -// Import returns a new Histogram populated from the Snapshot data (which the -// caller must stop accessing). 
-func Import(s *Snapshot) *Histogram { - h := New(s.LowestTrackableValue, s.HighestTrackableValue, int(s.SignificantFigures)) - h.counts = s.Counts - totalCount := int64(0) - for i := int32(0); i < h.countsLen; i++ { - countAtIndex := h.counts[i] - if countAtIndex > 0 { - totalCount += countAtIndex - } - } - h.totalCount = totalCount - return h -} - -func (h *Histogram) iterator() *iterator { - return &iterator{ - h: h, - subBucketIdx: -1, - } -} - -func (h *Histogram) rIterator() *rIterator { - return &rIterator{ - iterator: iterator{ - h: h, - subBucketIdx: -1, - }, - } -} - -func (h *Histogram) pIterator(ticksPerHalfDistance int32) *pIterator { - return &pIterator{ - iterator: iterator{ - h: h, - subBucketIdx: -1, - }, - ticksPerHalfDistance: ticksPerHalfDistance, - } -} - -func (h *Histogram) sizeOfEquivalentValueRange(v int64) int64 { - bucketIdx := h.getBucketIndex(v) - subBucketIdx := h.getSubBucketIdx(v, bucketIdx) - adjustedBucket := bucketIdx - if subBucketIdx >= h.subBucketCount { - adjustedBucket++ - } - return int64(1) << uint(h.unitMagnitude+int64(adjustedBucket)) -} - -func (h *Histogram) valueFromIndex(bucketIdx, subBucketIdx int32) int64 { - return int64(subBucketIdx) << uint(int64(bucketIdx)+h.unitMagnitude) -} - -func (h *Histogram) lowestEquivalentValue(v int64) int64 { - bucketIdx := h.getBucketIndex(v) - subBucketIdx := h.getSubBucketIdx(v, bucketIdx) - return h.valueFromIndex(bucketIdx, subBucketIdx) -} - -func (h *Histogram) nextNonEquivalentValue(v int64) int64 { - return h.lowestEquivalentValue(v) + h.sizeOfEquivalentValueRange(v) -} - -func (h *Histogram) highestEquivalentValue(v int64) int64 { - return h.nextNonEquivalentValue(v) - 1 -} - -func (h *Histogram) medianEquivalentValue(v int64) int64 { - return h.lowestEquivalentValue(v) + (h.sizeOfEquivalentValueRange(v) >> 1) -} - -func (h *Histogram) getCountAtIndex(bucketIdx, subBucketIdx int32) int64 { - return h.counts[h.countsIndex(bucketIdx, subBucketIdx)] -} - -func (h *Histogram) countsIndex(bucketIdx, subBucketIdx int32) int32 { - bucketBaseIdx := (bucketIdx + 1) << uint(h.subBucketHalfCountMagnitude) - offsetInBucket := subBucketIdx - h.subBucketHalfCount - return bucketBaseIdx + offsetInBucket -} - -func (h *Histogram) getBucketIndex(v int64) int32 { - pow2Ceiling := bitLen(v | h.subBucketMask) - return int32(pow2Ceiling - int64(h.unitMagnitude) - - int64(h.subBucketHalfCountMagnitude+1)) -} - -func (h *Histogram) getSubBucketIdx(v int64, idx int32) int32 { - return int32(v >> uint(int64(idx)+int64(h.unitMagnitude))) -} - -func (h *Histogram) countsIndexFor(v int64) int { - bucketIdx := h.getBucketIndex(v) - subBucketIdx := h.getSubBucketIdx(v, bucketIdx) - return int(h.countsIndex(bucketIdx, subBucketIdx)) -} - -type iterator struct { - h *Histogram - bucketIdx, subBucketIdx int32 - countAtIdx, countToIdx, valueFromIdx int64 - highestEquivalentValue int64 -} - -func (i *iterator) next() bool { - if i.countToIdx >= i.h.totalCount { - return false - } - - // increment bucket - i.subBucketIdx++ - if i.subBucketIdx >= i.h.subBucketCount { - i.subBucketIdx = i.h.subBucketHalfCount - i.bucketIdx++ - } - - if i.bucketIdx >= i.h.bucketCount { - return false - } - - i.countAtIdx = i.h.getCountAtIndex(i.bucketIdx, i.subBucketIdx) - i.countToIdx += i.countAtIdx - i.valueFromIdx = i.h.valueFromIndex(i.bucketIdx, i.subBucketIdx) - i.highestEquivalentValue = i.h.highestEquivalentValue(i.valueFromIdx) - - return true -} - -type rIterator struct { - iterator - countAddedThisStep int64 -} - -func (r *rIterator) next() 
bool { - for r.iterator.next() { - if r.countAtIdx != 0 { - r.countAddedThisStep = r.countAtIdx - return true - } - } - return false -} - -type pIterator struct { - iterator - seenLastValue bool - ticksPerHalfDistance int32 - percentileToIteratorTo float64 - percentile float64 -} - -func (p *pIterator) next() bool { - if !(p.countToIdx < p.h.totalCount) { - if p.seenLastValue { - return false - } - - p.seenLastValue = true - p.percentile = 100 - - return true - } - - if p.subBucketIdx == -1 && !p.iterator.next() { - return false - } - - var done = false - for !done { - currentPercentile := (100.0 * float64(p.countToIdx)) / float64(p.h.totalCount) - if p.countAtIdx != 0 && p.percentileToIteratorTo <= currentPercentile { - p.percentile = p.percentileToIteratorTo - halfDistance := math.Trunc(math.Pow(2, math.Trunc(math.Log2(100.0/(100.0-p.percentileToIteratorTo)))+1)) - percentileReportingTicks := float64(p.ticksPerHalfDistance) * halfDistance - p.percentileToIteratorTo += 100.0 / percentileReportingTicks - return true - } - done = !p.iterator.next() - } - - return true -} - -func bitLen(x int64) (n int64) { - for ; x >= 0x8000; x >>= 16 { - n += 16 - } - if x >= 0x80 { - x >>= 8 - n += 8 - } - if x >= 0x8 { - x >>= 4 - n += 4 - } - if x >= 0x2 { - x >>= 2 - n += 2 - } - if x >= 0x1 { - n++ - } - return -} diff --git a/hotelReservation/vendor/github.com/codahale/hdrhistogram/window.go b/hotelReservation/vendor/github.com/codahale/hdrhistogram/window.go deleted file mode 100644 index dc43612a4..000000000 --- a/hotelReservation/vendor/github.com/codahale/hdrhistogram/window.go +++ /dev/null @@ -1,45 +0,0 @@ -package hdrhistogram - -// A WindowedHistogram combines histograms to provide windowed statistics. -type WindowedHistogram struct { - idx int - h []Histogram - m *Histogram - - Current *Histogram -} - -// NewWindowed creates a new WindowedHistogram with N underlying histograms with -// the given parameters. -func NewWindowed(n int, minValue, maxValue int64, sigfigs int) *WindowedHistogram { - w := WindowedHistogram{ - idx: -1, - h: make([]Histogram, n), - m: New(minValue, maxValue, sigfigs), - } - - for i := range w.h { - w.h[i] = *New(minValue, maxValue, sigfigs) - } - w.Rotate() - - return &w -} - -// Merge returns a histogram which includes the recorded values from all the -// sections of the window. -func (w *WindowedHistogram) Merge() *Histogram { - w.m.Reset() - for _, h := range w.h { - w.m.Merge(&h) - } - return w.m -} - -// Rotate resets the oldest histogram and rotates it to be used as the current -// histogram. -func (w *WindowedHistogram) Rotate() { - w.idx++ - w.Current = &w.h[w.idx%len(w.h)] - w.Current.Reset() -} diff --git a/hotelReservation/vendor/github.com/golang/protobuf/ptypes/regen.sh b/hotelReservation/vendor/github.com/golang/protobuf/ptypes/regen.sh old mode 100755 new mode 100644 diff --git a/hotelReservation/vendor/github.com/golang/snappy/.gitignore b/hotelReservation/vendor/github.com/golang/snappy/.gitignore new file mode 100644 index 000000000..042091d9b --- /dev/null +++ b/hotelReservation/vendor/github.com/golang/snappy/.gitignore @@ -0,0 +1,16 @@ +cmd/snappytool/snappytool +testdata/bench + +# These explicitly listed benchmark data files are for an obsolete version of +# snappy_test.go. 
+testdata/alice29.txt +testdata/asyoulik.txt +testdata/fireworks.jpeg +testdata/geo.protodata +testdata/html +testdata/html_x_4 +testdata/kppkn.gtb +testdata/lcet10.txt +testdata/paper-100k.pdf +testdata/plrabn12.txt +testdata/urls.10K diff --git a/hotelReservation/vendor/github.com/golang/snappy/AUTHORS b/hotelReservation/vendor/github.com/golang/snappy/AUTHORS new file mode 100644 index 000000000..bcfa19520 --- /dev/null +++ b/hotelReservation/vendor/github.com/golang/snappy/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of Snappy-Go authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Damian Gryski +Google Inc. +Jan Mercl <0xjnml@gmail.com> +Rodolfo Carvalho +Sebastien Binet diff --git a/hotelReservation/vendor/github.com/golang/snappy/CONTRIBUTORS b/hotelReservation/vendor/github.com/golang/snappy/CONTRIBUTORS new file mode 100644 index 000000000..931ae3160 --- /dev/null +++ b/hotelReservation/vendor/github.com/golang/snappy/CONTRIBUTORS @@ -0,0 +1,37 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the Snappy-Go repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). +# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# http://code.google.com/legal/individual-cla-v1.0.html +# http://code.google.com/legal/corporate-cla-v1.0.html +# +# The agreement for individuals can be filled out on the web. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name + +# Please keep the list sorted. + +Damian Gryski +Jan Mercl <0xjnml@gmail.com> +Kai Backman +Marc-Antoine Ruel +Nigel Tao +Rob Pike +Rodolfo Carvalho +Russ Cox +Sebastien Binet diff --git a/hotelReservation/vendor/github.com/golang/snappy/LICENSE b/hotelReservation/vendor/github.com/golang/snappy/LICENSE new file mode 100644 index 000000000..6050c10f4 --- /dev/null +++ b/hotelReservation/vendor/github.com/golang/snappy/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/hotelReservation/vendor/github.com/golang/snappy/README b/hotelReservation/vendor/github.com/golang/snappy/README new file mode 100644 index 000000000..cea12879a --- /dev/null +++ b/hotelReservation/vendor/github.com/golang/snappy/README @@ -0,0 +1,107 @@ +The Snappy compression format in the Go programming language. + +To download and install from source: +$ go get github.com/golang/snappy + +Unless otherwise noted, the Snappy-Go source files are distributed +under the BSD-style license found in the LICENSE file. + + + +Benchmarks. + +The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten +or so files, the same set used by the C++ Snappy code (github.com/google/snappy +and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ +3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: + +"go test -test.bench=." + +_UFlat0-8 2.19GB/s ± 0% html +_UFlat1-8 1.41GB/s ± 0% urls +_UFlat2-8 23.5GB/s ± 2% jpg +_UFlat3-8 1.91GB/s ± 0% jpg_200 +_UFlat4-8 14.0GB/s ± 1% pdf +_UFlat5-8 1.97GB/s ± 0% html4 +_UFlat6-8 814MB/s ± 0% txt1 +_UFlat7-8 785MB/s ± 0% txt2 +_UFlat8-8 857MB/s ± 0% txt3 +_UFlat9-8 719MB/s ± 1% txt4 +_UFlat10-8 2.84GB/s ± 0% pb +_UFlat11-8 1.05GB/s ± 0% gaviota + +_ZFlat0-8 1.04GB/s ± 0% html +_ZFlat1-8 534MB/s ± 0% urls +_ZFlat2-8 15.7GB/s ± 1% jpg +_ZFlat3-8 740MB/s ± 3% jpg_200 +_ZFlat4-8 9.20GB/s ± 1% pdf +_ZFlat5-8 991MB/s ± 0% html4 +_ZFlat6-8 379MB/s ± 0% txt1 +_ZFlat7-8 352MB/s ± 0% txt2 +_ZFlat8-8 396MB/s ± 1% txt3 +_ZFlat9-8 327MB/s ± 1% txt4 +_ZFlat10-8 1.33GB/s ± 1% pb +_ZFlat11-8 605MB/s ± 1% gaviota + + + +"go test -test.bench=. 
-tags=noasm" + +_UFlat0-8 621MB/s ± 2% html +_UFlat1-8 494MB/s ± 1% urls +_UFlat2-8 23.2GB/s ± 1% jpg +_UFlat3-8 1.12GB/s ± 1% jpg_200 +_UFlat4-8 4.35GB/s ± 1% pdf +_UFlat5-8 609MB/s ± 0% html4 +_UFlat6-8 296MB/s ± 0% txt1 +_UFlat7-8 288MB/s ± 0% txt2 +_UFlat8-8 309MB/s ± 1% txt3 +_UFlat9-8 280MB/s ± 1% txt4 +_UFlat10-8 753MB/s ± 0% pb +_UFlat11-8 400MB/s ± 0% gaviota + +_ZFlat0-8 409MB/s ± 1% html +_ZFlat1-8 250MB/s ± 1% urls +_ZFlat2-8 12.3GB/s ± 1% jpg +_ZFlat3-8 132MB/s ± 0% jpg_200 +_ZFlat4-8 2.92GB/s ± 0% pdf +_ZFlat5-8 405MB/s ± 1% html4 +_ZFlat6-8 179MB/s ± 1% txt1 +_ZFlat7-8 170MB/s ± 1% txt2 +_ZFlat8-8 189MB/s ± 1% txt3 +_ZFlat9-8 164MB/s ± 1% txt4 +_ZFlat10-8 479MB/s ± 1% pb +_ZFlat11-8 270MB/s ± 1% gaviota + + + +For comparison (Go's encoded output is byte-for-byte identical to C++'s), here +are the numbers from C++ Snappy's + +make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log + +BM_UFlat/0 2.4GB/s html +BM_UFlat/1 1.4GB/s urls +BM_UFlat/2 21.8GB/s jpg +BM_UFlat/3 1.5GB/s jpg_200 +BM_UFlat/4 13.3GB/s pdf +BM_UFlat/5 2.1GB/s html4 +BM_UFlat/6 1.0GB/s txt1 +BM_UFlat/7 959.4MB/s txt2 +BM_UFlat/8 1.0GB/s txt3 +BM_UFlat/9 864.5MB/s txt4 +BM_UFlat/10 2.9GB/s pb +BM_UFlat/11 1.2GB/s gaviota + +BM_ZFlat/0 944.3MB/s html (22.31 %) +BM_ZFlat/1 501.6MB/s urls (47.78 %) +BM_ZFlat/2 14.3GB/s jpg (99.95 %) +BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) +BM_ZFlat/4 8.3GB/s pdf (83.30 %) +BM_ZFlat/5 903.5MB/s html4 (22.52 %) +BM_ZFlat/6 336.0MB/s txt1 (57.88 %) +BM_ZFlat/7 312.3MB/s txt2 (61.91 %) +BM_ZFlat/8 353.1MB/s txt3 (54.99 %) +BM_ZFlat/9 289.9MB/s txt4 (66.26 %) +BM_ZFlat/10 1.2GB/s pb (19.68 %) +BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/hotelReservation/vendor/github.com/golang/snappy/decode.go b/hotelReservation/vendor/github.com/golang/snappy/decode.go new file mode 100644 index 000000000..72efb0353 --- /dev/null +++ b/hotelReservation/vendor/github.com/golang/snappy/decode.go @@ -0,0 +1,237 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. 
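Decode above, together with Encode and MaxEncodedLen from encode.go further down, forms the package's block-format API. A minimal round-trip sketch of that API (hypothetical caller code, not part of the vendored files):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	src := bytes.Repeat([]byte("hotel reservation "), 50)

	// Encode prepends the varint-encoded decoded length, then emits
	// literal/copy chunks; passing nil lets it allocate dst.
	enc := snappy.Encode(nil, src)

	dec, err := snappy.Decode(nil, enc)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(src, dec))                      // true
	fmt.Println(len(enc) <= snappy.MaxEncodedLen(len(src))) // true
}
```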
+// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. +func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + for { + if r.i < r.j { + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil + } + if !r.readFull(r.buf[:4], true) { + return 0, r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return 0, r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return 0, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return 0, r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. 
+ n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.decoded[:n], false) { + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return 0, r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return 0, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return 0, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return 0, r.err + } + } +} diff --git a/hotelReservation/vendor/github.com/golang/snappy/decode_amd64.go b/hotelReservation/vendor/github.com/golang/snappy/decode_amd64.go new file mode 100644 index 000000000..fcd192b84 --- /dev/null +++ b/hotelReservation/vendor/github.com/golang/snappy/decode_amd64.go @@ -0,0 +1,14 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// decode has the same semantics as in decode_other.go. +// +//go:noescape +func decode(dst, src []byte) int diff --git a/hotelReservation/vendor/github.com/golang/snappy/decode_amd64.s b/hotelReservation/vendor/github.com/golang/snappy/decode_amd64.s new file mode 100644 index 000000000..e6179f65e --- /dev/null +++ b/hotelReservation/vendor/github.com/golang/snappy/decode_amd64.s @@ -0,0 +1,490 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. The register allocation: +// - AX scratch +// - BX scratch +// - CX length or x +// - DX offset +// - SI &src[s] +// - DI &dst[d] +// + R8 dst_base +// + R9 dst_len +// + R10 dst_base + dst_len +// + R11 src_base +// + R12 src_len +// + R13 src_base + src_len +// - R14 used by doCopy +// - R15 used by doCopy +// +// The registers R8-R13 (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. +// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. +TEXT ·decode(SB), NOSPLIT, $48-56 + // Initialize SI, DI and R8-R13. 
+ MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, DI + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, SI + MOVQ R11, R13 + ADDQ R12, R13 + +loop: + // for s < len(src) + CMPQ SI, R13 + JEQ end + + // CX = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBLZX (SI), CX + MOVL CX, BX + ANDL $3, BX + CMPL BX, $1 + JAE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. + + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + SHRL $2, CX + CMPL CX, $60 + JAE tagLit60Plus + + // case x < 60: + // s++ + INCQ SI + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that CX == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // CX can hold 64 bits, so the increment cannot overflow. + INCQ CX + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // AX = len(dst) - d + // BX = len(src) - s + MOVQ R10, AX + SUBQ DI, AX + MOVQ R13, BX + SUBQ SI, BX + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMPQ CX, $16 + JGT callMemmove + CMPQ AX, $16 + JLT callMemmove + CMPQ BX, $16 + JLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(SI), X0 + MOVOU X0, 0(DI) + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMPQ CX, AX + JGT errCorrupt + CMPQ CX, BX + JGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // DI, SI and CX as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R8-R13. 
+ MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, R13 + ADDQ R12, R13 + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. + ADDQ CX, SI + SUBQ $58, SI + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // case x == 60: + CMPL CX, $61 + JEQ tagLit61 + JA tagLit62Plus + + // x = uint32(src[s-1]) + MOVBLZX -1(SI), CX + JMP doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVWLZX -2(SI), CX + JMP doLit + +tagLit62Plus: + CMPL CX, $62 + JA tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVWLZX -3(SI), CX + MOVBLZX -1(SI), BX + SHLL $16, BX + ORL BX, CX + JMP doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVL -4(SI), CX + JMP doLit + +// The code above handles literal tags. +// ---------------------------------------- +// The code below handles copy tags. + +tagCopy4: + // case tagCopy4: + // s += 5 + ADDQ $5, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-5])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVLQZX -4(SI), DX + JMP doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADDQ $3, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-3])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVWQZX -2(SI), DX + JMP doCopy + +tagCopy: + // We have a copy tag. We assume that: + // - BX == src[s] & 0x03 + // - CX == src[s] + CMPQ BX, $2 + JEQ tagCopy2 + JA tagCopy4 + + // case tagCopy1: + // s += 2 + ADDQ $2, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + MOVQ CX, DX + ANDQ $0xe0, DX + SHLQ $3, DX + MOVBQZX -1(SI), BX + ORQ BX, DX + + // length = 4 + int(src[s-2])>>2&0x7 + SHRQ $2, CX + ANDQ $7, CX + ADDQ $4, CX + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. + // + // We assume that: + // - CX == length && CX > 0 + // - DX == offset + + // if offset <= 0 { etc } + CMPQ DX, $0 + JLE errCorrupt + + // if d < offset { etc } + MOVQ DI, BX + SUBQ R8, BX + CMPQ BX, DX + JLT errCorrupt + + // if length > len(dst)-d { etc } + MOVQ R10, BX + SUBQ DI, BX + CMPQ CX, BX + JGT errCorrupt + + // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length + // + // Set: + // - R14 = len(dst)-d + // - R15 = &dst[d-offset] + MOVQ R10, R14 + SUBQ DI, R14 + MOVQ DI, R15 + SUBQ DX, R15 + + // !!! Try a faster technique for short (16 or fewer bytes) forward copies. + // + // First, try using two 8-byte load/stores, similar to the doLit technique + // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is + // still OK if offset >= 8. 
Note that this has to be two 8-byte load/stores + // and not one 16-byte load/store, and the first store has to be before the + // second load, due to the overlap if offset is in the range [8, 16). + // + // if length > 16 || offset < 8 || len(dst)-d < 16 { + // goto slowForwardCopy + // } + // copy 16 bytes + // d += length + CMPQ CX, $16 + JGT slowForwardCopy + CMPQ DX, $8 + JLT slowForwardCopy + CMPQ R14, $16 + JLT slowForwardCopy + MOVQ 0(R15), AX + MOVQ AX, 0(DI) + MOVQ 8(R15), BX + MOVQ BX, 8(DI) + ADDQ CX, DI + JMP loop + +slowForwardCopy: + // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we + // can still try 8-byte load stores, provided we can overrun up to 10 extra + // bytes. As above, the overrun will be fixed up by subsequent iterations + // of the outermost loop. + // + // The C++ snappy code calls this technique IncrementalCopyFastPath. Its + // commentary says: + // + // ---- + // + // The main part of this loop is a simple copy of eight bytes at a time + // until we've copied (at least) the requested amount of bytes. However, + // if d and d-offset are less than eight bytes apart (indicating a + // repeating pattern of length < 8), we first need to expand the pattern in + // order to get the correct results. For instance, if the buffer looks like + // this, with the eight-byte and patterns marked as + // intervals: + // + // abxxxxxxxxxxxx + // [------] d-offset + // [------] d + // + // a single eight-byte copy from to will repeat the pattern + // once, after which we can move two bytes without moving : + // + // ababxxxxxxxxxx + // [------] d-offset + // [------] d + // + // and repeat the exercise until the two no longer overlap. + // + // This allows us to do very well in the special case of one single byte + // repeated many times, without taking a big hit for more general cases. + // + // The worst case of extra writing past the end of the match occurs when + // offset == 1 and length == 1; the last copy will read from byte positions + // [0..7] and write to [4..11], whereas it was only supposed to write to + // position 1. Thus, ten excess bytes. + // + // ---- + // + // That "10 byte overrun" worst case is confirmed by Go's + // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy + // and finishSlowForwardCopy algorithm. + // + // if length > len(dst)-d-10 { + // goto verySlowForwardCopy + // } + SUBQ $10, R14 + CMPQ CX, R14 + JGT verySlowForwardCopy + +makeOffsetAtLeast8: + // !!! As above, expand the pattern so that offset >= 8 and we can use + // 8-byte load/stores. + // + // for offset < 8 { + // copy 8 bytes from dst[d-offset:] to dst[d:] + // length -= offset + // d += offset + // offset += offset + // // The two previous lines together means that d-offset, and therefore + // // R15, is unchanged. + // } + CMPQ DX, $8 + JGE fixUpSlowForwardCopy + MOVQ (R15), BX + MOVQ BX, (DI) + SUBQ DX, CX + ADDQ DX, DI + ADDQ DX, DX + JMP makeOffsetAtLeast8 + +fixUpSlowForwardCopy: + // !!! Add length (which might be negative now) to d (implied by DI being + // &dst[d]) so that d ends up at the right place when we jump back to the + // top of the loop. Before we do that, though, we save DI to AX so that, if + // length is positive, copying the remaining length bytes will write to the + // right place. + MOVQ DI, AX + ADDQ CX, DI + +finishSlowForwardCopy: + // !!! Repeat 8-byte load/stores until length <= 0. 
Ending with a negative + // length means that we overrun, but as above, that will be fixed up by + // subsequent iterations of the outermost loop. + CMPQ CX, $0 + JLE loop + MOVQ (R15), BX + MOVQ BX, (AX) + ADDQ $8, R15 + ADDQ $8, AX + SUBQ $8, CX + JMP finishSlowForwardCopy + +verySlowForwardCopy: + // verySlowForwardCopy is a simple implementation of forward copy. In C + // parlance, this is a do/while loop instead of a while loop, since we know + // that length > 0. In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R15), BX + MOVB BX, (DI) + INCQ R15 + INCQ DI + DECQ CX + JNZ verySlowForwardCopy + JMP loop + +// The code above handles copy tags. +// ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". + // + // if d != len(dst) { etc } + CMPQ DI, R10 + JNE errCorrupt + + // return 0 + MOVQ $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVQ $1, ret+48(FP) + RET diff --git a/hotelReservation/vendor/github.com/golang/snappy/decode_other.go b/hotelReservation/vendor/github.com/golang/snappy/decode_other.go new file mode 100644 index 000000000..8c9f2049b --- /dev/null +++ b/hotelReservation/vendor/github.com/golang/snappy/decode_other.go @@ -0,0 +1,101 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. Unlike + // the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + for end := d + length; d != end; d++ { + dst[d] = dst[d-offset] + } + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/hotelReservation/vendor/github.com/golang/snappy/encode.go b/hotelReservation/vendor/github.com/golang/snappy/encode.go new file mode 100644 index 000000000..8d393e904 --- /dev/null +++ b/hotelReservation/vendor/github.com/golang/snappy/encode.go @@ -0,0 +1,285 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. 
A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. 
For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. + compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. + w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. 
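The buffered Writer above, together with Flush and Close below, covers the stream (framing) format end to end. A hedged usage sketch, again as hypothetical caller code rather than part of the vendored sources:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/golang/snappy"
)

func main() {
	var buf bytes.Buffer

	// NewBufferedWriter buffers into ibuf, so Close (or Flush) is needed
	// to push the final chunk to the underlying io.Writer.
	w := snappy.NewBufferedWriter(&buf)
	if _, err := w.Write([]byte("hello, framing format")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	// NewReader (decode.go above) undoes the framing.
	r := snappy.NewReader(&buf)
	out, err := io.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", out) // hello, framing format
}
```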
+func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. +func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/hotelReservation/vendor/github.com/golang/snappy/encode_amd64.go b/hotelReservation/vendor/github.com/golang/snappy/encode_amd64.go new file mode 100644 index 000000000..150d91bc8 --- /dev/null +++ b/hotelReservation/vendor/github.com/golang/snappy/encode_amd64.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// emitLiteral has the same semantics as in encode_other.go. +// +//go:noescape +func emitLiteral(dst, lit []byte) int + +// emitCopy has the same semantics as in encode_other.go. +// +//go:noescape +func emitCopy(dst []byte, offset, length int) int + +// extendMatch has the same semantics as in encode_other.go. +// +//go:noescape +func extendMatch(src []byte, i, j int) int + +// encodeBlock has the same semantics as in encode_other.go. +// +//go:noescape +func encodeBlock(dst, src []byte) (d int) diff --git a/hotelReservation/vendor/github.com/golang/snappy/encode_amd64.s b/hotelReservation/vendor/github.com/golang/snappy/encode_amd64.s new file mode 100644 index 000000000..adfd979fe --- /dev/null +++ b/hotelReservation/vendor/github.com/golang/snappy/encode_amd64.s @@ -0,0 +1,730 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a +// Go toolchain regression. See https://github.com/golang/go/issues/15426 and +// https://github.com/golang/snappy/issues/29 +// +// As a workaround, the package was built with a known good assembler, and +// those instructions were disassembled by "objdump -d" to yield the +// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 +// style comments, in AT&T asm syntax. Note that rsp here is a physical +// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). +// The instructions were then encoded as "BYTE $0x.." sequences, which assemble +// fine on Go 1.6. + +// The asm code generally follows the pure Go code in encode_other.go, except +// where marked with a "!!!". + +// ---------------------------------------------------------------------------- + +// func emitLiteral(dst, lit []byte) int +// +// All local variables fit into registers. The register allocation: +// - AX len(lit) +// - BX n +// - DX return value +// - DI &dst[i] +// - R10 &lit[0] +// +// The 24 bytes of stack space is to call runtime·memmove. +// +// The unusual register allocation of local variables, such as R10 for the +// source pointer, matches the allocation used at the call site in encodeBlock, +// which makes it easier to manually inline this function. 
+TEXT ·emitLiteral(SB), NOSPLIT, $24-56 + MOVQ dst_base+0(FP), DI + MOVQ lit_base+24(FP), R10 + MOVQ lit_len+32(FP), AX + MOVQ AX, DX + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT oneByte + CMPL BX, $256 + JLT twoBytes + +threeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + ADDQ $3, DX + JMP memmove + +twoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + ADDQ $2, DX + JMP memmove + +oneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + ADDQ $1, DX + +memmove: + MOVQ DX, ret+48(FP) + + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + CALL runtime·memmove(SB) + RET + +// ---------------------------------------------------------------------------- + +// func emitCopy(dst []byte, offset, length int) int +// +// All local variables fit into registers. The register allocation: +// - AX length +// - SI &dst[0] +// - DI &dst[i] +// - R11 offset +// +// The unusual register allocation of local variables, such as R11 for the +// offset, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + MOVQ dst_base+0(FP), DI + MOVQ DI, SI + MOVQ offset+24(FP), R11 + MOVQ length+32(FP), AX + +loop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT step1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP loop0 + +step1: + // if length > 64 { etc } + CMPL AX, $64 + JLE step2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +step2: + // if length >= 12 || offset >= 2048 { goto step3 } + CMPL AX, $12 + JGE step3 + CMPL R11, $2048 + JGE step3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +step3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func extendMatch(src []byte, i, j int) int +// +// All local variables fit into registers. The register allocation: +// - DX &src[0] +// - SI &src[j] +// - R13 &src[len(src) - 8] +// - R14 &src[len(src)] +// - R15 &src[i] +// +// The unusual register allocation of local variables, such as R15 for a source +// pointer, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·extendMatch(SB), NOSPLIT, $0-48 + MOVQ src_base+0(FP), DX + MOVQ src_len+8(FP), R14 + MOVQ i+24(FP), R15 + MOVQ j+32(FP), SI + ADDQ DX, R14 + ADDQ DX, R15 + ADDQ DX, SI + MOVQ R14, R13 + SUBQ $8, R13 + +cmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA cmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE bsf + ADDQ $8, R15 + ADDQ $8, SI + JMP cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. 
The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +cmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE extendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE extendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP cmp1 + +extendMatchEnd: + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func encodeBlock(dst, src []byte) (d int) +// +// All local variables fit into registers, other than "var table". The register +// allocation: +// - AX . . +// - BX . . +// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). +// - DX 64 &src[0], tableSize +// - SI 72 &src[s] +// - DI 80 &dst[d] +// - R9 88 sLimit +// - R10 . &src[nextEmit] +// - R11 96 prevHash, currHash, nextHash, offset +// - R12 104 &src[base], skip +// - R13 . &src[nextS], &src[len(src) - 8] +// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x +// - R15 112 candidate +// +// The second column (56, 64, etc) is the stack offset to spill the registers +// when calling other functions. We could pack this slightly tighter, but it's +// simpler to have a dedicated spill map independent of the function called. +// +// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An +// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill +// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. +TEXT ·encodeBlock(SB), 0, $32888-56 + MOVQ dst_base+0(FP), DI + MOVQ src_base+24(FP), SI + MOVQ src_len+32(FP), R14 + + // shift, tableSize := uint32(32-8), 1<<8 + MOVQ $24, CX + MOVQ $256, DX + +calcShift: + // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + // shift-- + // } + CMPQ DX, $16384 + JGE varTable + CMPQ DX, R14 + JGE varTable + SUBQ $1, CX + SHLQ $1, DX + JMP calcShift + +varTable: + // var table [maxTableSize]uint16 + // + // In the asm code, unlike the Go code, we can zero-initialize only the + // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU + // writes 16 bytes, so we can do only tableSize/8 writes instead of the + // 2048 writes that would zero-initialize all of table's 32768 bytes. + SHRQ $3, DX + LEAQ table-32768(SP), BX + PXOR X0, X0 + +memclr: + MOVOU X0, 0(BX) + ADDQ $16, BX + SUBQ $1, DX + JNZ memclr + + // !!! DX = &src[0] + MOVQ SI, DX + + // sLimit := len(src) - inputMargin + MOVQ R14, R9 + SUBQ $15, R9 + + // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't + // change for the rest of the function. 
+ MOVQ CX, 56(SP) + MOVQ DX, 64(SP) + MOVQ R9, 88(SP) + + // nextEmit := 0 + MOVQ DX, R10 + + // s := 1 + ADDQ $1, SI + + // nextHash := hash(load32(src, s), shift) + MOVL 0(SI), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + +outer: + // for { etc } + + // skip := 32 + MOVQ $32, R12 + + // nextS := s + MOVQ SI, R13 + + // candidate := 0 + MOVQ $0, R15 + +inner0: + // for { etc } + + // s := nextS + MOVQ R13, SI + + // bytesBetweenHashLookups := skip >> 5 + MOVQ R12, R14 + SHRQ $5, R14 + + // nextS = s + bytesBetweenHashLookups + ADDQ R14, R13 + + // skip += bytesBetweenHashLookups + ADDQ R14, R12 + + // if nextS > sLimit { goto emitRemainder } + MOVQ R13, AX + SUBQ DX, AX + CMPQ AX, R9 + JA emitRemainder + + // candidate = int(table[nextHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[nextHash] = uint16(s) + MOVQ SI, AX + SUBQ DX, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // nextHash = hash(load32(src, nextS), shift) + MOVL 0(R13), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // if load32(src, s) != load32(src, candidate) { continue } break + MOVL 0(SI), AX + MOVL (DX)(R15*1), BX + CMPL AX, BX + JNE inner0 + +fourByteMatch: + // As per the encode_other.go code: + // + // A 4-byte match has been found. We'll later see etc. + + // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment + // on inputMargin in encode.go. + MOVQ SI, AX + SUBQ R10, AX + CMPQ AX, $16 + JLE emitLiteralFastPath + + // ---------------------------------------- + // Begin inline of the emitLiteral call. + // + // d += emitLiteral(dst[d:], src[nextEmit:s]) + + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT inlineEmitLiteralOneByte + CMPL BX, $256 + JLT inlineEmitLiteralTwoBytes + +inlineEmitLiteralThreeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralTwoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralOneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + +inlineEmitLiteralMemmove: + // Spill local variables (registers) onto the stack; call; unspill. + // + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". + MOVQ SI, 72(SP) + MOVQ DI, 80(SP) + MOVQ R15, 112(SP) + CALL runtime·memmove(SB) + MOVQ 56(SP), CX + MOVQ 64(SP), DX + MOVQ 72(SP), SI + MOVQ 80(SP), DI + MOVQ 88(SP), R9 + MOVQ 112(SP), R15 + JMP inner1 + +inlineEmitLiteralEnd: + // End inline of the emitLiteral call. + // ---------------------------------------- + +emitLiteralFastPath: + // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". + MOVB AX, BX + SUBB $1, BX + SHLB $2, BX + MOVB BX, (DI) + ADDQ $1, DI + + // !!! Implement the copy from lit to dst as a 16-byte load and store. + // (Encode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only len(lit) bytes, but that's + // OK. Subsequent iterations will fix up the overrun. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. 
This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(R10), X0 + MOVOU X0, 0(DI) + ADDQ AX, DI + +inner1: + // for { etc } + + // base := s + MOVQ SI, R12 + + // !!! offset := base - candidate + MOVQ R12, R11 + SUBQ R15, R11 + SUBQ DX, R11 + + // ---------------------------------------- + // Begin inline of the extendMatch call. + // + // s = extendMatch(src, candidate+4, s+4) + + // !!! R14 = &src[len(src)] + MOVQ src_len+32(FP), R14 + ADDQ DX, R14 + + // !!! R13 = &src[len(src) - 8] + MOVQ R14, R13 + SUBQ $8, R13 + + // !!! R15 = &src[candidate + 4] + ADDQ $4, R15 + ADDQ DX, R15 + + // !!! s += 4 + ADDQ $4, SI + +inlineExtendMatchCmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA inlineExtendMatchCmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE inlineExtendMatchBSF + ADDQ $8, R15 + ADDQ $8, SI + JMP inlineExtendMatchCmp8 + +inlineExtendMatchBSF: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + JMP inlineExtendMatchEnd + +inlineExtendMatchCmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE inlineExtendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE inlineExtendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP inlineExtendMatchCmp1 + +inlineExtendMatchEnd: + // End inline of the extendMatch call. + // ---------------------------------------- + + // ---------------------------------------- + // Begin inline of the emitCopy call. + // + // d += emitCopy(dst[d:], base-candidate, s-base) + + // !!! length := s - base + MOVQ SI, AX + SUBQ R12, AX + +inlineEmitCopyLoop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT inlineEmitCopyStep1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP inlineEmitCopyLoop0 + +inlineEmitCopyStep1: + // if length > 64 { etc } + CMPL AX, $64 + JLE inlineEmitCopyStep2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +inlineEmitCopyStep2: + // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } + CMPL AX, $12 + JGE inlineEmitCopyStep3 + CMPL R11, $2048 + JGE inlineEmitCopyStep3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + JMP inlineEmitCopyEnd + +inlineEmitCopyStep3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + +inlineEmitCopyEnd: + // End inline of the emitCopy call. + // ---------------------------------------- + + // nextEmit = s + MOVQ SI, R10 + + // if s >= sLimit { goto emitRemainder } + MOVQ SI, AX + SUBQ DX, AX + CMPQ AX, R9 + JAE emitRemainder + + // As per the encode_other.go code: + // + // We could immediately etc. 
+ + // x := load64(src, s-1) + MOVQ -1(SI), R14 + + // prevHash := hash(uint32(x>>0), shift) + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // table[prevHash] = uint16(s-1) + MOVQ SI, AX + SUBQ DX, AX + SUBQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // currHash := hash(uint32(x>>8), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // candidate = int(table[currHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[currHash] = uint16(s) + ADDQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // if uint32(x>>8) == load32(src, candidate) { continue } + MOVL (DX)(R15*1), BX + CMPL R14, BX + JEQ inner1 + + // nextHash = hash(uint32(x>>16), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // s++ + ADDQ $1, SI + + // break out of the inner1 for loop, i.e. continue the outer loop. + JMP outer + +emitRemainder: + // if nextEmit < len(src) { etc } + MOVQ src_len+32(FP), AX + ADDQ DX, AX + CMPQ R10, AX + JEQ encodeBlockEnd + + // d += emitLiteral(dst[d:], src[nextEmit:]) + // + // Push args. + MOVQ DI, 0(SP) + MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ R10, 24(SP) + SUBQ R10, AX + MOVQ AX, 32(SP) + MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. + + // Spill local variables (registers) onto the stack; call; unspill. + MOVQ DI, 80(SP) + CALL ·emitLiteral(SB) + MOVQ 80(SP), DI + + // Finish the "d +=" part of "d += emitLiteral(etc)". + ADDQ 48(SP), DI + +encodeBlockEnd: + MOVQ dst_base+0(FP), AX + SUBQ AX, DI + MOVQ DI, d+48(FP) + RET diff --git a/hotelReservation/vendor/github.com/golang/snappy/encode_other.go b/hotelReservation/vendor/github.com/golang/snappy/encode_other.go new file mode 100644 index 000000000..dbcae905e --- /dev/null +++ b/hotelReservation/vendor/github.com/golang/snappy/encode_other.go @@ -0,0 +1,238 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. 
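For a short input, the whole encoded block is one such literal chunk, which can be observed on the wire through the public Encode. A hedged sketch (hypothetical caller code; the expected bytes follow from the n < 60 case of the switch below):

```go
package main

import (
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	// "abc" is shorter than minNonLiteralBlockSize, so Encode emits a
	// varint length header (0x03), then one literal chunk: the n < 60
	// tag byte (3-1)<<2 = 0x08 followed by the three literal bytes.
	enc := snappy.Encode(nil, []byte("abc"))
	fmt.Printf("% x\n", enc) // 03 08 61 62 63
}
```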
+// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= len(lit) && len(lit) <= 65536 +func emitLiteral(dst, lit []byte) int { + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[0] = 60<<2 | tagLiteral + dst[1] = uint8(n) + i = 2 + default: + dst[0] = 61<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + i = 3 + } + return i + copy(dst[i:], lit) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= 65535 +// 4 <= length && length <= 65535 +func emitCopy(dst []byte, offset, length int) int { + i := 0 + // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The + // threshold for this loop is a little higher (at 68 = 64 + 4), and the + // length emitted down below is is a little lower (at 60 = 64 - 4), because + // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed + // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as + // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as + // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a + // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an + // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. + for length >= 68 { + // Emit a length 64 copy, encoded as 3 bytes. + dst[i+0] = 63<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 64 + } + if length > 64 { + // Emit a length 60 copy, encoded as 3 bytes. + dst[i+0] = 59<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 60 + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + return i + 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + dst[i+1] = uint8(offset) + return i + 2 +} + +// extendMatch returns the largest k such that k <= len(src) and that +// src[i:i+k-j] and src[j:k] have the same contents. +// +// It assumes that: +// 0 <= i && i < j && j <= len(src) +func extendMatch(src []byte, i, j int) int { + for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { + } + return j +} + +func hash(u, shift uint32) uint32 { + return (u * 0x1e35a7bd) >> shift +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxBlockSize and maxBlockSize == 65536. + const ( + maxTableSize = 1 << 14 + // tableMask is redundant, but helps the compiler eliminate bounds + // checks. + tableMask = maxTableSize - 1 + ) + shift := uint32(32 - 8) + for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + shift-- + } + // In Go, all array elements are zero-initialized, so there is no advantage + // to a smaller tableSize per se. 
+	// However, it matches the C++ algorithm, and in the asm versions of this
+	// code, we can get away with zeroing only the first tableSize elements.
+	var table [maxTableSize]uint16
+
+	// sLimit is when to stop looking for offset/length copies. The inputMargin
+	// lets us use a fast path for emitLiteral in the main loop, while we are
+	// looking for copies.
+	sLimit := len(src) - inputMargin
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := 0
+
+	// The encoded form must start with a literal, as there are no previous
+	// bytes to copy, so we start looking for hash matches at s == 1.
+	s := 1
+	nextHash := hash(load32(src, s), shift)
+
+	for {
+		// Copied from the C++ snappy implementation:
+		//
+		// Heuristic match skipping: If 32 bytes are scanned with no matches
+		// found, start looking only at every other byte. If 32 more bytes are
+		// scanned (or skipped), look at every third byte, etc. When a match
+		// is found, immediately go back to looking at every byte. This is a
+		// small loss (~5% performance, ~0.1% density) for compressible data
+		// due to more bookkeeping, but for non-compressible data (such as
+		// JPEG) it's a huge win since the compressor quickly "realizes" the
+		// data is incompressible and doesn't bother looking for matches
+		// everywhere.
+		//
+		// The "skip" variable keeps track of how many bytes there are since
+		// the last match; dividing it by 32 (i.e. right-shifting by five)
+		// gives the number of bytes to move ahead for each iteration.
+		skip := 32
+
+		nextS := s
+		candidate := 0
+		for {
+			s = nextS
+			bytesBetweenHashLookups := skip >> 5
+			nextS = s + bytesBetweenHashLookups
+			skip += bytesBetweenHashLookups
+			if nextS > sLimit {
+				goto emitRemainder
+			}
+			candidate = int(table[nextHash&tableMask])
+			table[nextHash&tableMask] = uint16(s)
+			nextHash = hash(load32(src, nextS), shift)
+			if load32(src, s) == load32(src, candidate) {
+				break
+			}
+		}
+
+		// A 4-byte match has been found. We'll later see if more than 4 bytes
+		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+		// them as literal bytes.
+		d += emitLiteral(dst[d:], src[nextEmit:s])
+
+		// Call emitCopy, and then see if another emitCopy could be our next
+		// move. Repeat until we find no match for the input immediately after
+		// what was consumed by the last emitCopy call.
+		//
+		// If we exit this loop normally then we need to call emitLiteral next,
+		// though we don't yet know how big the literal will be. We handle that
+		// by proceeding to the next iteration of the main loop. We also can
+		// exit this loop via goto if we get close to exhausting the input.
+		for {
+			// Invariant: we have a 4-byte match at s, and no need to emit any
+			// literal bytes prior to s.
+			base := s
+
+			// Extend the 4-byte match as long as possible.
+			//
+			// This is an inlined version of:
+			//	s = extendMatch(src, candidate+4, s+4)
+			s += 4
+			for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
+			}
+
+			d += emitCopy(dst[d:], base-candidate, s-base)
+			nextEmit = s
+			if s >= sLimit {
+				goto emitRemainder
+			}
+
+			// We could immediately start working at s now, but to improve
+			// compression we first update the hash table at s-1 and at s. If
+			// another emitCopy is not our next move, also calculate nextHash
+			// at s+1. At least on GOARCH=amd64, these three hash calculations
+			// are faster as one load64 call (with some shifts) instead of
+			// three load32 calls.
+ x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/hotelReservation/vendor/github.com/golang/snappy/snappy.go b/hotelReservation/vendor/github.com/golang/snappy/snappy.go new file mode 100644 index 000000000..ece692ea4 --- /dev/null +++ b/hotelReservation/vendor/github.com/golang/snappy/snappy.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snappy implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. +// +// There are actually two Snappy formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. +// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. +package snappy // import "github.com/golang/snappy" + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. 
+	// It is not part of the wire format per se, but some parts of the encoder
+	// assume that an offset fits into a uint16.
+	//
+	// Also, for the framing format (Writer type instead of Encode function),
+	// https://github.com/google/snappy/blob/master/framing_format.txt says
+	// that "the uncompressed data in a chunk must be no longer than 65536
+	// bytes".
+	maxBlockSize = 65536
+
+	// maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
+	// hard coded to be a const instead of a variable, so that obufLen can also
+	// be a const. Their equivalence is confirmed by
+	// TestMaxEncodedLenOfMaxBlockSize.
+	maxEncodedLenOfMaxBlockSize = 76490
+
+	obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
+	obufLen       = obufHeaderLen + maxEncodedLenOfMaxBlockSize
+)
+
+const (
+	chunkTypeCompressedData   = 0x00
+	chunkTypeUncompressedData = 0x01
+	chunkTypePadding          = 0xfe
+	chunkTypeStreamIdentifier = 0xff
+)
+
+var crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc implements the checksum specified in section 3 of
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func crc(b []byte) uint32 {
+	c := crc32.Update(0, crcTable, b)
+	return uint32(c>>15|c<<17) + 0xa282ead8
+}
diff --git a/hotelReservation/vendor/github.com/google/uuid/CHANGELOG.md b/hotelReservation/vendor/github.com/google/uuid/CHANGELOG.md
new file mode 100644
index 000000000..7ed347d3a
--- /dev/null
+++ b/hotelReservation/vendor/github.com/google/uuid/CHANGELOG.md
@@ -0,0 +1,21 @@
+# Changelog
+
+## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26)
+
+
+### Features
+
+* UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4))
+
+### Fixes
+
+* Clarify that Parse's job is to parse but not necessarily validate strings. (Documents current behavior)
+
+## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18)
+
+
+### Bug Fixes
+
+* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0))
+
+## Changelog
diff --git a/hotelReservation/vendor/github.com/google/uuid/CONTRIBUTING.md b/hotelReservation/vendor/github.com/google/uuid/CONTRIBUTING.md
new file mode 100644
index 000000000..a502fdc51
--- /dev/null
+++ b/hotelReservation/vendor/github.com/google/uuid/CONTRIBUTING.md
@@ -0,0 +1,26 @@
+# How to contribute
+
+We definitely welcome patches and contributions to this project!
+
+### Tips
+
+Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org).
+
+Always try to include a test case! If it is not possible or not necessary,
+please explain why in the pull request description.
+
+### Releasing
+
+Commits that would precipitate a SemVer change, as described in the Conventional
+Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action)
+to create a release candidate pull request. Once submitted, `release-please`
+will create a release.
+
+For tips on how to work with `release-please`, see its documentation.
+
+### Legal requirements
+
+In order to protect both you and ourselves, you will need to sign the
+[Contributor License Agreement](https://cla.developers.google.com/clas).
+
+You may have already signed it for other Google projects.
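A minimal round-trip sketch of the snappy block format implemented in the vendored sources above (an editorial illustration, not part of the vendored tree; it assumes the package is importable by the path shown in the diff, github.com/golang/snappy):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	// Repetitive input compresses well: encodeBlock emits one literal
	// chunk for the first occurrence, then copy chunks for the repeats.
	src := bytes.Repeat([]byte("hotelReservation"), 64)

	encoded := snappy.Encode(nil, src) // block format (Encode/Decode pair)
	decoded, err := snappy.Decode(nil, encoded)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes -> %d bytes, round-trip ok: %v\n",
		len(src), len(encoded), bytes.Equal(src, decoded))
}
```

For input like this the encoded block is a small fraction of the source length, since the copy tags described in snappy.go replace each repeat with a 2- or 3-byte chunk.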
diff --git a/hotelReservation/vendor/github.com/google/uuid/CONTRIBUTORS b/hotelReservation/vendor/github.com/google/uuid/CONTRIBUTORS new file mode 100644 index 000000000..b4bb97f6b --- /dev/null +++ b/hotelReservation/vendor/github.com/google/uuid/CONTRIBUTORS @@ -0,0 +1,9 @@ +Paul Borman +bmatsuo +shawnps +theory +jboverfelt +dsymonds +cd1 +wallclockbuilder +dansouza diff --git a/hotelReservation/vendor/github.com/google/uuid/LICENSE b/hotelReservation/vendor/github.com/google/uuid/LICENSE new file mode 100644 index 000000000..5dc68268d --- /dev/null +++ b/hotelReservation/vendor/github.com/google/uuid/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/hotelReservation/vendor/github.com/google/uuid/README.md b/hotelReservation/vendor/github.com/google/uuid/README.md new file mode 100644 index 000000000..3e9a61889 --- /dev/null +++ b/hotelReservation/vendor/github.com/google/uuid/README.md @@ -0,0 +1,21 @@ +# uuid +The uuid package generates and inspects UUIDs based on +[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122) +and DCE 1.1: Authentication and Security Services. + +This package is based on the github.com/pborman/uuid package (previously named +code.google.com/p/go-uuid). It differs from these earlier packages in that +a UUID is a 16 byte array rather than a byte slice. One loss due to this +change is the ability to represent an invalid UUID (vs a NIL UUID). 
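A short usage sketch (an editorial addition, not part of the upstream README): because `UUID` is a `[16]byte` array rather than a slice, values compare with `==` and can key maps directly.

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	id := uuid.New() // random (Version 4) UUID; panics only if crypto/rand fails

	// A [16]byte array is comparable, so UUIDs work as map keys and with ==.
	seen := map[uuid.UUID]bool{id: true}

	parsed, err := uuid.Parse(id.String())
	if err != nil {
		panic(err)
	}
	fmt.Println(seen[parsed], id == parsed) // true true
}
```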
+
+###### Install
+```sh
+go get github.com/google/uuid
+```
+
+###### Documentation
+[![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid)
+
+Full `go doc` style documentation for the package can be viewed online without
+installing this package by using the GoDoc site here:
+http://pkg.go.dev/github.com/google/uuid
diff --git a/hotelReservation/vendor/github.com/google/uuid/dce.go b/hotelReservation/vendor/github.com/google/uuid/dce.go
new file mode 100644
index 000000000..fa820b9d3
--- /dev/null
+++ b/hotelReservation/vendor/github.com/google/uuid/dce.go
@@ -0,0 +1,80 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"encoding/binary"
+	"fmt"
+	"os"
+)
+
+// A Domain represents a Version 2 domain.
+type Domain byte
+
+// Domain constants for DCE Security (Version 2) UUIDs.
+const (
+	Person = Domain(0)
+	Group  = Domain(1)
+	Org    = Domain(2)
+)
+
+// NewDCESecurity returns a DCE Security (Version 2) UUID.
+//
+// The domain should be one of Person, Group or Org.
+// On a POSIX system the id should be the user's UID for the Person
+// domain and the user's GID for the Group. The meaning of id for
+// the domain Org or on non-POSIX systems is site defined.
+//
+// For a given domain/id pair the same token may be returned for up to
+// 7 minutes and 10 seconds.
+func NewDCESecurity(domain Domain, id uint32) (UUID, error) {
+	uuid, err := NewUUID()
+	if err == nil {
+		uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
+		uuid[9] = byte(domain)
+		binary.BigEndian.PutUint32(uuid[0:], id)
+	}
+	return uuid, err
+}
+
+// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
+// domain with the id returned by os.Getuid.
+//
+//	NewDCESecurity(Person, uint32(os.Getuid()))
+func NewDCEPerson() (UUID, error) {
+	return NewDCESecurity(Person, uint32(os.Getuid()))
+}
+
+// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
+// domain with the id returned by os.Getgid.
+//
+//	NewDCESecurity(Group, uint32(os.Getgid()))
+func NewDCEGroup() (UUID, error) {
+	return NewDCESecurity(Group, uint32(os.Getgid()))
+}
+
+// Domain returns the domain for a Version 2 UUID. Domains are only defined
+// for Version 2 UUIDs.
+func (uuid UUID) Domain() Domain {
+	return Domain(uuid[9])
+}
+
+// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2
+// UUIDs.
+func (uuid UUID) ID() uint32 {
+	return binary.BigEndian.Uint32(uuid[0:4])
+}
+
+func (d Domain) String() string {
+	switch d {
+	case Person:
+		return "Person"
+	case Group:
+		return "Group"
+	case Org:
+		return "Org"
+	}
+	return fmt.Sprintf("Domain%d", int(d))
+}
diff --git a/hotelReservation/vendor/github.com/google/uuid/doc.go b/hotelReservation/vendor/github.com/google/uuid/doc.go
new file mode 100644
index 000000000..5b8a4b9af
--- /dev/null
+++ b/hotelReservation/vendor/github.com/google/uuid/doc.go
@@ -0,0 +1,12 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package uuid generates and inspects UUIDs.
+//
+// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security
+// Services.
+//
+// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to
+// maps or compared directly.
+package uuid
diff --git a/hotelReservation/vendor/github.com/google/uuid/hash.go b/hotelReservation/vendor/github.com/google/uuid/hash.go
new file mode 100644
index 000000000..b404f4bec
--- /dev/null
+++ b/hotelReservation/vendor/github.com/google/uuid/hash.go
@@ -0,0 +1,53 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"crypto/md5"
+	"crypto/sha1"
+	"hash"
+)
+
+// Well known namespace IDs and UUIDs.
+var (
+	NameSpaceDNS  = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
+	NameSpaceURL  = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
+	NameSpaceOID  = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
+	NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
+	Nil           UUID // empty UUID, all zeros
+)
+
+// NewHash returns a new UUID derived from the hash of space concatenated with
+// data generated by h. The hash should be at least 16 bytes in length. The
+// first 16 bytes of the hash are used to form the UUID. The version of the
+// UUID will be the lower 4 bits of version. NewHash is used to implement
+// NewMD5 and NewSHA1.
+func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
+	h.Reset()
+	h.Write(space[:]) //nolint:errcheck
+	h.Write(data)     //nolint:errcheck
+	s := h.Sum(nil)
+	var uuid UUID
+	copy(uuid[:], s)
+	uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
+	uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
+	return uuid
+}
+
+// NewMD5 returns a new MD5 (Version 3) UUID based on the
+// supplied name space and data. It is the same as calling:
+//
+//	NewHash(md5.New(), space, data, 3)
+func NewMD5(space UUID, data []byte) UUID {
+	return NewHash(md5.New(), space, data, 3)
+}
+
+// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
+// supplied name space and data. It is the same as calling:
+//
+//	NewHash(sha1.New(), space, data, 5)
+func NewSHA1(space UUID, data []byte) UUID {
+	return NewHash(sha1.New(), space, data, 5)
+}
diff --git a/hotelReservation/vendor/github.com/google/uuid/marshal.go b/hotelReservation/vendor/github.com/google/uuid/marshal.go
new file mode 100644
index 000000000..14bd34072
--- /dev/null
+++ b/hotelReservation/vendor/github.com/google/uuid/marshal.go
@@ -0,0 +1,38 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "fmt"
+
+// MarshalText implements encoding.TextMarshaler.
+func (uuid UUID) MarshalText() ([]byte, error) {
+	var js [36]byte
+	encodeHex(js[:], uuid)
+	return js[:], nil
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (uuid *UUID) UnmarshalText(data []byte) error {
+	id, err := ParseBytes(data)
+	if err != nil {
+		return err
+	}
+	*uuid = id
+	return nil
+}
+
+// MarshalBinary implements encoding.BinaryMarshaler.
+func (uuid UUID) MarshalBinary() ([]byte, error) {
+	return uuid[:], nil
+}
+
+// UnmarshalBinary implements encoding.BinaryUnmarshaler.
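Before UnmarshalBinary's body below, a hedged sketch of the name-based constructors defined in hash.go above: the same namespace and name always produce the same Version 5 UUID (editorial illustration, not part of the vendored sources).

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Name-based (Version 5) UUIDs are pure functions of namespace + name,
	// per NewHash: first 16 bytes of SHA1(space || data), with the version
	// and variant bits overwritten.
	a := uuid.NewSHA1(uuid.NameSpaceDNS, []byte("example.com"))
	b := uuid.NewSHA1(uuid.NameSpaceDNS, []byte("example.com"))
	fmt.Println(a == b, a.Version()) // true VERSION_5
}
```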
+func (uuid *UUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(uuid[:], data) + return nil +} diff --git a/hotelReservation/vendor/github.com/google/uuid/node.go b/hotelReservation/vendor/github.com/google/uuid/node.go new file mode 100644 index 000000000..d651a2b06 --- /dev/null +++ b/hotelReservation/vendor/github.com/google/uuid/node.go @@ -0,0 +1,90 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "sync" +) + +var ( + nodeMu sync.Mutex + ifname string // name of interface being used + nodeID [6]byte // hardware for version 1 UUIDs + zeroID [6]byte // nodeID with only 0's +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + defer nodeMu.Unlock() + nodeMu.Lock() + return ifname +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. +// +// SetNodeInterface never fails when name is "". +func SetNodeInterface(name string) bool { + defer nodeMu.Unlock() + nodeMu.Lock() + return setNodeInterface(name) +} + +func setNodeInterface(name string) bool { + iname, addr := getHardwareInterface(name) // null implementation for js + if iname != "" && addr != nil { + ifname = iname + copy(nodeID[:], addr) + return true + } + + // We found no interfaces with a valid hardware address. If name + // does not specify a specific interface generate a random Node ID + // (section 4.1.6) + if name == "" { + ifname = "random" + randomBits(nodeID[:]) + return true + } + return false +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. +func NodeID() []byte { + defer nodeMu.Unlock() + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + nid := nodeID + return nid[:] +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. If id is less than 6 bytes then false is returned and the +// Node ID is not set. +func SetNodeID(id []byte) bool { + if len(id) < 6 { + return false + } + defer nodeMu.Unlock() + nodeMu.Lock() + copy(nodeID[:], id) + ifname = "user" + return true +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) NodeID() []byte { + var node [6]byte + copy(node[:], uuid[10:]) + return node[:] +} diff --git a/hotelReservation/vendor/github.com/google/uuid/node_js.go b/hotelReservation/vendor/github.com/google/uuid/node_js.go new file mode 100644 index 000000000..b2a0bc871 --- /dev/null +++ b/hotelReservation/vendor/github.com/google/uuid/node_js.go @@ -0,0 +1,12 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build js + +package uuid + +// getHardwareInterface returns nil values for the JS version of the code. +// This removes the "net" dependency, because it is not used in the browser. +// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. 
+func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/hotelReservation/vendor/github.com/google/uuid/node_net.go b/hotelReservation/vendor/github.com/google/uuid/node_net.go new file mode 100644 index 000000000..0cbbcddbd --- /dev/null +++ b/hotelReservation/vendor/github.com/google/uuid/node_net.go @@ -0,0 +1,33 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !js + +package uuid + +import "net" + +var interfaces []net.Interface // cached list of interfaces + +// getHardwareInterface returns the name and hardware address of interface name. +// If name is "" then the name and hardware address of one of the system's +// interfaces is returned. If no interfaces are found (name does not exist or +// there are no interfaces) then "", nil is returned. +// +// Only addresses of at least 6 bytes are returned. +func getHardwareInterface(name string) (string, []byte) { + if interfaces == nil { + var err error + interfaces, err = net.Interfaces() + if err != nil { + return "", nil + } + } + for _, ifs := range interfaces { + if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { + return ifs.Name, ifs.HardwareAddr + } + } + return "", nil +} diff --git a/hotelReservation/vendor/github.com/google/uuid/null.go b/hotelReservation/vendor/github.com/google/uuid/null.go new file mode 100644 index 000000000..d7fcbf286 --- /dev/null +++ b/hotelReservation/vendor/github.com/google/uuid/null.go @@ -0,0 +1,118 @@ +// Copyright 2021 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "database/sql/driver" + "encoding/json" + "fmt" +) + +var jsonNull = []byte("null") + +// NullUUID represents a UUID that may be null. +// NullUUID implements the SQL driver.Scanner interface so +// it can be used as a scan destination: +// +// var u uuid.NullUUID +// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u) +// ... +// if u.Valid { +// // use u.UUID +// } else { +// // NULL value +// } +// +type NullUUID struct { + UUID UUID + Valid bool // Valid is true if UUID is not NULL +} + +// Scan implements the SQL driver.Scanner interface. +func (nu *NullUUID) Scan(value interface{}) error { + if value == nil { + nu.UUID, nu.Valid = Nil, false + return nil + } + + err := nu.UUID.Scan(value) + if err != nil { + nu.Valid = false + return err + } + + nu.Valid = true + return nil +} + +// Value implements the driver Valuer interface. +func (nu NullUUID) Value() (driver.Value, error) { + if !nu.Valid { + return nil, nil + } + // Delegate to UUID Value function + return nu.UUID.Value() +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (nu NullUUID) MarshalBinary() ([]byte, error) { + if nu.Valid { + return nu.UUID[:], nil + } + + return []byte(nil), nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (nu *NullUUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(nu.UUID[:], data) + nu.Valid = true + return nil +} + +// MarshalText implements encoding.TextMarshaler. +func (nu NullUUID) MarshalText() ([]byte, error) { + if nu.Valid { + return nu.UUID.MarshalText() + } + + return jsonNull, nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. 
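Before UnmarshalText's body below, a hedged sketch of the Scan flow that NullUUID's doc comment describes; the `rooms` table and `owner_id` column are hypothetical names invented for illustration.

```go
package example

import (
	"database/sql"

	"github.com/google/uuid"
)

// lookupOwner reports the owner UUID and whether the column was non-NULL.
// "rooms" and "owner_id" are hypothetical names used only for illustration.
func lookupOwner(db *sql.DB, roomID int) (uuid.UUID, bool, error) {
	var owner uuid.NullUUID
	err := db.QueryRow(`SELECT owner_id FROM rooms WHERE id = ?`, roomID).Scan(&owner)
	if err != nil {
		return uuid.Nil, false, err
	}
	return owner.UUID, owner.Valid, nil // Valid == false means SQL NULL
}
```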
+func (nu *NullUUID) UnmarshalText(data []byte) error {
+	id, err := ParseBytes(data)
+	if err != nil {
+		nu.Valid = false
+		return err
+	}
+	nu.UUID = id
+	nu.Valid = true
+	return nil
+}
+
+// MarshalJSON implements json.Marshaler.
+func (nu NullUUID) MarshalJSON() ([]byte, error) {
+	if nu.Valid {
+		return json.Marshal(nu.UUID)
+	}
+
+	return jsonNull, nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (nu *NullUUID) UnmarshalJSON(data []byte) error {
+	if bytes.Equal(data, jsonNull) {
+		*nu = NullUUID{}
+		return nil // valid null UUID
+	}
+	err := json.Unmarshal(data, &nu.UUID)
+	nu.Valid = err == nil
+	return err
+}
diff --git a/hotelReservation/vendor/github.com/google/uuid/sql.go b/hotelReservation/vendor/github.com/google/uuid/sql.go
new file mode 100644
index 000000000..2e02ec06c
--- /dev/null
+++ b/hotelReservation/vendor/github.com/google/uuid/sql.go
@@ -0,0 +1,59 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"database/sql/driver"
+	"fmt"
+)
+
+// Scan implements sql.Scanner so UUIDs can be read from databases transparently.
+// Currently, database types that map to string and []byte are supported. Please
+// consult database-specific driver documentation for matching types.
+func (uuid *UUID) Scan(src interface{}) error {
+	switch src := src.(type) {
+	case nil:
+		return nil
+
+	case string:
+		// if an empty UUID comes from a table, we return a null UUID
+		if src == "" {
+			return nil
+		}
+
+		// see Parse for required string format
+		u, err := Parse(src)
+		if err != nil {
+			return fmt.Errorf("Scan: %v", err)
+		}
+
+		*uuid = u
+
+	case []byte:
+		// if an empty UUID comes from a table, we return a null UUID
+		if len(src) == 0 {
+			return nil
+		}
+
+		// assumes a simple slice of bytes if 16 bytes
+		// otherwise attempts to parse
+		if len(src) != 16 {
+			return uuid.Scan(string(src))
+		}
+		copy((*uuid)[:], src)
+
+	default:
+		return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
+	}
+
+	return nil
+}
+
+// Value implements sql.Valuer so that UUIDs can be written to databases
+// transparently. Currently, UUIDs map to strings. Please consult
+// database-specific driver documentation for matching types.
+func (uuid UUID) Value() (driver.Value, error) {
+	return uuid.String(), nil
+}
diff --git a/hotelReservation/vendor/github.com/google/uuid/time.go b/hotelReservation/vendor/github.com/google/uuid/time.go
new file mode 100644
index 000000000..e6ef06cdc
--- /dev/null
+++ b/hotelReservation/vendor/github.com/google/uuid/time.go
@@ -0,0 +1,123 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"encoding/binary"
+	"sync"
+	"time"
+)
+
+// A Time represents a time as the number of 100's of nanoseconds since 15 Oct
+// 1582.
+type Time int64
+
+const (
+	lillian    = 2299160          // Julian day of 15 Oct 1582
+	unix       = 2440587          // Julian day of 1 Jan 1970
+	epoch      = unix - lillian   // Days between epochs
+	g1582      = epoch * 86400    // seconds between epochs
+	g1582ns100 = g1582 * 10000000 // 100s of nanoseconds between epochs
+)
+
+var (
+	timeMu   sync.Mutex
+	lasttime uint64 // last time we returned
+	clockSeq uint16 // clock sequence for this run
+
+	timeNow = time.Now // for testing
+)
+
+// UnixTime converts t to the number of seconds and nanoseconds using the Unix
+// epoch of 1 Jan 1970.
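Before the UnixTime implementation below, a sketch of how the epoch constants above are typically consumed: recovering the wall-clock timestamp embedded in a Version 1 UUID (editorial illustration, not part of the vendored sources).

```go
package main

import (
	"fmt"
	"time"

	"github.com/google/uuid"
)

func main() {
	id, err := uuid.NewUUID() // time-based (Version 1)
	if err != nil {
		panic(err)
	}
	// Time() yields 100ns ticks since 15 Oct 1582; UnixTime rebases to 1970.
	sec, nsec := id.Time().UnixTime()
	fmt.Println(time.Unix(sec, nsec).UTC())
}
```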
+func (t Time) UnixTime() (sec, nsec int64) { + sec = int64(t - g1582ns100) + nsec = (sec % 10000000) * 100 + sec /= 10000000 + return sec, nsec +} + +// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and +// clock sequence as well as adjusting the clock sequence as needed. An error +// is returned if the current time cannot be determined. +func GetTime() (Time, uint16, error) { + defer timeMu.Unlock() + timeMu.Lock() + return getTime() +} + +func getTime() (Time, uint16, error) { + t := timeNow() + + // If we don't have a clock sequence already, set one. + if clockSeq == 0 { + setClockSequence(-1) + } + now := uint64(t.UnixNano()/100) + g1582ns100 + + // If time has gone backwards with this clock sequence then we + // increment the clock sequence + if now <= lasttime { + clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 + } + lasttime = now + return Time(now), clockSeq, nil +} + +// ClockSequence returns the current clock sequence, generating one if not +// already set. The clock sequence is only used for Version 1 UUIDs. +// +// The uuid package does not use global static storage for the clock sequence or +// the last time a UUID was generated. Unless SetClockSequence is used, a new +// random clock sequence is generated the first time a clock sequence is +// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) +func ClockSequence() int { + defer timeMu.Unlock() + timeMu.Lock() + return clockSequence() +} + +func clockSequence() int { + if clockSeq == 0 { + setClockSequence(-1) + } + return int(clockSeq & 0x3fff) +} + +// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to +// -1 causes a new sequence to be generated. +func SetClockSequence(seq int) { + defer timeMu.Unlock() + timeMu.Lock() + setClockSequence(seq) +} + +func setClockSequence(seq int) { + if seq == -1 { + var b [2]byte + randomBits(b[:]) // clock sequence + seq = int(b[0])<<8 | int(b[1]) + } + oldSeq := clockSeq + clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant + if oldSeq != clockSeq { + lasttime = 0 + } +} + +// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in +// uuid. The time is only defined for version 1 and 2 UUIDs. +func (uuid UUID) Time() Time { + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + return Time(time) +} + +// ClockSequence returns the clock sequence encoded in uuid. +// The clock sequence is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) ClockSequence() int { + return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff +} diff --git a/hotelReservation/vendor/github.com/google/uuid/util.go b/hotelReservation/vendor/github.com/google/uuid/util.go new file mode 100644 index 000000000..5ea6c7378 --- /dev/null +++ b/hotelReservation/vendor/github.com/google/uuid/util.go @@ -0,0 +1,43 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// randomBits completely fills slice b with random data. +func randomBits(b []byte) { + if _, err := io.ReadFull(rander, b); err != nil { + panic(err.Error()) // rand should never fail + } +} + +// xvalues returns the value of a byte as a hexadecimal digit or 255. 
+var xvalues = [256]byte{
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
+	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+}
+
+// xtob converts hex characters x1 and x2 into a byte.
+func xtob(x1, x2 byte) (byte, bool) {
+	b1 := xvalues[x1]
+	b2 := xvalues[x2]
+	return (b1 << 4) | b2, b1 != 255 && b2 != 255
+}
diff --git a/hotelReservation/vendor/github.com/google/uuid/uuid.go b/hotelReservation/vendor/github.com/google/uuid/uuid.go
new file mode 100644
index 000000000..dc75f7d99
--- /dev/null
+++ b/hotelReservation/vendor/github.com/google/uuid/uuid.go
@@ -0,0 +1,312 @@
+// Copyright 2018 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"bytes"
+	"crypto/rand"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+	"sync"
+)
+
+// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
+// 4122.
+type UUID [16]byte
+
+// A Version represents a UUID's version.
+type Version byte
+
+// A Variant represents a UUID's variant.
+type Variant byte
+
+// Constants returned by Variant.
+const (
+	Invalid   = Variant(iota) // Invalid UUID
+	RFC4122                   // The variant specified in RFC4122
+	Reserved                  // Reserved, NCS backward compatibility.
+	Microsoft                 // Reserved, Microsoft Corporation backward compatibility.
+	Future                    // Reserved for future definition.
+)
+
+const randPoolSize = 16 * 16
+
+var (
+	rander      = rand.Reader // random function
+	poolEnabled = false
+	poolMu      sync.Mutex
+	poolPos     = randPoolSize     // protected with poolMu
+	pool        [randPoolSize]byte // protected with poolMu
+)
+
+type invalidLengthError struct{ len int }
+
+func (err invalidLengthError) Error() string {
+	return fmt.Sprintf("invalid UUID length: %d", err.len)
+}
+
+// IsInvalidLengthError is a matcher function for the custom error
+// invalidLengthError.
+func IsInvalidLengthError(err error) bool {
+	_, ok := err.(invalidLengthError)
+	return ok
+}
+
+// Parse decodes s into a UUID or returns an error if it cannot be parsed. Both
+// the standard UUID forms defined in RFC 4122
+// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded.
In addition, +// Parse accepts non-standard strings such as the raw hex encoding +// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings, +// e.g. {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}. Only the middle 36 bytes are +// examined in the latter case. Parse should not be used to validate strings as +// it parses non-standard encodings as indicated above. +func Parse(s string) (UUID, error) { + var uuid UUID + switch len(s) { + // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36: + + // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: + if !strings.EqualFold(s[:9], "urn:uuid:") { + return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + + // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + case 36 + 2: + s = s[1:] + + // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + case 32: + var ok bool + for i := range uuid { + uuid[i], ok = xtob(s[i*2], s[i*2+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, invalidLengthError{len(s)} + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34, + } { + v, ok := xtob(s[x], s[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// ParseBytes is like Parse, except it parses a byte slice instead of a string. +func ParseBytes(b []byte) (UUID, error) { + var uuid UUID + switch len(b) { + case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) { + return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) + } + b = b[9:] + case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + b = b[1:] + case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + var ok bool + for i := 0; i < 32; i += 2 { + uuid[i/2], ok = xtob(b[i], b[i+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, invalidLengthError{len(b)} + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34, + } { + v, ok := xtob(b[x], b[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// MustParse is like Parse but panics if the string cannot be parsed. +// It simplifies safe initialization of global variables holding compiled UUIDs. +func MustParse(s string) UUID { + uuid, err := Parse(s) + if err != nil { + panic(`uuid: Parse(` + s + `): ` + err.Error()) + } + return uuid +} + +// FromBytes creates a new UUID from a byte slice. Returns an error if the slice +// does not have a length of 16. The bytes are copied from the slice. +func FromBytes(b []byte) (uuid UUID, err error) { + err = uuid.UnmarshalBinary(b) + return uuid, err +} + +// Must returns uuid if err is nil and panics otherwise. +func Must(uuid UUID, err error) UUID { + if err != nil { + panic(err) + } + return uuid +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. 
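Before the String implementation below, a sketch of the four input shapes Parse accepts per its doc comment (editorial illustration, not part of the vendored sources).

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// The four encodings Parse handles, per its doc comment above.
	for _, s := range []string{
		"6ba7b810-9dad-11d1-80b4-00c04fd430c8",          // canonical, 36 bytes
		"urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8", // URN form, 45 bytes
		"{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",        // Microsoft style, 38 bytes
		"6ba7b8109dad11d180b400c04fd430c8",              // raw hex, 32 bytes
	} {
		u, err := uuid.Parse(s)
		fmt.Println(u, err) // all four decode to the same UUID, err == nil
	}
}
```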
+func (uuid UUID) String() string { + var buf [36]byte + encodeHex(buf[:], uuid) + return string(buf[:]) +} + +// URN returns the RFC 2141 URN form of uuid, +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. +func (uuid UUID) URN() string { + var buf [36 + 9]byte + copy(buf[:], "urn:uuid:") + encodeHex(buf[9:], uuid) + return string(buf[:]) +} + +func encodeHex(dst []byte, uuid UUID) { + hex.Encode(dst, uuid[:4]) + dst[8] = '-' + hex.Encode(dst[9:13], uuid[4:6]) + dst[13] = '-' + hex.Encode(dst[14:18], uuid[6:8]) + dst[18] = '-' + hex.Encode(dst[19:23], uuid[8:10]) + dst[23] = '-' + hex.Encode(dst[24:], uuid[10:]) +} + +// Variant returns the variant encoded in uuid. +func (uuid UUID) Variant() Variant { + switch { + case (uuid[8] & 0xc0) == 0x80: + return RFC4122 + case (uuid[8] & 0xe0) == 0xc0: + return Microsoft + case (uuid[8] & 0xe0) == 0xe0: + return Future + default: + return Reserved + } +} + +// Version returns the version of uuid. +func (uuid UUID) Version() Version { + return Version(uuid[6] >> 4) +} + +func (v Version) String() string { + if v > 15 { + return fmt.Sprintf("BAD_VERSION_%d", v) + } + return fmt.Sprintf("VERSION_%d", v) +} + +func (v Variant) String() string { + switch v { + case RFC4122: + return "RFC4122" + case Reserved: + return "Reserved" + case Microsoft: + return "Microsoft" + case Future: + return "Future" + case Invalid: + return "Invalid" + } + return fmt.Sprintf("BadVariant%d", int(v)) +} + +// SetRand sets the random number generator to r, which implements io.Reader. +// If r.Read returns an error when the package requests random data then +// a panic will be issued. +// +// Calling SetRand with nil sets the random number generator to the default +// generator. +func SetRand(r io.Reader) { + if r == nil { + rander = rand.Reader + return + } + rander = r +} + +// EnableRandPool enables internal randomness pool used for Random +// (Version 4) UUID generation. The pool contains random bytes read from +// the random number generator on demand in batches. Enabling the pool +// may improve the UUID generation throughput significantly. +// +// Since the pool is stored on the Go heap, this feature may be a bad fit +// for security sensitive applications. +// +// Both EnableRandPool and DisableRandPool are not thread-safe and should +// only be called when there is no possibility that New or any other +// UUID Version 4 generation function will be called concurrently. +func EnableRandPool() { + poolEnabled = true +} + +// DisableRandPool disables the randomness pool if it was previously +// enabled with EnableRandPool. +// +// Both EnableRandPool and DisableRandPool are not thread-safe and should +// only be called when there is no possibility that New or any other +// UUID Version 4 generation function will be called concurrently. +func DisableRandPool() { + poolEnabled = false + defer poolMu.Unlock() + poolMu.Lock() + poolPos = randPoolSize +} + +// UUIDs is a slice of UUID types. +type UUIDs []UUID + +// Strings returns a string slice containing the string form of each UUID in uuids. 
+func (uuids UUIDs) Strings() []string {
+	var uuidStrs = make([]string, len(uuids))
+	for i, uuid := range uuids {
+		uuidStrs[i] = uuid.String()
+	}
+	return uuidStrs
+}
diff --git a/hotelReservation/vendor/github.com/google/uuid/version1.go b/hotelReservation/vendor/github.com/google/uuid/version1.go
new file mode 100644
index 000000000..463109629
--- /dev/null
+++ b/hotelReservation/vendor/github.com/google/uuid/version1.go
@@ -0,0 +1,44 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"encoding/binary"
+)
+
+// NewUUID returns a Version 1 UUID based on the current NodeID and clock
+// sequence, and the current time. If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically. If the NodeID cannot
+// be set, NewUUID returns nil. If the clock sequence has not been set by
+// SetClockSequence then it will be set automatically. If GetTime fails to
+// return the current time, NewUUID returns Nil and an error.
+//
+// In most cases, New should be used.
+func NewUUID() (UUID, error) {
+	var uuid UUID
+	now, seq, err := GetTime()
+	if err != nil {
+		return uuid, err
+	}
+
+	timeLow := uint32(now & 0xffffffff)
+	timeMid := uint16((now >> 32) & 0xffff)
+	timeHi := uint16((now >> 48) & 0x0fff)
+	timeHi |= 0x1000 // Version 1
+
+	binary.BigEndian.PutUint32(uuid[0:], timeLow)
+	binary.BigEndian.PutUint16(uuid[4:], timeMid)
+	binary.BigEndian.PutUint16(uuid[6:], timeHi)
+	binary.BigEndian.PutUint16(uuid[8:], seq)
+
+	nodeMu.Lock()
+	if nodeID == zeroID {
+		setNodeInterface("")
+	}
+	copy(uuid[10:], nodeID[:])
+	nodeMu.Unlock()
+
+	return uuid, nil
+}
diff --git a/hotelReservation/vendor/github.com/google/uuid/version4.go b/hotelReservation/vendor/github.com/google/uuid/version4.go
new file mode 100644
index 000000000..7697802e4
--- /dev/null
+++ b/hotelReservation/vendor/github.com/google/uuid/version4.go
@@ -0,0 +1,76 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "io"
+
+// New creates a new random UUID or panics. New is equivalent to
+// the expression
+//
+//	uuid.Must(uuid.NewRandom())
+func New() UUID {
+	return Must(NewRandom())
+}
+
+// NewString creates a new random UUID and returns it as a string or panics.
+// NewString is equivalent to the expression
+//
+//	uuid.New().String()
+func NewString() string {
+	return Must(NewRandom()).String()
+}
+
+// NewRandom returns a Random (Version 4) UUID.
+//
+// The strength of the UUIDs is based on the strength of the crypto/rand
+// package.
+//
+// Uses the randomness pool if it was enabled with EnableRandPool.
+//
+// A note about uniqueness derived from the UUID Wikipedia entry:
+//
+//	Randomly generated UUIDs have 122 random bits. One's annual risk of being
+//	hit by a meteorite is estimated to be one chance in 17 billion, which
+//	means the probability is about 0.00000000006 (6 × 10^-11), equivalent to
+//	the odds of creating a few tens of trillions of UUIDs in a year and
+//	having one duplicate.
+func NewRandom() (UUID, error) {
+	if !poolEnabled {
+		return NewRandomFromReader(rander)
+	}
+	return newRandomFromPool()
+}
+
+// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader.
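Before the NewRandomFromReader body below, a sketch of its main practical use, deterministic UUIDs in tests (editorial illustration; math/rand is not cryptographically secure and is used here only to show the mechanism).

```go
package main

import (
	"fmt"
	"math/rand"

	"github.com/google/uuid"
)

func main() {
	// *rand.Rand implements io.Reader, so a fixed seed yields a fixed
	// byte stream and therefore reproducible Version 4 UUIDs.
	r := rand.New(rand.NewSource(1))
	u1, _ := uuid.NewRandomFromReader(r)
	u2, _ := uuid.NewRandomFromReader(r)
	fmt.Println(u1, u2) // same output on every run for the same seed
}
```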
+func NewRandomFromReader(r io.Reader) (UUID, error) { + var uuid UUID + _, err := io.ReadFull(r, uuid[:]) + if err != nil { + return Nil, err + } + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} + +func newRandomFromPool() (UUID, error) { + var uuid UUID + poolMu.Lock() + if poolPos == randPoolSize { + _, err := io.ReadFull(rander, pool[:]) + if err != nil { + poolMu.Unlock() + return Nil, err + } + poolPos = 0 + } + copy(uuid[:], pool[poolPos:(poolPos+16)]) + poolPos += 16 + poolMu.Unlock() + + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} diff --git a/hotelReservation/vendor/github.com/hashicorp/consul/website/LICENSE.md b/hotelReservation/vendor/github.com/hashicorp/consul/website/LICENSE.md deleted file mode 100644 index 3189f43a6..000000000 --- a/hotelReservation/vendor/github.com/hashicorp/consul/website/LICENSE.md +++ /dev/null @@ -1,10 +0,0 @@ -# Proprietary License - -This license is temporary while a more official one is drafted. However, -this should make it clear: - -The text contents of this website are MPL 2.0 licensed. - -The design contents of this website are proprietary and may not be reproduced -or reused in any way other than to run the website locally. The license for -the design is owned solely by HashiCorp, Inc. diff --git a/hotelReservation/vendor/github.com/hashicorp/go-immutable-radix/.gitignore b/hotelReservation/vendor/github.com/hashicorp/go-immutable-radix/.gitignore new file mode 100644 index 000000000..daf913b1b --- /dev/null +++ b/hotelReservation/vendor/github.com/hashicorp/go-immutable-radix/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/hotelReservation/vendor/github.com/hashicorp/go-immutable-radix/.travis.yml b/hotelReservation/vendor/github.com/hashicorp/go-immutable-radix/.travis.yml new file mode 100644 index 000000000..1a0bbea6c --- /dev/null +++ b/hotelReservation/vendor/github.com/hashicorp/go-immutable-radix/.travis.yml @@ -0,0 +1,3 @@ +language: go +go: + - tip diff --git a/hotelReservation/vendor/github.com/hashicorp/go-immutable-radix/LICENSE b/hotelReservation/vendor/github.com/hashicorp/go-immutable-radix/LICENSE new file mode 100644 index 000000000..e87a115e4 --- /dev/null +++ b/hotelReservation/vendor/github.com/hashicorp/go-immutable-radix/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. 
that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. 
for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. 
Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. 
Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. 
Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/hotelReservation/vendor/github.com/hashicorp/go-immutable-radix/README.md b/hotelReservation/vendor/github.com/hashicorp/go-immutable-radix/README.md new file mode 100644 index 000000000..8910fcc03 --- /dev/null +++ b/hotelReservation/vendor/github.com/hashicorp/go-immutable-radix/README.md @@ -0,0 +1,41 @@ +go-immutable-radix [![Build Status](https://travis-ci.org/hashicorp/go-immutable-radix.png)](https://travis-ci.org/hashicorp/go-immutable-radix) +========= + +Provides the `iradix` package that implements an immutable [radix tree](http://en.wikipedia.org/wiki/Radix_tree). +The package only provides a single `Tree` implementation, optimized for sparse nodes. + +As a radix tree, it provides the following: + * O(k) operations. In many cases, this can be faster than a hash table since + the hash function is an O(k) operation, and hash tables have very poor cache locality. + * Minimum / Maximum value lookups + * Ordered iteration + +A tree supports using a transaction to batch multiple updates (insert, delete) +in a more efficient manner than performing each operation one at a time. + +For a mutable variant, see [go-radix](https://github.com/armon/go-radix). + +Documentation +============= + +The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-immutable-radix). 
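+
+A transaction can batch several updates before a single commit. A minimal
+sketch of that API (hedged; the key names here are illustrative, not from
+the upstream docs):
+
+```go
+// Batch updates in one transaction; nodes copied during the
+// transaction are reused across writes, so this is cheaper than
+// repeated single-key inserts on the Tree.
+tree := iradix.New()
+txn := tree.Txn()
+txn.Insert([]byte("alpha"), 1)
+txn.Insert([]byte("beta"), 2)
+txn.Delete([]byte("alpha"))
+tree = txn.Commit()
+```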
+ +Example +======= + +Below is a simple example of usage + +```go +// Create a tree +r := iradix.New() +r, _, _ = r.Insert([]byte("foo"), 1) +r, _, _ = r.Insert([]byte("bar"), 2) +r, _, _ = r.Insert([]byte("foobar"), 2) + +// Find the longest prefix match +m, _, _ := r.Root().LongestPrefix([]byte("foozip")) +if string(m) != "foo" { + panic("should be foo") +} +``` + diff --git a/hotelReservation/vendor/github.com/hashicorp/go-immutable-radix/edges.go b/hotelReservation/vendor/github.com/hashicorp/go-immutable-radix/edges.go new file mode 100644 index 000000000..a63674775 --- /dev/null +++ b/hotelReservation/vendor/github.com/hashicorp/go-immutable-radix/edges.go @@ -0,0 +1,21 @@ +package iradix + +import "sort" + +type edges []edge + +func (e edges) Len() int { + return len(e) +} + +func (e edges) Less(i, j int) bool { + return e[i].label < e[j].label +} + +func (e edges) Swap(i, j int) { + e[i], e[j] = e[j], e[i] +} + +func (e edges) Sort() { + sort.Sort(e) +} diff --git a/hotelReservation/vendor/github.com/hashicorp/go-immutable-radix/iradix.go b/hotelReservation/vendor/github.com/hashicorp/go-immutable-radix/iradix.go new file mode 100644 index 000000000..e5e6e57f2 --- /dev/null +++ b/hotelReservation/vendor/github.com/hashicorp/go-immutable-radix/iradix.go @@ -0,0 +1,662 @@ +package iradix + +import ( + "bytes" + "strings" + + "github.com/hashicorp/golang-lru/simplelru" +) + +const ( + // defaultModifiedCache is the default size of the modified node + // cache used per transaction. This is used to cache the updates + // to the nodes near the root, while the leaves do not need to be + // cached. This is important for very large transactions to prevent + // the modified cache from growing to be enormous. This is also used + // to set the max size of the mutation notify maps since those should + // also be bounded in a similar way. + defaultModifiedCache = 8192 +) + +// Tree implements an immutable radix tree. This can be treated as a +// Dictionary abstract data type. The main advantage over a standard +// hash map is prefix-based lookups and ordered iteration. The immutability +// means that it is safe to concurrently read from a Tree without any +// coordination. +type Tree struct { + root *Node + size int +} + +// New returns an empty Tree +func New() *Tree { + t := &Tree{ + root: &Node{ + mutateCh: make(chan struct{}), + }, + } + return t +} + +// Len is used to return the number of elements in the tree +func (t *Tree) Len() int { + return t.size +} + +// Txn is a transaction on the tree. This transaction is applied +// atomically and returns a new tree when committed. A transaction +// is not thread safe, and should only be used by a single goroutine. +type Txn struct { + // root is the modified root for the transaction. + root *Node + + // snap is a snapshot of the root node for use if we have to run the + // slow notify algorithm. + snap *Node + + // size tracks the size of the tree as it is modified during the + // transaction. + size int + + // writable is a cache of writable nodes that have been created during + // the course of the transaction. This allows us to re-use the same + // nodes for further writes and avoid unnecessary copies of nodes that + // have never been exposed outside the transaction. This will only hold + // up to defaultModifiedCache number of entries. + writable *simplelru.LRU + + // trackChannels is used to hold channels that need to be notified to + // signal mutation of the tree. 
This will only hold up to + // defaultModifiedCache number of entries, after which we will set the + // trackOverflow flag, which will cause us to use a more expensive + // algorithm to perform the notifications. Mutation tracking is only + // performed if trackMutate is true. + trackChannels map[chan struct{}]struct{} + trackOverflow bool + trackMutate bool +} + +// Txn starts a new transaction that can be used to mutate the tree +func (t *Tree) Txn() *Txn { + txn := &Txn{ + root: t.root, + snap: t.root, + size: t.size, + } + return txn +} + +// TrackMutate can be used to toggle if mutations are tracked. If this is enabled +// then notifications will be issued for affected internal nodes and leaves when +// the transaction is committed. +func (t *Txn) TrackMutate(track bool) { + t.trackMutate = track +} + +// trackChannel safely attempts to track the given mutation channel, setting the +// overflow flag if we can no longer track any more. This limits the amount of +// state that will accumulate during a transaction and we have a slower algorithm +// to switch to if we overflow. +func (t *Txn) trackChannel(ch chan struct{}) { + // In overflow, make sure we don't store any more objects. + if t.trackOverflow { + return + } + + // If this would overflow the state we reject it and set the flag (since + // we aren't tracking everything that's required any longer). + if len(t.trackChannels) >= defaultModifiedCache { + // Mark that we are in the overflow state + t.trackOverflow = true + + // Clear the map so that the channels can be garbage collected. It is + // safe to do this since we have already overflowed and will be using + // the slow notify algorithm. + t.trackChannels = nil + return + } + + // Create the map on the fly when we need it. + if t.trackChannels == nil { + t.trackChannels = make(map[chan struct{}]struct{}) + } + + // Otherwise we are good to track it. + t.trackChannels[ch] = struct{}{} +} + +// writeNode returns a node to be modified, if the current node has already been +// modified during the course of the transaction, it is used in-place. Set +// forLeafUpdate to true if you are getting a write node to update the leaf, +// which will set leaf mutation tracking appropriately as well. +func (t *Txn) writeNode(n *Node, forLeafUpdate bool) *Node { + // Ensure the writable set exists. + if t.writable == nil { + lru, err := simplelru.NewLRU(defaultModifiedCache, nil) + if err != nil { + panic(err) + } + t.writable = lru + } + + // If this node has already been modified, we can continue to use it + // during this transaction. We know that we don't need to track it for + // a node update since the node is writable, but if this is for a leaf + // update we track it, in case the initial write to this node didn't + // update the leaf. + if _, ok := t.writable.Get(n); ok { + if t.trackMutate && forLeafUpdate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + return n + } + + // Mark this node as being mutated. + if t.trackMutate { + t.trackChannel(n.mutateCh) + } + + // Mark its leaf as being mutated, if appropriate. + if t.trackMutate && forLeafUpdate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + + // Copy the existing node. If you have set forLeafUpdate it will be + // safe to replace this leaf with another after you get your node for + // writing. You MUST replace it, because the channel associated with + // this leaf will be closed when this transaction is committed. 
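+	// The copy below is the copy-on-write step: only nodes along the
+	// modified path are duplicated, and the writable LRU above lets
+	// later writes in this transaction reuse the same copy instead of
+	// duplicating the node again.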
+	nc := &Node{
+		mutateCh: make(chan struct{}),
+		leaf:     n.leaf,
+	}
+	if n.prefix != nil {
+		nc.prefix = make([]byte, len(n.prefix))
+		copy(nc.prefix, n.prefix)
+	}
+	if len(n.edges) != 0 {
+		nc.edges = make([]edge, len(n.edges))
+		copy(nc.edges, n.edges)
+	}
+
+	// Mark this node as writable.
+	t.writable.Add(nc, nil)
+	return nc
+}
+
+// Visit all the nodes in the tree under n, and add their mutateChannels to the transaction.
+// Returns the size of the subtree visited
+func (t *Txn) trackChannelsAndCount(n *Node) int {
+	// Count only leaf nodes
+	leaves := 0
+	if n.leaf != nil {
+		leaves = 1
+	}
+	// Mark this node as being mutated.
+	if t.trackMutate {
+		t.trackChannel(n.mutateCh)
+	}
+
+	// Mark its leaf as being mutated, if appropriate.
+	if t.trackMutate && n.leaf != nil {
+		t.trackChannel(n.leaf.mutateCh)
+	}
+
+	// Recurse on the children
+	for _, e := range n.edges {
+		leaves += t.trackChannelsAndCount(e.node)
+	}
+	return leaves
+}
+
+// mergeChild is called to collapse the given node with its child. This is only
+// called when the given node is not a leaf and has a single edge.
+func (t *Txn) mergeChild(n *Node) {
+	// Mark the child node as being mutated since we are about to abandon
+	// it. We don't need to mark the leaf since we are retaining it if it
+	// is there.
+	e := n.edges[0]
+	child := e.node
+	if t.trackMutate {
+		t.trackChannel(child.mutateCh)
+	}
+
+	// Merge the nodes.
+	n.prefix = concat(n.prefix, child.prefix)
+	n.leaf = child.leaf
+	if len(child.edges) != 0 {
+		n.edges = make([]edge, len(child.edges))
+		copy(n.edges, child.edges)
+	} else {
+		n.edges = nil
+	}
+}
+
+// insert does a recursive insertion
+func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface{}, bool) {
+	// Handle key exhaustion
+	if len(search) == 0 {
+		var oldVal interface{}
+		didUpdate := false
+		if n.isLeaf() {
+			oldVal = n.leaf.val
+			didUpdate = true
+		}
+
+		nc := t.writeNode(n, true)
+		nc.leaf = &leafNode{
+			mutateCh: make(chan struct{}),
+			key:      k,
+			val:      v,
+		}
+		return nc, oldVal, didUpdate
+	}
+
+	// Look for the edge
+	idx, child := n.getEdge(search[0])
+
+	// No edge, create one
+	if child == nil {
+		e := edge{
+			label: search[0],
+			node: &Node{
+				mutateCh: make(chan struct{}),
+				leaf: &leafNode{
+					mutateCh: make(chan struct{}),
+					key:      k,
+					val:      v,
+				},
+				prefix: search,
+			},
+		}
+		nc := t.writeNode(n, false)
+		nc.addEdge(e)
+		return nc, nil, false
+	}
+
+	// Determine longest prefix of the search key on match
+	commonPrefix := longestPrefix(search, child.prefix)
+	if commonPrefix == len(child.prefix) {
+		search = search[commonPrefix:]
+		newChild, oldVal, didUpdate := t.insert(child, k, search, v)
+		if newChild != nil {
+			nc := t.writeNode(n, false)
+			nc.edges[idx].node = newChild
+			return nc, oldVal, didUpdate
+		}
+		return nil, oldVal, didUpdate
+	}
+
+	// Split the node
+	nc := t.writeNode(n, false)
+	splitNode := &Node{
+		mutateCh: make(chan struct{}),
+		prefix:   search[:commonPrefix],
+	}
+	nc.replaceEdge(edge{
+		label: search[0],
+		node:  splitNode,
+	})
+
+	// Restore the existing child node
+	modChild := t.writeNode(child, false)
+	splitNode.addEdge(edge{
+		label: modChild.prefix[commonPrefix],
+		node:  modChild,
+	})
+	modChild.prefix = modChild.prefix[commonPrefix:]
+
+	// Create a new leaf node
+	leaf := &leafNode{
+		mutateCh: make(chan struct{}),
+		key:      k,
+		val:      v,
+	}
+
+	// If the new key is a subset, add it to this node
+	search = search[commonPrefix:]
+	if len(search) == 0 {
+		splitNode.leaf = leaf
+		return nc, nil, false
+	}
+
+	// Create a new edge for the node
+	splitNode.addEdge(edge{
+		label: search[0],
+		node: &Node{
+			mutateCh: make(chan struct{}),
+			leaf:     leaf,
+			prefix:   search,
+		},
+	})
+	return nc, nil, false
+}
+
+// delete does a recursive deletion
+func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) {
+	// Check for key exhaustion
+	if len(search) == 0 {
+		if !n.isLeaf() {
+			return nil, nil
+		}
+		// Copy the pointer in case we are in a transaction that already
+		// modified this node since the node will be reused. Any changes
+		// made to the node will not affect returning the original leaf
+		// value.
+		oldLeaf := n.leaf
+
+		// Remove the leaf node
+		nc := t.writeNode(n, true)
+		nc.leaf = nil
+
+		// Check if this node should be merged
+		if n != t.root && len(nc.edges) == 1 {
+			t.mergeChild(nc)
+		}
+		return nc, oldLeaf
+	}
+
+	// Look for an edge
+	label := search[0]
+	idx, child := n.getEdge(label)
+	if child == nil || !bytes.HasPrefix(search, child.prefix) {
+		return nil, nil
+	}
+
+	// Consume the search prefix
+	search = search[len(child.prefix):]
+	newChild, leaf := t.delete(n, child, search)
+	if newChild == nil {
+		return nil, nil
+	}
+
+	// Copy this node. WATCH OUT - it's safe to pass "false" here because we
+	// will only ADD a leaf via nc.mergeChild() if there isn't one due to
+	// the !nc.isLeaf() check in the logic just below. This is pretty subtle,
+	// so be careful if you change any of the logic here.
+	nc := t.writeNode(n, false)
+
+	// Delete the edge if the node has no edges
+	if newChild.leaf == nil && len(newChild.edges) == 0 {
+		nc.delEdge(label)
+		if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
+			t.mergeChild(nc)
+		}
+	} else {
+		nc.edges[idx].node = newChild
+	}
+	return nc, leaf
+}
+
+// deletePrefix does a recursive deletion of the subtree under a prefix
+func (t *Txn) deletePrefix(parent, n *Node, search []byte) (*Node, int) {
+	// Check for key exhaustion
+	if len(search) == 0 {
+		nc := t.writeNode(n, true)
+		if n.isLeaf() {
+			nc.leaf = nil
+		}
+		nc.edges = nil
+		return nc, t.trackChannelsAndCount(n)
+	}
+
+	// Look for an edge
+	label := search[0]
+	idx, child := n.getEdge(label)
+	// We make sure that either the child node's prefix starts with the search
+	// term, or the search term starts with the child node's prefix. We need to
+	// do both so that we can delete prefixes that don't correspond to any node
+	// in the tree
+	if child == nil || (!bytes.HasPrefix(child.prefix, search) && !bytes.HasPrefix(search, child.prefix)) {
+		return nil, 0
+	}
+
+	// Consume the search prefix
+	if len(child.prefix) > len(search) {
+		search = []byte("")
+	} else {
+		search = search[len(child.prefix):]
+	}
+	newChild, numDeletions := t.deletePrefix(n, child, search)
+	if newChild == nil {
+		return nil, 0
+	}
+	// Copy this node. WATCH OUT - it's safe to pass "false" here because we
+	// will only ADD a leaf via nc.mergeChild() if there isn't one due to
+	// the !nc.isLeaf() check in the logic just below. This is pretty subtle,
+	// so be careful if you change any of the logic here.
+
+	nc := t.writeNode(n, false)
+
+	// Delete the edge if the node has no edges
+	if newChild.leaf == nil && len(newChild.edges) == 0 {
+		nc.delEdge(label)
+		if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
+			t.mergeChild(nc)
+		}
+	} else {
+		nc.edges[idx].node = newChild
+	}
+	return nc, numDeletions
+}
+
+// Insert is used to add or update a given key. The return provides
+// the previous value and a bool indicating if any was set.
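+//
+// A short usage sketch (hedged; the transaction comes from Tree.Txn and
+// the key is illustrative):
+//
+//	txn := tree.Txn()
+//	old, updated := txn.Insert([]byte("foo"), 1)
+//	// old == nil and updated == false the first time "foo" is set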
+func (t *Txn) Insert(k []byte, v interface{}) (interface{}, bool) { + newRoot, oldVal, didUpdate := t.insert(t.root, k, k, v) + if newRoot != nil { + t.root = newRoot + } + if !didUpdate { + t.size++ + } + return oldVal, didUpdate +} + +// Delete is used to delete a given key. Returns the old value if any, +// and a bool indicating if the key was set. +func (t *Txn) Delete(k []byte) (interface{}, bool) { + newRoot, leaf := t.delete(nil, t.root, k) + if newRoot != nil { + t.root = newRoot + } + if leaf != nil { + t.size-- + return leaf.val, true + } + return nil, false +} + +// DeletePrefix is used to delete an entire subtree that matches the prefix +// This will delete all nodes under that prefix +func (t *Txn) DeletePrefix(prefix []byte) bool { + newRoot, numDeletions := t.deletePrefix(nil, t.root, prefix) + if newRoot != nil { + t.root = newRoot + t.size = t.size - numDeletions + return true + } + return false + +} + +// Root returns the current root of the radix tree within this +// transaction. The root is not safe across insert and delete operations, +// but can be used to read the current state during a transaction. +func (t *Txn) Root() *Node { + return t.root +} + +// Get is used to lookup a specific key, returning +// the value and if it was found +func (t *Txn) Get(k []byte) (interface{}, bool) { + return t.root.Get(k) +} + +// GetWatch is used to lookup a specific key, returning +// the watch channel, value and if it was found +func (t *Txn) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) { + return t.root.GetWatch(k) +} + +// Commit is used to finalize the transaction and return a new tree. If mutation +// tracking is turned on then notifications will also be issued. +func (t *Txn) Commit() *Tree { + nt := t.CommitOnly() + if t.trackMutate { + t.Notify() + } + return nt +} + +// CommitOnly is used to finalize the transaction and return a new tree, but +// does not issue any notifications until Notify is called. +func (t *Txn) CommitOnly() *Tree { + nt := &Tree{t.root, t.size} + t.writable = nil + return nt +} + +// slowNotify does a complete comparison of the before and after trees in order +// to trigger notifications. This doesn't require any additional state but it +// is very expensive to compute. +func (t *Txn) slowNotify() { + snapIter := t.snap.rawIterator() + rootIter := t.root.rawIterator() + for snapIter.Front() != nil || rootIter.Front() != nil { + // If we've exhausted the nodes in the old snapshot, we know + // there's nothing remaining to notify. + if snapIter.Front() == nil { + return + } + snapElem := snapIter.Front() + + // If we've exhausted the nodes in the new root, we know we need + // to invalidate everything that remains in the old snapshot. We + // know from the loop condition there's something in the old + // snapshot. + if rootIter.Front() == nil { + close(snapElem.mutateCh) + if snapElem.isLeaf() { + close(snapElem.leaf.mutateCh) + } + snapIter.Next() + continue + } + + // Do one string compare so we can check the various conditions + // below without repeating the compare. + cmp := strings.Compare(snapIter.Path(), rootIter.Path()) + + // If the snapshot is behind the root, then we must have deleted + // this node during the transaction. + if cmp < 0 { + close(snapElem.mutateCh) + if snapElem.isLeaf() { + close(snapElem.leaf.mutateCh) + } + snapIter.Next() + continue + } + + // If the snapshot is ahead of the root, then we must have added + // this node during the transaction. 
+ if cmp > 0 { + rootIter.Next() + continue + } + + // If we have the same path, then we need to see if we mutated a + // node and possibly the leaf. + rootElem := rootIter.Front() + if snapElem != rootElem { + close(snapElem.mutateCh) + if snapElem.leaf != nil && (snapElem.leaf != rootElem.leaf) { + close(snapElem.leaf.mutateCh) + } + } + snapIter.Next() + rootIter.Next() + } +} + +// Notify is used along with TrackMutate to trigger notifications. This must +// only be done once a transaction is committed via CommitOnly, and it is called +// automatically by Commit. +func (t *Txn) Notify() { + if !t.trackMutate { + return + } + + // If we've overflowed the tracking state we can't use it in any way and + // need to do a full tree compare. + if t.trackOverflow { + t.slowNotify() + } else { + for ch := range t.trackChannels { + close(ch) + } + } + + // Clean up the tracking state so that a re-notify is safe (will trigger + // the else clause above which will be a no-op). + t.trackChannels = nil + t.trackOverflow = false +} + +// Insert is used to add or update a given key. The return provides +// the new tree, previous value and a bool indicating if any was set. +func (t *Tree) Insert(k []byte, v interface{}) (*Tree, interface{}, bool) { + txn := t.Txn() + old, ok := txn.Insert(k, v) + return txn.Commit(), old, ok +} + +// Delete is used to delete a given key. Returns the new tree, +// old value if any, and a bool indicating if the key was set. +func (t *Tree) Delete(k []byte) (*Tree, interface{}, bool) { + txn := t.Txn() + old, ok := txn.Delete(k) + return txn.Commit(), old, ok +} + +// DeletePrefix is used to delete all nodes starting with a given prefix. Returns the new tree, +// and a bool indicating if the prefix matched any nodes +func (t *Tree) DeletePrefix(k []byte) (*Tree, bool) { + txn := t.Txn() + ok := txn.DeletePrefix(k) + return txn.Commit(), ok +} + +// Root returns the root node of the tree which can be used for richer +// query operations. 
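+//
+// For example (a hedged sketch), an ordered walk of the whole tree:
+//
+//	it := tree.Root().Iterator()
+//	for k, v, ok := it.Next(); ok; k, v, ok = it.Next() {
+//		fmt.Println(string(k), v)
+//	}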
+func (t *Tree) Root() *Node {
+	return t.root
+}
+
+// Get is used to lookup a specific key, returning
+// the value and if it was found
+func (t *Tree) Get(k []byte) (interface{}, bool) {
+	return t.root.Get(k)
+}
+
+// longestPrefix finds the length of the shared prefix
+// of two strings
+func longestPrefix(k1, k2 []byte) int {
+	max := len(k1)
+	if l := len(k2); l < max {
+		max = l
+	}
+	var i int
+	for i = 0; i < max; i++ {
+		if k1[i] != k2[i] {
+			break
+		}
+	}
+	return i
+}
+
+// concat two byte slices, returning a third new copy
+func concat(a, b []byte) []byte {
+	c := make([]byte, len(a)+len(b))
+	copy(c, a)
+	copy(c[len(a):], b)
+	return c
+}
diff --git a/hotelReservation/vendor/github.com/hashicorp/go-immutable-radix/iter.go b/hotelReservation/vendor/github.com/hashicorp/go-immutable-radix/iter.go
new file mode 100644
index 000000000..9815e0253
--- /dev/null
+++ b/hotelReservation/vendor/github.com/hashicorp/go-immutable-radix/iter.go
@@ -0,0 +1,91 @@
+package iradix
+
+import "bytes"
+
+// Iterator is used to iterate over a set of nodes
+// in pre-order
+type Iterator struct {
+	node  *Node
+	stack []edges
+}
+
+// SeekPrefixWatch is used to seek the iterator to a given prefix
+// and returns the watch channel of the finest granularity
+func (i *Iterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) {
+	// Wipe the stack
+	i.stack = nil
+	n := i.node
+	watch = n.mutateCh
+	search := prefix
+	for {
+		// Check for key exhaustion
+		if len(search) == 0 {
+			i.node = n
+			return
+		}
+
+		// Look for an edge
+		_, n = n.getEdge(search[0])
+		if n == nil {
+			i.node = nil
+			return
+		}
+
+		// Update to the finest granularity as the search makes progress
+		watch = n.mutateCh
+
+		// Consume the search prefix
+		if bytes.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+
+		} else if bytes.HasPrefix(n.prefix, search) {
+			i.node = n
+			return
+		} else {
+			i.node = nil
+			return
+		}
+	}
+}
+
+// SeekPrefix is used to seek the iterator to a given prefix
+func (i *Iterator) SeekPrefix(prefix []byte) {
+	i.SeekPrefixWatch(prefix)
+}
+
+// Next returns the next node in order
+func (i *Iterator) Next() ([]byte, interface{}, bool) {
+	// Initialize our stack if needed
+	if i.stack == nil && i.node != nil {
+		i.stack = []edges{
+			edges{
+				edge{node: i.node},
+			},
+		}
+	}
+
+	for len(i.stack) > 0 {
+		// Inspect the last element of the stack
+		n := len(i.stack)
+		last := i.stack[n-1]
+		elem := last[0].node
+
+		// Update the stack
+		if len(last) > 1 {
+			i.stack[n-1] = last[1:]
+		} else {
+			i.stack = i.stack[:n-1]
+		}
+
+		// Push the edges onto the frontier
+		if len(elem.edges) > 0 {
+			i.stack = append(i.stack, elem.edges)
+		}
+
+		// Return the leaf values if any
+		if elem.leaf != nil {
+			return elem.leaf.key, elem.leaf.val, true
+		}
+	}
+	return nil, nil, false
+}
diff --git a/hotelReservation/vendor/github.com/hashicorp/go-immutable-radix/node.go b/hotelReservation/vendor/github.com/hashicorp/go-immutable-radix/node.go
new file mode 100644
index 000000000..7a065e7a0
--- /dev/null
+++ b/hotelReservation/vendor/github.com/hashicorp/go-immutable-radix/node.go
@@ -0,0 +1,292 @@
+package iradix
+
+import (
+	"bytes"
+	"sort"
+)
+
+// WalkFn is used when walking the tree. Takes a
+// key and value, returning if iteration should
+// be terminated.
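+//
+// For example, a WalkFn that collects every key and never terminates
+// the walk early (a hedged sketch):
+//
+//	var keys []string
+//	fn := func(k []byte, v interface{}) bool {
+//		keys = append(keys, string(k))
+//		return false // false means keep walking
+//	}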
+type WalkFn func(k []byte, v interface{}) bool + +// leafNode is used to represent a value +type leafNode struct { + mutateCh chan struct{} + key []byte + val interface{} +} + +// edge is used to represent an edge node +type edge struct { + label byte + node *Node +} + +// Node is an immutable node in the radix tree +type Node struct { + // mutateCh is closed if this node is modified + mutateCh chan struct{} + + // leaf is used to store possible leaf + leaf *leafNode + + // prefix is the common prefix we ignore + prefix []byte + + // Edges should be stored in-order for iteration. + // We avoid a fully materialized slice to save memory, + // since in most cases we expect to be sparse + edges edges +} + +func (n *Node) isLeaf() bool { + return n.leaf != nil +} + +func (n *Node) addEdge(e edge) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= e.label + }) + n.edges = append(n.edges, e) + if idx != num { + copy(n.edges[idx+1:], n.edges[idx:num]) + n.edges[idx] = e + } +} + +func (n *Node) replaceEdge(e edge) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= e.label + }) + if idx < num && n.edges[idx].label == e.label { + n.edges[idx].node = e.node + return + } + panic("replacing missing edge") +} + +func (n *Node) getEdge(label byte) (int, *Node) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + return idx, n.edges[idx].node + } + return -1, nil +} + +func (n *Node) delEdge(label byte) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + copy(n.edges[idx:], n.edges[idx+1:]) + n.edges[len(n.edges)-1] = edge{} + n.edges = n.edges[:len(n.edges)-1] + } +} + +func (n *Node) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) { + search := k + watch := n.mutateCh + for { + // Check for key exhaustion + if len(search) == 0 { + if n.isLeaf() { + return n.leaf.mutateCh, n.leaf.val, true + } + break + } + + // Look for an edge + _, n = n.getEdge(search[0]) + if n == nil { + break + } + + // Update to the finest granularity as the search makes progress + watch = n.mutateCh + + // Consume the search prefix + if bytes.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + return watch, nil, false +} + +func (n *Node) Get(k []byte) (interface{}, bool) { + _, val, ok := n.GetWatch(k) + return val, ok +} + +// LongestPrefix is like Get, but instead of an +// exact match, it will return the longest prefix match. 
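+//
+// For example, with "foo" and "foobar" in the tree (mirroring the
+// README example above):
+//
+//	m, _, ok := n.LongestPrefix([]byte("foozip"))
+//	// m == []byte("foo"), ok == true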
+func (n *Node) LongestPrefix(k []byte) ([]byte, interface{}, bool) {
+	var last *leafNode
+	search := k
+	for {
+		// Look for a leaf node
+		if n.isLeaf() {
+			last = n.leaf
+		}
+
+		// Check for key exhaustion
+		if len(search) == 0 {
+			break
+		}
+
+		// Look for an edge
+		_, n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if bytes.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+	if last != nil {
+		return last.key, last.val, true
+	}
+	return nil, nil, false
+}
+
+// Minimum is used to return the minimum value in the tree
+func (n *Node) Minimum() ([]byte, interface{}, bool) {
+	for {
+		if n.isLeaf() {
+			return n.leaf.key, n.leaf.val, true
+		}
+		if len(n.edges) > 0 {
+			n = n.edges[0].node
+		} else {
+			break
+		}
+	}
+	return nil, nil, false
+}
+
+// Maximum is used to return the maximum value in the tree
+func (n *Node) Maximum() ([]byte, interface{}, bool) {
+	for {
+		if num := len(n.edges); num > 0 {
+			n = n.edges[num-1].node
+			continue
+		}
+		if n.isLeaf() {
+			return n.leaf.key, n.leaf.val, true
+		} else {
+			break
+		}
+	}
+	return nil, nil, false
+}
+
+// Iterator is used to return an iterator at
+// the given node to walk the tree
+func (n *Node) Iterator() *Iterator {
+	return &Iterator{node: n}
+}
+
+// rawIterator is used to return a raw iterator at the given node to walk the
+// tree.
+func (n *Node) rawIterator() *rawIterator {
+	iter := &rawIterator{node: n}
+	iter.Next()
+	return iter
+}
+
+// Walk is used to walk the tree
+func (n *Node) Walk(fn WalkFn) {
+	recursiveWalk(n, fn)
+}
+
+// WalkPrefix is used to walk the tree under a prefix
+func (n *Node) WalkPrefix(prefix []byte, fn WalkFn) {
+	search := prefix
+	for {
+		// Check for key exhaustion
+		if len(search) == 0 {
+			recursiveWalk(n, fn)
+			return
+		}
+
+		// Look for an edge
+		_, n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if bytes.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+
+		} else if bytes.HasPrefix(n.prefix, search) {
+			// Child may be under our search prefix
+			recursiveWalk(n, fn)
+			return
+		} else {
+			break
+		}
+	}
+}
+
+// WalkPath is used to walk the tree, but only visiting nodes
+// from the root down to a given leaf. Where WalkPrefix walks
+// all the entries *under* the given prefix, this walks the
+// entries *above* the given prefix.
+func (n *Node) WalkPath(path []byte, fn WalkFn) {
+	search := path
+	for {
+		// Visit the leaf values if any
+		if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
+			return
+		}
+
+		// Check for key exhaustion
+		if len(search) == 0 {
+			return
+		}
+
+		// Look for an edge
+		_, n = n.getEdge(search[0])
+		if n == nil {
+			return
+		}
+
+		// Consume the search prefix
+		if bytes.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+}
+
+// recursiveWalk is used to do a pre-order walk of a node
+// recursively.
Returns true if the walk should be aborted +func recursiveWalk(n *Node, fn WalkFn) bool { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return true + } + + // Recurse on the children + for _, e := range n.edges { + if recursiveWalk(e.node, fn) { + return true + } + } + return false +} diff --git a/hotelReservation/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go b/hotelReservation/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go new file mode 100644 index 000000000..04814c132 --- /dev/null +++ b/hotelReservation/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go @@ -0,0 +1,78 @@ +package iradix + +// rawIterator visits each of the nodes in the tree, even the ones that are not +// leaves. It keeps track of the effective path (what a leaf at a given node +// would be called), which is useful for comparing trees. +type rawIterator struct { + // node is the starting node in the tree for the iterator. + node *Node + + // stack keeps track of edges in the frontier. + stack []rawStackEntry + + // pos is the current position of the iterator. + pos *Node + + // path is the effective path of the current iterator position, + // regardless of whether the current node is a leaf. + path string +} + +// rawStackEntry is used to keep track of the cumulative common path as well as +// its associated edges in the frontier. +type rawStackEntry struct { + path string + edges edges +} + +// Front returns the current node that has been iterated to. +func (i *rawIterator) Front() *Node { + return i.pos +} + +// Path returns the effective path of the current node, even if it's not actually +// a leaf. +func (i *rawIterator) Path() string { + return i.path +} + +// Next advances the iterator to the next node. +func (i *rawIterator) Next() { + // Initialize our stack if needed. + if i.stack == nil && i.node != nil { + i.stack = []rawStackEntry{ + rawStackEntry{ + edges: edges{ + edge{node: i.node}, + }, + }, + } + } + + for len(i.stack) > 0 { + // Inspect the last element of the stack. + n := len(i.stack) + last := i.stack[n-1] + elem := last.edges[0].node + + // Update the stack. + if len(last.edges) > 1 { + i.stack[n-1].edges = last.edges[1:] + } else { + i.stack = i.stack[:n-1] + } + + // Push the edges onto the frontier. + if len(elem.edges) > 0 { + path := last.path + string(elem.prefix) + i.stack = append(i.stack, rawStackEntry{path, elem.edges}) + } + + i.pos = elem + i.path = last.path + string(elem.prefix) + return + } + + i.pos = nil + i.path = "" +} diff --git a/hotelReservation/vendor/github.com/hashicorp/golang-lru/LICENSE b/hotelReservation/vendor/github.com/hashicorp/golang-lru/LICENSE new file mode 100644 index 000000000..be2cc4dfb --- /dev/null +++ b/hotelReservation/vendor/github.com/hashicorp/golang-lru/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. 
"Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. 
for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. 
Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. 
Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. 
Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. diff --git a/hotelReservation/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/hotelReservation/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go new file mode 100644 index 000000000..5673773b2 --- /dev/null +++ b/hotelReservation/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go @@ -0,0 +1,161 @@ +package simplelru + +import ( + "container/list" + "errors" +) + +// EvictCallback is used to get a callback when a cache entry is evicted +type EvictCallback func(key interface{}, value interface{}) + +// LRU implements a non-thread safe fixed size LRU cache +type LRU struct { + size int + evictList *list.List + items map[interface{}]*list.Element + onEvict EvictCallback +} + +// entry is used to hold a value in the evictList +type entry struct { + key interface{} + value interface{} +} + +// NewLRU constructs an LRU of the given size +func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { + if size <= 0 { + return nil, errors.New("Must provide a positive size") + } + c := &LRU{ + size: size, + evictList: list.New(), + items: make(map[interface{}]*list.Element), + onEvict: onEvict, + } + return c, nil +} + +// Purge is used to completely clear the cache. +func (c *LRU) Purge() { + for k, v := range c.items { + if c.onEvict != nil { + c.onEvict(k, v.Value.(*entry).value) + } + delete(c.items, k) + } + c.evictList.Init() +} + +// Add adds a value to the cache. Returns true if an eviction occurred. +func (c *LRU) Add(key, value interface{}) (evicted bool) { + // Check for existing item + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + ent.Value.(*entry).value = value + return false + } + + // Add new item + ent := &entry{key, value} + entry := c.evictList.PushFront(ent) + c.items[key] = entry + + evict := c.evictList.Len() > c.size + // Verify size not exceeded + if evict { + c.removeOldest() + } + return evict +} + +// Get looks up a key's value from the cache. +func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + return ent.Value.(*entry).value, true + } + return +} + +// Contains checks if a key is in the cache, without updating the recent-ness +// or deleting it for being stale. +func (c *LRU) Contains(key interface{}) (ok bool) { + _, ok = c.items[key] + return ok +} + +// Peek returns the key value (or undefined if not found) without updating +// the "recently used"-ness of the key. 
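+//
+// For example (a hedged sketch):
+//
+//	v, ok := c.Peek("k") // lookup without promoting "k" in the eviction order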
+func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) { + var ent *list.Element + if ent, ok = c.items[key]; ok { + return ent.Value.(*entry).value, true + } + return nil, ok +} + +// Remove removes the provided key from the cache, returning if the +// key was contained. +func (c *LRU) Remove(key interface{}) (present bool) { + if ent, ok := c.items[key]; ok { + c.removeElement(ent) + return true + } + return false +} + +// RemoveOldest removes the oldest item from the cache. +func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) { + ent := c.evictList.Back() + if ent != nil { + c.removeElement(ent) + kv := ent.Value.(*entry) + return kv.key, kv.value, true + } + return nil, nil, false +} + +// GetOldest returns the oldest entry +func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) { + ent := c.evictList.Back() + if ent != nil { + kv := ent.Value.(*entry) + return kv.key, kv.value, true + } + return nil, nil, false +} + +// Keys returns a slice of the keys in the cache, from oldest to newest. +func (c *LRU) Keys() []interface{} { + keys := make([]interface{}, len(c.items)) + i := 0 + for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() { + keys[i] = ent.Value.(*entry).key + i++ + } + return keys +} + +// Len returns the number of items in the cache. +func (c *LRU) Len() int { + return c.evictList.Len() +} + +// removeOldest removes the oldest item from the cache. +func (c *LRU) removeOldest() { + ent := c.evictList.Back() + if ent != nil { + c.removeElement(ent) + } +} + +// removeElement is used to remove a given list element from the cache +func (c *LRU) removeElement(e *list.Element) { + c.evictList.Remove(e) + kv := e.Value.(*entry) + delete(c.items, kv.key) + if c.onEvict != nil { + c.onEvict(kv.key, kv.value) + } +} diff --git a/hotelReservation/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/hotelReservation/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go new file mode 100644 index 000000000..74c707744 --- /dev/null +++ b/hotelReservation/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go @@ -0,0 +1,36 @@ +package simplelru + +// LRUCache is the interface for simple LRU cache. +type LRUCache interface { + // Adds a value to the cache, returns true if an eviction occurred and + // updates the "recently used"-ness of the key. + Add(key, value interface{}) bool + + // Returns key's value from the cache and + // updates the "recently used"-ness of the key. #value, isFound + Get(key interface{}) (value interface{}, ok bool) + + // Check if a key exists in cache without updating the recent-ness. + Contains(key interface{}) (ok bool) + + // Returns key's value without updating the "recently used"-ness of the key. + Peek(key interface{}) (value interface{}, ok bool) + + // Removes a key from the cache. + Remove(key interface{}) bool + + // Removes the oldest entry from cache. + RemoveOldest() (interface{}, interface{}, bool) + + // Returns the oldest entry from the cache. #key, value, isFound + GetOldest() (interface{}, interface{}, bool) + + // Returns a slice of the keys in the cache, from oldest to newest. + Keys() []interface{} + + // Returns the number of items in the cache.
+ Len() int + + // Clear all cache entries + Purge() +} diff --git a/hotelReservation/vendor/github.com/hashicorp/serf/coordinate/client.go b/hotelReservation/vendor/github.com/hashicorp/serf/coordinate/client.go index 613bfff89..3582ee4da 100644 --- a/hotelReservation/vendor/github.com/hashicorp/serf/coordinate/client.go +++ b/hotelReservation/vendor/github.com/hashicorp/serf/coordinate/client.go @@ -6,6 +6,8 @@ import ( "sort" "sync" "time" + + "github.com/armon/go-metrics" ) // Client manages the estimated network coordinate for a given node, and adjusts @@ -34,10 +36,20 @@ type Client struct { // value to determine how many samples we keep, per node. latencyFilterSamples map[string][]float64 + // stats is used to record events that occur when updating coordinates. + stats ClientStats + // mutex enables safe concurrent access to the client. mutex sync.RWMutex } +// ClientStats is used to record events that occur when updating coordinates. +type ClientStats struct { + // Resets is incremented any time we reset our local coordinate because + // our calculations have resulted in an invalid state. + Resets int +} + // NewClient creates a new Client and verifies the configuration is valid. func NewClient(config *Config) (*Client, error) { if !(config.Dimensionality > 0) { @@ -63,11 +75,16 @@ func (c *Client) GetCoordinate() *Coordinate { } // SetCoordinate forces the client's coordinate to a known state. -func (c *Client) SetCoordinate(coord *Coordinate) { +func (c *Client) SetCoordinate(coord *Coordinate) error { c.mutex.Lock() defer c.mutex.Unlock() + if err := c.checkCoordinate(coord); err != nil { + return err + } + c.coord = coord.Clone() + return nil } // ForgetNode removes any client state for the given node. @@ -78,6 +95,29 @@ func (c *Client) ForgetNode(node string) { delete(c.latencyFilterSamples, node) } +// Stats returns a copy of stats for the client. +func (c *Client) Stats() ClientStats { + c.mutex.Lock() + defer c.mutex.Unlock() + + return c.stats +} + +// checkCoordinate returns an error if the coordinate isn't compatible with +// this client, or if the coordinate itself isn't valid. This assumes the mutex +// has been locked already. +func (c *Client) checkCoordinate(coord *Coordinate) error { + if !c.coord.IsCompatibleWith(coord) { + return fmt.Errorf("dimensions aren't compatible") + } + + if !coord.IsValid() { + return fmt.Errorf("coordinate is invalid") + } + + return nil +} + // latencyFilter applies a simple moving median filter with a new sample for // a node. This assumes that the mutex has been locked already. func (c *Client) latencyFilter(node string, rttSeconds float64) float64 { @@ -159,15 +199,38 @@ func (c *Client) updateGravity() { // Update takes other, a coordinate for another node, and rtt, a round trip // time observation for a ping to that node, and updates the estimated position of // the client's coordinate. Returns the updated coordinate. -func (c *Client) Update(node string, other *Coordinate, rtt time.Duration) *Coordinate { +func (c *Client) Update(node string, other *Coordinate, rtt time.Duration) (*Coordinate, error) { c.mutex.Lock() defer c.mutex.Unlock() + if err := c.checkCoordinate(other); err != nil { + return nil, err + } + + // The code down below can handle zero RTTs, which we have seen in + // https://github.com/hashicorp/consul/issues/3789, presumably in + // environments with coarse-grained monotonic clocks (we are still + // trying to pin this down). 
In any event, this is ok from a code PoV + // so we don't need to alert operators with spammy messages. We did + // add a counter so this is still observable, though. + const maxRTT = 10 * time.Second + if rtt < 0 || rtt > maxRTT { + return nil, fmt.Errorf("round trip time not in valid range, duration %v is not a positive value less than %v ", rtt, maxRTT) + } + if rtt == 0 { + metrics.IncrCounter([]string{"serf", "coordinate", "zero-rtt"}, 1) + } + rttSeconds := c.latencyFilter(node, rtt.Seconds()) c.updateVivaldi(other, rttSeconds) c.updateAdjustment(other, rttSeconds) c.updateGravity() - return c.coord.Clone() + if !c.coord.IsValid() { + c.stats.Resets++ + c.coord = NewCoordinate(c.config) + } + + return c.coord.Clone(), nil } // DistanceTo returns the estimated RTT from the client's coordinate to other, the diff --git a/hotelReservation/vendor/github.com/hashicorp/serf/coordinate/coordinate.go b/hotelReservation/vendor/github.com/hashicorp/serf/coordinate/coordinate.go index c9194e048..fbe792c90 100644 --- a/hotelReservation/vendor/github.com/hashicorp/serf/coordinate/coordinate.go +++ b/hotelReservation/vendor/github.com/hashicorp/serf/coordinate/coordinate.go @@ -72,6 +72,26 @@ func (c *Coordinate) Clone() *Coordinate { } } +// componentIsValid returns false if a floating point value is a NaN or an +// infinity. +func componentIsValid(f float64) bool { + return !math.IsInf(f, 0) && !math.IsNaN(f) +} + +// IsValid returns false if any component of a coordinate isn't valid, per the +// componentIsValid() helper above. +func (c *Coordinate) IsValid() bool { + for i := range c.Vec { + if !componentIsValid(c.Vec[i]) { + return false + } + } + + return componentIsValid(c.Error) && + componentIsValid(c.Adjustment) && + componentIsValid(c.Height) +} + // IsCompatibleWith checks to see if the two coordinates are compatible // dimensionally. If this returns true then you are guaranteed to not get // any runtime errors operating on them. @@ -122,7 +142,7 @@ func (c *Coordinate) rawDistanceTo(other *Coordinate) float64 { // already been checked to be compatible. func add(vec1 []float64, vec2 []float64) []float64 { ret := make([]float64, len(vec1)) - for i, _ := range ret { + for i := range ret { ret[i] = vec1[i] + vec2[i] } return ret @@ -132,7 +152,7 @@ func add(vec1 []float64, vec2 []float64) []float64 { // dimensions have already been checked to be compatible. func diff(vec1 []float64, vec2 []float64) []float64 { ret := make([]float64, len(vec1)) - for i, _ := range ret { + for i := range ret { ret[i] = vec1[i] - vec2[i] } return ret @@ -141,7 +161,7 @@ func diff(vec1 []float64, vec2 []float64) []float64 { // mul returns vec multiplied by a scalar factor. func mul(vec []float64, factor float64) []float64 { ret := make([]float64, len(vec)) - for i, _ := range vec { + for i := range vec { ret[i] = vec[i] * factor } return ret @@ -150,7 +170,7 @@ func mul(vec []float64, factor float64) []float64 { // magnitude computes the magnitude of the vec. func magnitude(vec []float64) float64 { sum := 0.0 - for i, _ := range vec { + for i := range vec { sum += vec[i] * vec[i] } return math.Sqrt(sum) @@ -168,7 +188,7 @@ func unitVectorAt(vec1 []float64, vec2 []float64) ([]float64, float64) { } // Otherwise, just return a random unit vector. 
- for i, _ := range ret { + for i := range ret { ret[i] = rand.Float64() - 0.5 } if mag := magnitude(ret); mag > zeroThreshold { diff --git a/hotelReservation/vendor/github.com/hashicorp/serf/ops-misc/debian/copyright b/hotelReservation/vendor/github.com/hashicorp/serf/ops-misc/debian/copyright deleted file mode 100644 index 21a1a1b53..000000000 --- a/hotelReservation/vendor/github.com/hashicorp/serf/ops-misc/debian/copyright +++ /dev/null @@ -1,2 +0,0 @@ -Name: serf -Copyright: Hashicorp 2013 diff --git a/hotelReservation/vendor/github.com/hashicorp/serf/website/source/LICENSE b/hotelReservation/vendor/github.com/hashicorp/serf/website/source/LICENSE deleted file mode 100644 index 36c29d7f7..000000000 --- a/hotelReservation/vendor/github.com/hashicorp/serf/website/source/LICENSE +++ /dev/null @@ -1,10 +0,0 @@ -# Proprietary License - -This license is temporary while a more official one is drafted. However, -this should make it clear: - -* The text contents of this website are MPL 2.0 licensed. - -* The design contents of this website are proprietary and may not be reproduced - or reused in any way other than to run the Serf website locally. The license - for the design is owned solely by HashiCorp, Inc. diff --git a/hotelReservation/vendor/github.com/klauspost/compress/.gitattributes b/hotelReservation/vendor/github.com/klauspost/compress/.gitattributes new file mode 100644 index 000000000..402433593 --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/.gitattributes @@ -0,0 +1,2 @@ +* -text +*.bin -text -diff diff --git a/hotelReservation/vendor/github.com/klauspost/compress/.gitignore b/hotelReservation/vendor/github.com/klauspost/compress/.gitignore new file mode 100644 index 000000000..b35f8449b --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +/s2/cmd/_s2sx/sfx-exe diff --git a/hotelReservation/vendor/github.com/klauspost/compress/.goreleaser.yml b/hotelReservation/vendor/github.com/klauspost/compress/.goreleaser.yml new file mode 100644 index 000000000..c9014ce1d --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -0,0 +1,137 @@ +# This is an example goreleaser.yaml file with some sane defaults. 
+# Make sure to check the documentation at http://goreleaser.com +before: + hooks: + - ./gen.sh + +builds: + - + id: "s2c" + binary: s2c + main: ./s2/cmd/s2c/main.go + flags: + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + - + id: "s2d" + binary: s2d + main: ./s2/cmd/s2d/main.go + flags: + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + - + id: "s2sx" + binary: s2sx + main: ./s2/cmd/_s2sx/main.go + flags: + - -modfile=s2sx.mod + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + +archives: + - + id: s2-binaries + name_template: "s2-{{ .Os }}_{{ .Arch }}_{{ .Version }}" + replacements: + aix: AIX + darwin: OSX + linux: Linux + windows: Windows + 386: i386 + amd64: x86_64 + freebsd: FreeBSD + netbsd: NetBSD + format_overrides: + - goos: windows + format: zip + files: + - unpack/* + - s2/LICENSE + - s2/README.md +checksum: + name_template: 'checksums.txt' +snapshot: + name_template: "{{ .Tag }}-next" +changelog: + sort: asc + filters: + exclude: + - '^doc:' + - '^docs:' + - '^test:' + - '^tests:' + - '^Update\sREADME.md' + +nfpms: + - + file_name_template: "s2_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}" + vendor: Klaus Post + homepage: https://github.com/klauspost/compress + maintainer: Klaus Post + description: S2 Compression Tool + license: BSD 3-Clause + formats: + - deb + - rpm + replacements: + darwin: Darwin + linux: Linux + freebsd: FreeBSD + amd64: x86_64 diff --git a/hotelReservation/vendor/github.com/klauspost/compress/LICENSE b/hotelReservation/vendor/github.com/klauspost/compress/LICENSE new file mode 100644 index 000000000..87d557477 --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/LICENSE @@ -0,0 +1,304 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ + +Files: gzhttp/* + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-2017 The New York Times Company + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------ + +Files: s2/cmd/internal/readahead/* + +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--------------------- +Files: snappy/* +Files: internal/snapref/* + +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +----------------- + +Files: s2/cmd/internal/filepathx/* + +Copyright 2016 The filepathx Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/hotelReservation/vendor/github.com/klauspost/compress/README.md b/hotelReservation/vendor/github.com/klauspost/compress/README.md new file mode 100644 index 000000000..3429879eb --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/README.md @@ -0,0 +1,438 @@ +# compress + +This package provides various compression algorithms. + +* [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression in pure Go. +* [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) is a high performance replacement for Snappy. +* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). +* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams. +* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding. +* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently. +* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation. +* [fuzz package](https://github.com/klauspost/compress-fuzz) for fuzz testing all compressors/decompressors here. 
+ +[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories) +[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) +[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge) + +# changelog + +* Aug 30, 2021 (v1.13.5) + * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425) + * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413) + * zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426) + * Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421) + +* Aug 12, 2021 (v1.13.4) + * Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy). + * zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415) + +* Aug 3, 2021 (v1.13.3) + * zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404) + * zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411) + * gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406) + * s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399) + * zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401) + * zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410) + +* Jun 14, 2021 (v1.13.1) + * s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396) + * zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394) + * gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389) + * s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395) + +* Jun 3, 2021 (v1.13.0) + * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors. + * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) + * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) + +* May 25, 2021 (v1.12.3) + * deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374) + * deflate: Allocate less for history. 
[#375](https://github.com/klauspost/compress/pull/375) + * zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373) + +* Apr 27, 2021 (v1.12.2) + * zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365) + * zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363) + * deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367) + * s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/359) + * s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362) + * s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368) + +* Apr 14, 2021 (v1.12.1) + * snappy package removed. Upstream added as dependency. + * s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353) + * s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352) + * s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348) + * s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352) + * zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346) + * s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349) + +
+ + See changes prior to v1.12.1 + +* Mar 26, 2021 (v1.11.13) + * zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345) + * zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336) + * deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338) + * s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341) + +* Mar 5, 2021 (v1.11.12) + * s2: Add `s2sx` binary that creates [self extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives). + * s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328) + +* Mar 1, 2021 (v1.11.9) + * s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324) + * s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325) + * s2: Fix binaries. + +* Feb 25, 2021 (v1.11.8) + * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended. + * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315) + * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322) + * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314) + * zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313) + +* Jan 14, 2021 (v1.11.7) + * Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309) + * s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310) + * s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. [#311](https://github.com/klauspost/compress/pull/311) + * s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308) + * s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312) + +* Jan 7, 2021 (v1.11.6) + * zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306) + * zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305) + +* Dec 20, 2020 (v1.11.4) + * zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304) + * Add header decoder [#299](https://github.com/klauspost/compress/pull/299) + * s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297) + * Simplify/speed up small blocks with known max size.
[#300](https://github.com/klauspost/compress/pull/300) + * zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303) + +* Nov 15, 2020 (v1.11.3) + * inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293) + * zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295) + +* Oct 11, 2020 (v1.11.2) + * s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291) + +* Oct 1, 2020 (v1.11.1) + * zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286) + +* Sept 8, 2020 (v1.11.0) + * zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281) + * zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282) + * inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274) +
+ +
+ See changes prior to v1.11.0 + +* July 8, 2020 (v1.10.11) + * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278) + * huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275) + +* June 23, 2020 (v1.10.10) + * zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270) + +* June 16, 2020 (v1.10.9): + * zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268) + * zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266) + * Fuzzit tests removed. The service has been purchased and is no longer available. + +* June 5, 2020 (v1.10.8): + * 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265) + +* June 1, 2020 (v1.10.7): + * Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) + * Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259) + * Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263) + +* May 21, 2020: (v1.10.6) + * zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252) + * zstd: Stricter decompression checks. + +* April 12, 2020: (v1.10.5) + * s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239) + +* Apr 8, 2020: (v1.10.4) + * zstd: Minor/special case optimizations. [#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247) +* Mar 11, 2020: (v1.10.3) + * s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245) + * s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244) + * zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240) + * zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241) + * zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238) + +* Feb 27, 2020: (v1.10.2) + * Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232) + * Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227) + +* Feb 18, 2020: (v1.10.1) + * Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226) + * deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224) + * Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224) + +* Feb 4, 2020: (v1.10.0) + * Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. 
[#216](https://github.com/klauspost/compress/pull/216) + * Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218) + * Allow copying content from an existing ZIP file without decompressing+compressing. [#214](https://github.com/klauspost/compress/pull/214) + * Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186) + +
+ +
+ See changes prior to v1.10.0 + +* Jan 20, 2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206). +* Jan 11, 2020: S2 Encode/Decode will use provided buffer if capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204) +* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed. +* Jan 4, 2020: (v1.9.6) Regression in v1.9.5 fixed causing corrupt zstd encodes in rare cases. +* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192) +* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder. +* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199) +* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features +* Dec 29, 2019: zstd: Single segment flag conditions tweaked. [#197](https://github.com/klauspost/compress/pull/197) +* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198) +* Dec 10, 2019: s2: Fix repeat length output when just above the 16MB limit. +* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191) +* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188) +* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187) +* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines. +* Nov 28, 2019 (v1.9.3) Less allocations in stateless deflate. +* Nov 28, 2019: 5-20% Faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184) +* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate. +* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180) +* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB. +* Nov 11, 2019: Reduce inflate memory use by 1KB. +* Nov 10, 2019: Less allocations in deflate bit writer. +* Nov 10, 2019: Fix inconsistent error returned by zstd decoder. +* Oct 28, 2019 (v1.9.1) zstd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174) +* Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173) +* Oct 24, 2019 zstd: Fix huff0 out of buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172) +* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105) + +
+ +
+ See changes prior to v1.9.0 + +* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169) +* Oct 3, 2019: Fix inconsistent results on broken zstd streams. +* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) +* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools). +* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip). +* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes). +* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds full zero payload block encoding option. +* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables. +* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode. +* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding. +* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy. +* Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing. +* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing. +* Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147) +* Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146) +* Aug 4, 2019: Faster zstd compression. [#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144) +* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142) +* July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder. +* July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder. +* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content. +* July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix. +* June 17, 2019: zstd decompression bugfix. +* June 17, 2019: fix 32 bit builds. +* June 17, 2019: Easier use in modules (less dependencies). +* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio. +* June 5, 2019: 20-40% throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and better compression. +* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels. +* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression! +* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels. +* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added. +* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression). 
+* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below. +* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0). +* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change. +* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change. +* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function. +* May 28, 2017: Reduce allocations when resetting decoder. +* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7. +* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625). +* Oct 25, 2016: Levels 2-4 have been rewritten and now offer significantly better performance than before. +* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update. +* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level. +* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression. +* Mar 24, 2016: Small speedup for level 1-3. +* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster. +* Feb 19, 2016: Handle small payloads faster in level 1-3. +* Feb 19, 2016: Added faster level 2 + 3 compression modes. +* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5. +* Feb 14, 2016: Snappy: Merge upstream changes. +* Feb 14, 2016: Snappy: Fix aggressive skipping. +* Feb 14, 2016: Snappy: Update benchmark. +* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression. +* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy-to-compress material run faster. Typical speedup is around 25%. +* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard-to-compress content. +* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup. +* Jan 16, 2016: Optimization on deflate level 1,2,3 compression. +* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives. +* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs. +* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms. +* Dec 8 2015: Fixed rare [one-byte out-of-bounds read](https://github.com/klauspost/compress/issues/20). Please update! +* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet). +* Nov 20 2015: Small optimization to bit writer on 64 bit systems. +* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15). +* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate.
+* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file +* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x. + +
+ +# deflate usage + +* [High Throughput Benchmark](http://blog.klauspost.com/go-gzipdeflate-benchmarks/). +* [Small Payload/Webserver Benchmarks](http://blog.klauspost.com/gzip-performance-for-go-webservers/). +* [Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/). +* [Re-balancing Deflate Compression Levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) + +The packages are drop-in replacements for standard libraries. Simply replace the import path to use them: + +| old import | new import | Documentation +|--------------------|-----------------------------------------|--------------------| +| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) +| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) +| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) +| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) + +* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). + +You may also be interested in [pgzip](https://github.com/klauspost/pgzip), which is a drop in replacement for gzip, which support multithreaded compression on big files and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages. + +The packages contains the same as the standard library, so you can use the godoc for that: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/). + +Currently there is only minor speedup on decompression (mostly CRC32 calculation). + +Memory usage is typically 1MB for a Writer. stdlib is in the same range. +If you expect to have a lot of concurrently allocated Writers consider using +the stateless compress described below. + +# Stateless compression + +This package offers stateless compression as a special option for gzip/deflate. +It will do compression but without maintaining any state between Write calls. + +This means there will be no memory kept between Write calls, but compression and speed will be suboptimal. + +This is only relevant in cases where you expect to run many thousands of compressors concurrently, +but with very little activity. This is *not* intended for regular web servers serving individual requests. + +Because of this, the size of actual Write calls will affect output size. + +In gzip, specify level `-3` / `gzip.StatelessCompression` to enable. + +For direct deflate use, NewStatelessWriter and StatelessDeflate are available. See [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter) + +A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer: + +``` + // replace 'ioutil.Discard' with your output. 
+	gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression)
+	if err != nil {
+		return err
+	}
+	defer gzw.Close()
+
+	w := bufio.NewWriterSize(gzw, 4096)
+	defer w.Flush()
+
+	// Write to 'w'
+```
+
+This will only use up to 4KB in memory when the writer is idle.
+
+Compression is almost always worse than the fastest compression level
+and each write will allocate (a little) memory.
+
+# Performance Update 2018
+
+It has been a while since we last looked at the speed of this package compared to the standard library, so I thought I would re-run my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD.
+
+The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates I could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet.
+
+The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* - relative speed compared to the standard library at the same level. *Smaller* - how much smaller the compressed output is compared to stdlib, in percent. Negative means the output was bigger. *Loss* - the loss (or gain) in compression as a percentage difference of the input.
+
+The `gzstd` (standard library gzip) and `gzkp` (this package's gzip) use only one CPU core. [`pgzip`](https://github.com/klauspost/pgzip) and [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) use all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet).
+
+
+## Overall differences
+
+There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels.
+
+The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted my library to give a smoother transition between the compression levels than the standard library.
+
+This package attempts to provide a smoother transition, where "1" takes a lot of shortcuts, "5" is a reasonable trade-off, "9" gives the best compression, and the values in between give something reasonable in between. The standard library has big differences in levels 1-4, while levels 5-9 offer no significant gains - often spending a lot more time than can be justified by the achieved compression.
+
+There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab.
+
+## Web Content
+
+This test set aims to emulate typical use in a web server. The test set is 4GB of data in 53k files, and is a mixture of (mostly) HTML, JS and CSS.
+
+Since levels 1 and 9 are close to being the same code as the standard library, their results are quite close. But looking at the levels in between, the differences are quite big.
+
+Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth.
+You can draw your own conclusions on what would be the most expensive for your case.
+
+## Object files
+
+This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.
+
+The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but sacrifice quite a bit of compression.
+
+The standard library seems suboptimal on levels 3 and 4 - offering both worse compression and speed than levels 6 and 7 of this package, respectively.
+
+## Highly Compressible File
+
+This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real-life terms we are dealing with something like a highly redundant stream of data, etc.
+
+It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5, with levels 7 and 8 offering great speed for the achieved compression.
+
+So if you know your content is extremely compressible, you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground".
+
+## Medium-High Compressible
+
+This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text-based compression and more data-heavy streams.
+
+We see a similar picture here as in "Web Content". On equal levels, some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both.
+
+## Medium Compressible
+
+I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario.
+
+The most notable thing is how quickly the standard library drops to very low compression speeds around levels 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior.
+
+
+## Un-compressible Content
+
+This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections.
+
+
+## Huffman only compression
+
+This compression library adds a special compression level, named `HuffmanOnly`, which allows near-linear-time compression. This is done by completely disabling matching of previous data, and only reducing the number of bits used to represent each character.
+
+This means that frequently used characters, like 'e' and ' ' (space) in text, use the fewest bits to represent, while rare characters like '¤' take more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).
+
+Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core.
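+
+As a minimal sketch of enabling this mode through the package's `flate` writer (the payload here is made up for the example; the gzip sub-package exposes the same `HuffmanOnly` level):
+
+```
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+
+	"github.com/klauspost/compress/flate"
+)
+
+func main() {
+	var buf bytes.Buffer
+	// HuffmanOnly disables match searching entirely; only entropy coding is done.
+	w, err := flate.NewWriter(&buf, flate.HuffmanOnly)
+	if err != nil {
+		log.Fatal(err)
+	}
+	if _, err := w.Write(bytes.Repeat([]byte("sample text payload "), 50)); err != nil {
+		log.Fatal(err)
+	}
+	if err := w.Close(); err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println("compressed size:", buf.Len())
+}
+```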
+
+The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%).
+
+The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder slowing down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a ~30% size increase for a ~4.5x speedup.
+
+For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
+
+This is implemented in Go 1.7 as "Huffman Only" mode, though it is not exposed for gzip there.
+
+
+# license
+
+This code is licensed under the same conditions as the original Go code. See LICENSE file.
diff --git a/hotelReservation/vendor/github.com/klauspost/compress/compressible.go b/hotelReservation/vendor/github.com/klauspost/compress/compressible.go
new file mode 100644
index 000000000..ea5a692d5
--- /dev/null
+++ b/hotelReservation/vendor/github.com/klauspost/compress/compressible.go
@@ -0,0 +1,85 @@
+package compress
+
+import "math"
+
+// Estimate returns a normalized compressibility estimate of block b.
+// Values close to zero are likely uncompressible.
+// Values above 0.1 are likely to be compressible.
+// Values above 0.5 are very compressible.
+// Very small lengths will return 0.
+func Estimate(b []byte) float64 {
+	if len(b) < 16 {
+		return 0
+	}
+
+	// Correctly predicted order 1
+	hits := 0
+	lastMatch := false
+	var o1 [256]byte
+	var hist [256]int
+	c1 := byte(0)
+	for _, c := range b {
+		if c == o1[c1] {
+			// We only count a hit if there were two correct predictions in a row.
+			if lastMatch {
+				hits++
+			}
+			lastMatch = true
+		} else {
+			lastMatch = false
+		}
+		o1[c1] = c
+		c1 = c
+		hist[c]++
+	}
+
+	// Use x^0.6 to give better spread
+	prediction := math.Pow(float64(hits)/float64(len(b)), 0.6)
+
+	// Calculate histogram distribution
+	variance := float64(0)
+	avg := float64(len(b)) / 256
+
+	for _, v := range hist {
+		Δ := float64(v) - avg
+		variance += Δ * Δ
+	}
+
+	stddev := math.Sqrt(float64(variance)) / float64(len(b))
+	exp := math.Sqrt(1 / float64(len(b)))
+
+	// Subtract expected stddev
+	stddev -= exp
+	if stddev < 0 {
+		stddev = 0
+	}
+	stddev *= 1 + exp
+
+	// Use x^0.4 to give better spread
+	entropy := math.Pow(stddev, 0.4)
+
+	// 50/50 weight between prediction and histogram distribution
+	return math.Pow((prediction+entropy)/2, 0.9)
+}
+
+// ShannonEntropyBits returns the number of bits minimum required to represent
+// an entropy encoding of the input bytes.
+// https://en.wiktionary.org/wiki/Shannon_entropy
+func ShannonEntropyBits(b []byte) int {
+	if len(b) == 0 {
+		return 0
+	}
+	var hist [256]int
+	for _, c := range b {
+		hist[c]++
+	}
+	shannon := float64(0)
+	invTotal := 1.0 / float64(len(b))
+	for _, v := range hist[:] {
+		if v > 0 {
+			n := float64(v)
+			shannon += math.Ceil(-math.Log2(n*invTotal) * n)
+		}
+	}
+	return int(math.Ceil(shannon))
+}
diff --git a/hotelReservation/vendor/github.com/klauspost/compress/fse/README.md b/hotelReservation/vendor/github.com/klauspost/compress/fse/README.md
new file mode 100644
index 000000000..ea7324da6
--- /dev/null
+++ b/hotelReservation/vendor/github.com/klauspost/compress/fse/README.md
@@ -0,0 +1,79 @@
+# Finite State Entropy
+
+This package provides Finite State Entropy encoding and decoding.
+
+Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS))
+encoding provides fast, near-optimal symbol encoding/decoding
+for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd).
+
+This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
+It does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders do,
+but it can be used as a secondary step for compressors (like Snappy) that do not do entropy encoding.
+
+* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse)
+
+## News
+
+ * Feb 2018: First implementation released. Consider this beta software for now.
+
+# Usage
+
+This package provides a low level interface that allows compressing single independent blocks.
+
+Each block is separate, and there are no built-in integrity checks.
+This means that the caller should keep track of block sizes and also do checksums if needed.
+
+Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function.
+You must provide input and will receive the output and maybe an error.
+
+These error values can be returned:
+
+| Error               | Description                                                                  |
+|---------------------|------------------------------------------------------------------------------|
+| `<nil>`             | Everything ok, output is returned                                            |
+| `ErrIncompressible` | Returned when input is judged to be too hard to compress                     |
+| `ErrUseRLE`         | Returned from the compressor when the input is a single byte value repeated  |
+| `(error)`           | An internal error occurred.                                                  |
+
+As can be seen above, some errors will be returned even under normal operation, so it is important to handle these.
+
+To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object
+that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
+object can be used for both.
+
+Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using it
+you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
+
+Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function.
+You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back,
+your input was likely corrupted.
+
+It is important to note that a successful decoding does *not* mean your output matches your original input.
+There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
+
+For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples).
+
+# Performance
+
+Many factors affect speed. Block sizes and compressibility of the material are primary factors.
+All compression functions currently run only on the calling goroutine, so only one core will be used per block.
+
+The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input
+is used to reduce some of the processing, so if all your input is above byte value 64 for instance, it may be
+beneficial to transpose all your input values down by 64.
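+
+For illustration, here is a hedged sketch of that transposition idea around a `Compress`/`Decompress` round trip. The shift of 64 and the sample data are assumptions made up for the example; the copy of the compressed output follows the `Out` re-use caveat described above:
+
+```
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+
+	"github.com/klauspost/compress/fse"
+)
+
+func main() {
+	// All symbols are >= 'A' (65), so shifting them down keeps symbol values small.
+	in := bytes.Repeat([]byte("ABBCCCDDDD"), 200)
+	const shift = 64
+	shifted := make([]byte, len(in))
+	for i, c := range in {
+		shifted[i] = c - shift
+	}
+
+	var s fse.Scratch
+	comp, err := fse.Compress(shifted, &s)
+	if err != nil {
+		// ErrIncompressible and ErrUseRLE are normal outcomes; a real caller
+		// would then store the block uncompressed or as RLE instead.
+		log.Fatal(err)
+	}
+	// comp aliases s.Out; copy it before re-using the scratch for decompression.
+	comp = append([]byte(nil), comp...)
+
+	got, err := fse.Decompress(comp, &s)
+	if err != nil {
+		log.Fatal(err)
+	}
+	for i := range got {
+		got[i] += shift // undo the transposition
+	}
+	fmt.Println(bytes.Equal(got, in)) // true
+}
+```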
+
+With moderate block sizes around 64k, speeds are typically 200MB/s per core for compression, and
+around 300MB/s for decompression.
+
+The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s.
+
+# Plans
+
+At one point, more internals will be exposed to facilitate more "expert" usage of the components.
+
+A streaming interface is also likely to be implemented. Likely compatible with [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261).
+
+# Contributing
+
+Contributions are always welcome. Be aware that adding public functions will require good justification and breaking
+changes will likely not be accepted. If in doubt open an issue before writing the PR.
\ No newline at end of file
diff --git a/hotelReservation/vendor/github.com/klauspost/compress/fse/bitreader.go b/hotelReservation/vendor/github.com/klauspost/compress/fse/bitreader.go
new file mode 100644
index 000000000..f65eb3909
--- /dev/null
+++ b/hotelReservation/vendor/github.com/klauspost/compress/fse/bitreader.go
@@ -0,0 +1,122 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package fse
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+)
+
+// bitReader reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReader struct {
+	in       []byte
+	off      uint // next byte to read is at in[off - 1]
+	value    uint64
+	bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
+func (b *bitReader) init(in []byte) error {
+	if len(in) < 1 {
+		return errors.New("corrupt stream: too short")
+	}
+	b.in = in
+	b.off = uint(len(in))
+	// The highest bit of the last byte indicates where to start
+	v := in[len(in)-1]
+	if v == 0 {
+		return errors.New("corrupt stream, did not find end of stream")
+	}
+	b.bitsRead = 64
+	b.value = 0
+	if len(in) >= 8 {
+		b.fillFastStart()
+	} else {
+		b.fill()
+		b.fill()
+	}
+	b.bitsRead += 8 - uint8(highBits(uint32(v)))
+	return nil
+}
+
+// getBits will return n bits. n can be 0.
+func (b *bitReader) getBits(n uint8) uint16 {
+	if n == 0 || b.bitsRead >= 64 {
+		return 0
+	}
+	return b.getBitsFast(n)
+}
+
+// getBitsFast requires that at least one bit is requested every time.
+// There are no checks if the buffer is filled.
+func (b *bitReader) getBitsFast(n uint8) uint16 {
+	const regMask = 64 - 1
+	v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
+	b.bitsRead += n
+	return v
+}
+
+// fillFast() will make sure at least 32 bits are available.
+// There must be at least 4 bytes available.
+func (b *bitReader) fillFast() {
+	if b.bitsRead < 32 {
+		return
+	}
+	// 2 bounds checks.
+	v := b.in[b.off-4:]
+	v = v[:4]
+	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	b.value = (b.value << 32) | uint64(low)
+	b.bitsRead -= 32
+	b.off -= 4
+}
+
+// fill() will make sure at least 32 bits are available.
+func (b *bitReader) fill() {
+	if b.bitsRead < 32 {
+		return
+	}
+	if b.off > 4 {
+		v := b.in[b.off-4:]
+		v = v[:4]
+		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+		b.value = (b.value << 32) | uint64(low)
+		b.bitsRead -= 32
+		b.off -= 4
+		return
+	}
+	for b.off > 0 {
+		b.value = (b.value << 8) | uint64(b.in[b.off-1])
+		b.bitsRead -= 8
+		b.off--
+	}
+}
+
+// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read.
+func (b *bitReader) fillFastStart() {
+	// Do single re-slice to avoid bounds checks.
+	b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+	b.bitsRead = 0
+	b.off -= 8
+}
+
+// finished returns true if all bits have been read from the bit stream.
+func (b *bitReader) finished() bool {
+	return b.bitsRead >= 64 && b.off == 0
+}
+
+// close the bitstream and return an error if out-of-buffer reads occurred.
+func (b *bitReader) close() error {
+	// Release reference.
+	b.in = nil
+	if b.bitsRead > 64 {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
diff --git a/hotelReservation/vendor/github.com/klauspost/compress/fse/bitwriter.go b/hotelReservation/vendor/github.com/klauspost/compress/fse/bitwriter.go
new file mode 100644
index 000000000..43e463611
--- /dev/null
+++ b/hotelReservation/vendor/github.com/klauspost/compress/fse/bitwriter.go
@@ -0,0 +1,168 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package fse
+
+import "fmt"
+
+// bitWriter will write bits.
+// First bit will be LSB of the first byte of output.
+type bitWriter struct {
+	bitContainer uint64
+	nBits        uint8
+	out          []byte
+}
+
+// bitMask16 is a table of bitmasks. It has extra entries to avoid bounds checks.
+var bitMask16 = [32]uint16{
+	0, 1, 3, 7, 0xF, 0x1F,
+	0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
+	0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF,
+	0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
+	0xFFFF, 0xFFFF} /* up to 16 bits */
+
+// addBits16NC will add up to 16 bits.
+// It will not check if there is space for them,
+// so the caller must ensure that it has flushed recently.
+func (b *bitWriter) addBits16NC(value uint16, bits uint8) {
+	b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63)
+	b.nBits += bits
+}
+
+// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
+// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
+func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
+	b.bitContainer |= uint64(value) << (b.nBits & 63)
+	b.nBits += bits
+}
+
+// addBits16ZeroNC will add up to 16 bits.
+// It will not check if there is space for them,
+// so the caller must ensure that it has flushed recently.
+// This is fastest if bits can be zero.
+func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) {
+	if bits == 0 {
+		return
+	}
+	value <<= (16 - bits) & 15
+	value >>= (16 - bits) & 15
+	b.bitContainer |= uint64(value) << (b.nBits & 63)
+	b.nBits += bits
+}
+
+// flush will flush all pending full bytes.
+// There will be at least 56 bits available for writing when this has been called.
+// Using flush32 is faster, but leaves less space for writing.
+func (b *bitWriter) flush() {
+	v := b.nBits >> 3
+	switch v {
+	case 0:
+	case 1:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+		)
+	case 2:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+		)
+	case 3:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+		)
+	case 4:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+		)
+	case 5:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+		)
+	case 6:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+			byte(b.bitContainer>>40),
+		)
+	case 7:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+			byte(b.bitContainer>>40),
+			byte(b.bitContainer>>48),
+		)
+	case 8:
+		b.out = append(b.out,
+			byte(b.bitContainer),
+			byte(b.bitContainer>>8),
+			byte(b.bitContainer>>16),
+			byte(b.bitContainer>>24),
+			byte(b.bitContainer>>32),
+			byte(b.bitContainer>>40),
+			byte(b.bitContainer>>48),
+			byte(b.bitContainer>>56),
+		)
+	default:
+		panic(fmt.Errorf("bits (%d) > 64", b.nBits))
+	}
+	b.bitContainer >>= v << 3
+	b.nBits &= 7
+}
+
+// flush32 will flush out, so there are at least 32 bits available for writing.
+func (b *bitWriter) flush32() {
+	if b.nBits < 32 {
+		return
+	}
+	b.out = append(b.out,
+		byte(b.bitContainer),
+		byte(b.bitContainer>>8),
+		byte(b.bitContainer>>16),
+		byte(b.bitContainer>>24))
+	b.nBits -= 32
+	b.bitContainer >>= 32
+}
+
+// flushAlign will flush remaining full bytes and align to next byte boundary.
+func (b *bitWriter) flushAlign() {
+	nbBytes := (b.nBits + 7) >> 3
+	for i := uint8(0); i < nbBytes; i++ {
+		b.out = append(b.out, byte(b.bitContainer>>(i*8)))
+	}
+	b.nBits = 0
+	b.bitContainer = 0
+}
+
+// close will write the alignment bit and write the final byte(s)
+// to the output.
+func (b *bitWriter) close() error {
+	// End mark
+	b.addBits16Clean(1, 1)
+	// flush until next byte.
+	b.flushAlign()
+	return nil
+}
+
+// reset and continue writing by appending to out.
+func (b *bitWriter) reset(out []byte) {
+	b.bitContainer = 0
+	b.nBits = 0
+	b.out = out
+}
diff --git a/hotelReservation/vendor/github.com/klauspost/compress/fse/bytereader.go b/hotelReservation/vendor/github.com/klauspost/compress/fse/bytereader.go
new file mode 100644
index 000000000..abade2d60
--- /dev/null
+++ b/hotelReservation/vendor/github.com/klauspost/compress/fse/bytereader.go
@@ -0,0 +1,47 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package fse
+
+// byteReader provides a byte reader that reads
+// little endian values from a byte stream.
+// The input stream is manually advanced.
+// The reader performs no bounds checks.
+type byteReader struct {
+	b   []byte
+	off int
+}
+
+// init will initialize the reader and set the input.
+func (b *byteReader) init(in []byte) {
+	b.b = in
+	b.off = 0
+}
+
+// advance the stream by n bytes.
+func (b *byteReader) advance(n uint) {
+	b.off += int(n)
+}
+
+// Uint32 returns a little endian uint32 starting at current offset.
+func (b byteReader) Uint32() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/hotelReservation/vendor/github.com/klauspost/compress/fse/compress.go b/hotelReservation/vendor/github.com/klauspost/compress/fse/compress.go new file mode 100644 index 000000000..6f341914c --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/fse/compress.go @@ -0,0 +1,683 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import ( + "errors" + "fmt" +) + +// Compress the input bytes. Input must be < 2GB. +// Provide a Scratch buffer to avoid memory allocations. +// Note that the output is also kept in the scratch buffer. +// If input is too hard to compress, ErrIncompressible is returned. +// If input is a single byte value repeated ErrUseRLE is returned. +func Compress(in []byte, s *Scratch) ([]byte, error) { + if len(in) <= 1 { + return nil, ErrIncompressible + } + if len(in) > (2<<30)-1 { + return nil, errors.New("input too big, must be < 2GB") + } + s, err := s.prepare(in) + if err != nil { + return nil, err + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + if maxCount == 0 { + maxCount = s.countSimple(in) + } + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount == len(in) { + // One symbol, use RLE + return nil, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, ErrIncompressible + } + s.optimalTableLog() + err = s.normalizeCount() + if err != nil { + return nil, err + } + err = s.writeCount() + if err != nil { + return nil, err + } + + if false { + err = s.validateNorm() + if err != nil { + return nil, err + } + } + + err = s.buildCTable() + if err != nil { + return nil, err + } + err = s.compress(in) + if err != nil { + return nil, err + } + s.Out = s.bw.out + // Check if we compressed. + if len(s.Out) >= len(in) { + return nil, ErrIncompressible + } + return s.Out, nil +} + +// cState contains the compression state of a stream. +type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. +func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + first.deltaFindState + c.state = c.stateTable[lu] +} + +// encode the output symbol provided and write it to the bitstream. +func (c *cState) encode(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16NC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// encode the output symbol provided and write it to the bitstream. 
+func (c *cState) encodeZero(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) + c.bw.flush() +} + +// compress is the main compression loop that will encode the input from the last byte to the first. +func (s *Scratch) compress(src []byte) error { + if len(src) <= 2 { + return errors.New("compress: src too small") + } + tt := s.ct.symbolTT[:256] + s.bw.reset(s.Out) + + // Our two states each encodes every second byte. + // Last byte encoded (first byte decoded) will always be encoded by c1. + var c1, c2 cState + + // Encode so remaining size is divisible by 4. + ip := len(src) + if ip&1 == 1 { + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + c1.encodeZero(tt[src[ip-3]]) + ip -= 3 + } else { + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + ip -= 2 + } + if ip&2 != 0 { + c2.encodeZero(tt[src[ip-1]]) + c1.encodeZero(tt[src[ip-2]]) + ip -= 2 + } + + // Main compression loop. + switch { + case !s.zeroBits && s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush. + // We do not need to check if any output is 0 bits. + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + c2.encode(tt[v2]) + c1.encode(tt[v3]) + ip -= 4 + } + case !s.zeroBits: + // We do not need to check if any output is 0 bits. + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + s.bw.flush32() + c2.encode(tt[v2]) + c1.encode(tt[v3]) + ip -= 4 + } + case s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + ip -= 4 + } + default: + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + s.bw.flush32() + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + ip -= 4 + } + } + + // Flush final state. + // Used to initialize state when decoding. + c2.flush(s.actualTableLog) + c1.flush(s.actualTableLog) + + return s.bw.close() +} + +// writeCount will write the normalized histogram count to header. +// This is read back by readNCount. 
+func (s *Scratch) writeCount() error {
+	var (
+		tableLog  = s.actualTableLog
+		tableSize = 1 << tableLog
+		previous0 bool
+		charnum   uint16
+
+		maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3
+
+		// Write Table Size
+		bitStream = uint32(tableLog - minTablelog)
+		bitCount  = uint(4)
+		remaining = int16(tableSize + 1) /* +1 for extra accuracy */
+		threshold = int16(tableSize)
+		nbBits    = uint(tableLog + 1)
+	)
+	if cap(s.Out) < maxHeaderSize {
+		s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize)
+	}
+	outP := uint(0)
+	out := s.Out[:maxHeaderSize]
+
+	// stops at 1
+	for remaining > 1 {
+		if previous0 {
+			start := charnum
+			for s.norm[charnum] == 0 {
+				charnum++
+			}
+			for charnum >= start+24 {
+				start += 24
+				bitStream += uint32(0xFFFF) << bitCount
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+			}
+			for charnum >= start+3 {
+				start += 3
+				bitStream += 3 << bitCount
+				bitCount += 2
+			}
+			bitStream += uint32(charnum-start) << bitCount
+			bitCount += 2
+			if bitCount > 16 {
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+				bitCount -= 16
+			}
+		}
+
+		count := s.norm[charnum]
+		charnum++
+		max := (2*threshold - 1) - remaining
+		if count < 0 {
+			remaining += count
+		} else {
+			remaining -= count
+		}
+		count++ // +1 for extra accuracy
+		if count >= threshold {
+			count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[
+		}
+		bitStream += uint32(count) << bitCount
+		bitCount += nbBits
+		if count < max {
+			bitCount--
+		}
+
+		previous0 = count == 1
+		if remaining < 1 {
+			return errors.New("internal error: remaining<1")
+		}
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+
+		if bitCount > 16 {
+			out[outP] = byte(bitStream)
+			out[outP+1] = byte(bitStream >> 8)
+			outP += 2
+			bitStream >>= 16
+			bitCount -= 16
+		}
+	}
+
+	out[outP] = byte(bitStream)
+	out[outP+1] = byte(bitStream >> 8)
+	outP += (bitCount + 7) / 8
+
+	if charnum > s.symbolLen {
+		return errors.New("internal error: charnum > s.symbolLen")
+	}
+	s.Out = out[:outP]
+	return nil
+}
+
+// symbolTransform contains the state transform for a symbol.
+type symbolTransform struct {
+	deltaFindState int32
+	deltaNbBits    uint32
+}
+
+// String prints values as a human readable string.
+func (s symbolTransform) String() string {
+	return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState)
+}
+
+// cTable contains tables used for compression.
+type cTable struct {
+	tableSymbol []byte
+	stateTable  []uint16
+	symbolTT    []symbolTransform
+}
+
+// allocCtable will allocate tables needed for compression.
+// If existing tables are big enough, they are simply re-used.
+func (s *Scratch) allocCtable() {
+	tableSize := 1 << s.actualTableLog
+	// get tableSymbol that is big enough.
+	if cap(s.ct.tableSymbol) < tableSize {
+		s.ct.tableSymbol = make([]byte, tableSize)
+	}
+	s.ct.tableSymbol = s.ct.tableSymbol[:tableSize]
+
+	ctSize := tableSize
+	if cap(s.ct.stateTable) < ctSize {
+		s.ct.stateTable = make([]uint16, ctSize)
+	}
+	s.ct.stateTable = s.ct.stateTable[:ctSize]
+
+	if cap(s.ct.symbolTT) < 256 {
+		s.ct.symbolTT = make([]symbolTransform, 256)
+	}
+	s.ct.symbolTT = s.ct.symbolTT[:256]
+}
+
+// buildCTable will populate the compression table so it is ready to be used.
+func (s *Scratch) buildCTable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + var cumul [maxSymbolValue + 2]int16 + + s.allocCtable() + tableSymbol := s.ct.tableSymbol[:tableSize] + // symbol start positions + { + cumul[0] = 0 + for ui, v := range s.norm[:s.symbolLen-1] { + u := byte(ui) // one less than reference + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = u + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + } + // Encode last symbol separately to avoid overflowing u + u := int(s.symbolLen - 1) + v := s.norm[s.symbolLen-1] + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = byte(u) + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + if uint32(cumul[s.symbolLen]) != tableSize { + return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) + } + cumul[s.symbolLen] = int16(tableSize) + 1 + } + // Spread symbols + s.zeroBits = false + { + step := tableStep(tableSize) + tableMask := tableSize - 1 + var position uint32 + // if any symbol > largeLimit, we may have 0 bits output. + largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = int32(total - 1) + total++ + default: + maxBitsOut := uint32(tableLog) - highBits(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = int32(total - v) + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. +func (s *Scratch) countSimple(in []byte) (max int) { + for _, v := range in { + s.count[v]++ + } + m := uint32(0) + for i, v := range s.count[:] { + if v > m { + m = v + } + if v > 0 { + s.symbolLen = uint16(i) + 1 + } + } + return int(m) +} + +// minTableLog provides the minimum logSize to safely represent a distribution. 
+func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1 + minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > maxTableLog { + tableLog = maxTableLog + } + s.actualTableLog = tableLog +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +func (s *Scratch) normalizeCount() error { + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(s.br.remain()) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(s.br.remain() >> tableLog) + ) + + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + return s.normalizeCount2() + } + s.norm[largest] += stillToDistribute + return nil +} + +// Secondary normalization method. +// To be used when primary method fails. 
+func (s *Scratch) normalizeCount2() error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(s.br.remain()) + tableLog = s.actualTableLog + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = (total * 3) / (toDistribute * 2) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - sStart + ) + if weight < 1 { + return errors.New("weight < 1") + } + s.norm[i] = int16(weight) + tmpTotal = end + } + } + return nil +} + +// validateNorm validates the normalized histogram table. 
+func (s *Scratch) validateNorm() (err error) {
+	var total int
+	for _, v := range s.norm[:s.symbolLen] {
+		if v >= 0 {
+			total += int(v)
+		} else {
+			total -= int(v)
+		}
+	}
+	defer func() {
+		if err == nil {
+			return
+		}
+		fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
+		for i, v := range s.norm[:s.symbolLen] {
+			fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v)
+		}
+	}()
+	if total != (1 << s.actualTableLog) {
+		return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog)
+	}
+	return nil
+}
diff --git a/hotelReservation/vendor/github.com/klauspost/compress/fse/decompress.go b/hotelReservation/vendor/github.com/klauspost/compress/fse/decompress.go
new file mode 100644
--- /dev/null
+++ b/hotelReservation/vendor/github.com/klauspost/compress/fse/decompress.go
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package fse
+
+import (
+	"errors"
+	"fmt"
+)
+
+const (
+	tablelogAbsoluteMax = 15
+)
+
+// Decompress a block of data compressed with Compress.
+// Provide a Scratch to avoid memory allocations.
+// Note that the output is also kept in the scratch buffer.
+func Decompress(b []byte, s *Scratch) ([]byte, error) {
+	s, err := s.prepare(b)
+	if err != nil {
+		return nil, err
+	}
+	s.Out = s.Out[:0]
+	err = s.readNCount()
+	if err != nil {
+		return nil, err
+	}
+	err = s.buildDtable()
+	if err != nil {
+		return nil, err
+	}
+	err = s.decompress()
+	if err != nil {
+		return nil, err
+	}
+	return s.Out, nil
+}
+
+// readNCount will read the symbol distribution so decoding tables can be constructed.
+func (s *Scratch) readNCount() error {
+	var (
+		charnum   uint16
+		previous0 bool
+		b         = &s.br
+	)
+	iend := b.remain()
+	if iend < 4 {
+		return errors.New("input too small")
+	}
+	bitStream := b.Uint32()
+	nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog
+	if nbBits > tablelogAbsoluteMax {
+		return errors.New("tableLog too large")
+	}
+	bitStream >>= 4
+	bitCount := uint(4)
+
+	s.actualTableLog = uint8(nbBits)
+	remaining := int32((1 << nbBits) + 1)
+	threshold := int32(1 << nbBits)
+	gotTotal := int32(0)
+	nbBits++
+
+	for remaining > 1 {
+		if previous0 {
+			n0 := charnum
+			for (bitStream & 0xFFFF) == 0xFFFF {
+				n0 += 24
+				if b.off < iend-5 {
+					b.advance(2)
+					bitStream = b.Uint32() >> bitCount
+				} else {
+					bitStream >>= 16
+					bitCount += 16
+				}
+			}
+			for (bitStream & 3) == 3 {
+				n0 += 3
+				bitStream >>= 2
+				bitCount += 2
+			}
+			n0 += uint16(bitStream & 3)
+			bitCount += 2
+			if n0 > maxSymbolValue {
+				return errors.New("maxSymbolValue too small")
+			}
+			for charnum < n0 {
+				s.norm[charnum&0xff] = 0
+				charnum++
+			}
+
+			if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
+				b.advance(bitCount >> 3)
+				bitCount &= 7
+				bitStream = b.Uint32() >> bitCount
+			} else {
+				bitStream >>= 2
+			}
+		}
+
+		max := (2*(threshold) - 1) - (remaining)
+		var count int32
+
+		if (int32(bitStream) & (threshold - 1)) < max {
+			count = int32(bitStream) & (threshold - 1)
+			bitCount += nbBits - 1
+		} else {
+			count = int32(bitStream) & (2*threshold - 1)
+			if count >= threshold {
+				count -= max
+			}
+			bitCount += nbBits
+		}
+
+		count-- // extra accuracy
+		if count < 0 {
+			// -1 means +1
+			remaining += count
+			gotTotal -= count
+		} else {
+			remaining -= count
+			gotTotal += count
+		}
+		s.norm[charnum&0xff] = int16(count)
+		charnum++
+		previous0 = count == 0
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+		if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
+			b.advance(bitCount >> 3)
+			bitCount &= 7
+		} else {
+			bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
+			b.off = len(b.b) - 4
+		}
+		bitStream = b.Uint32() >> (bitCount & 31)
+	}
+	s.symbolLen = charnum
+
+	if s.symbolLen <= 1 {
+		return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
+	}
+	if s.symbolLen > maxSymbolValue+1 {
+		return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
+	}
+	if remaining != 1 {
+		return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
+	}
+	if bitCount > 32 {
+		return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
+	}
+	if gotTotal != 1<<s.actualTableLog {
+		return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
+	}
+	b.advance((bitCount + 7) >> 3)
+	return nil
+}
+
+// decSymbol contains information about a state entry,
+// Including the state offset base, the output symbol and
+// the number of bits to read for the low part of the destination state.
+type decSymbol struct {
+	newState uint16
+	symbol   uint8
+	nbBits   uint8
+}
+
+// allocDtable will allocate decoding tables if they are not big enough.
+func (s *Scratch) allocDtable() { + tableSize := 1 << s.actualTableLog + if cap(s.decTable) < tableSize { + s.decTable = make([]decSymbol, tableSize) + } + s.decTable = s.decTable[:tableSize] + + if cap(s.ct.tableSymbol) < 256 { + s.ct.tableSymbol = make([]byte, 256) + } + s.ct.tableSymbol = s.ct.tableSymbol[:256] + + if cap(s.ct.stateTable) < 256 { + s.ct.stateTable = make([]uint16, 256) + } + s.ct.stateTable = s.ct.stateTable[:256] +} + +// buildDtable will build the decoding table. +func (s *Scratch) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + s.allocDtable() + symbolNext := s.ct.stateTable[:256] + + // Init, lay down lowprob symbols + s.zeroBits = false + { + largeLimit := int16(1 << (s.actualTableLog - 1)) + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.decTable[highThreshold].symbol = uint8(i) + highThreshold-- + symbolNext[i] = 1 + } else { + if v >= largeLimit { + s.zeroBits = true + } + symbolNext[i] = uint16(v) + } + } + } + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.decTable[position].symbol = uint8(ss) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.decTable { + symbol := v.symbol + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.decTable[u].nbBits = nBits + newState := (nextState << nBits) - tableSize + if newState >= tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.decTable[u].newState = newState + } + } + return nil +} + +// decompress will decompress the bitstream. +// If the buffer is over-read an error is returned. +func (s *Scratch) decompress() error { + br := &s.bits + br.init(s.br.unread()) + + var s1, s2 decoder + // Initialize and decode first state and symbol. + s1.init(br, s.decTable, s.actualTableLog) + s2.init(br, s.decTable, s.actualTableLog) + + // Use temp table to avoid bound checks/append penalty. + var tmp = s.ct.tableSymbol[:256] + var off uint8 + + // Main part + if !s.zeroBits { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.nextFast() + tmp[off+1] = s2.nextFast() + br.fillFast() + tmp[off+2] = s1.nextFast() + tmp[off+3] = s2.nextFast() + off += 4 + // When off is 0, we have overflowed and should write. + if off == 0 { + s.Out = append(s.Out, tmp...) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } else { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.next() + tmp[off+1] = s2.next() + br.fillFast() + tmp[off+2] = s1.next() + tmp[off+3] = s2.next() + off += 4 + if off == 0 { + s.Out = append(s.Out, tmp...) + // When off is 0, we have overflowed and should write. 
+ if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } + s.Out = append(s.Out, tmp[:off]...) + + // Final bits, a bit more expensive check + for { + if s1.finished() { + s.Out = append(s.Out, s1.final(), s2.final()) + break + } + br.fill() + s.Out = append(s.Out, s1.next()) + if s2.finished() { + s.Out = append(s.Out, s2.final(), s1.final()) + break + } + s.Out = append(s.Out, s2.next()) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + return br.close() +} + +// decoder keeps track of the current state and updates it from the bitstream. +type decoder struct { + state uint16 + br *bitReader + dt []decSymbol +} + +// init will initialize the decoder and read the first state from the stream. +func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) { + d.dt = dt + d.br = in + d.state = in.getBits(tableLog) +} + +// next returns the next symbol and sets the next state. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) next() uint8 { + n := &d.dt[d.state] + lowBits := d.br.getBits(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} + +// finished returns true if all bits have been read from the bitstream +// and the next state would require reading bits from the input. +func (d *decoder) finished() bool { + return d.br.finished() && d.dt[d.state].nbBits > 0 +} + +// final returns the current state symbol without decoding the next. +func (d *decoder) final() uint8 { + return d.dt[d.state].symbol +} + +// nextFast returns the next symbol and sets the next state. +// This can only be used if no symbols are 0 bits. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) nextFast() uint8 { + n := d.dt[d.state] + lowBits := d.br.getBitsFast(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} diff --git a/hotelReservation/vendor/github.com/klauspost/compress/fse/fse.go b/hotelReservation/vendor/github.com/klauspost/compress/fse/fse.go new file mode 100644 index 000000000..535cbadfd --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/fse/fse.go @@ -0,0 +1,144 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +// Package fse provides Finite State Entropy encoding and decoding. +// +// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding +// for byte blocks as implemented in zstd. +// +// See https://github.com/klauspost/compress/tree/master/fse for more information. +package fse + +import ( + "errors" + "fmt" + "math/bits" +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = 14 + defaultMemoryUsage = 13 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + defaultTablelog = defaultMemoryUsage - 2 + minTablelog = 5 + maxSymbolValue = 255 +) + +var ( + // ErrIncompressible is returned when input is judged to be too hard to compress. 
+	ErrIncompressible = errors.New("input is not compressible")
+
+	// ErrUseRLE is returned from the compressor when the input is a single byte value repeated.
+	ErrUseRLE = errors.New("input is single value repeated")
+)
+
+// Scratch provides temporary storage for compression and decompression.
+type Scratch struct {
+	// Private
+	count    [maxSymbolValue + 1]uint32
+	norm     [maxSymbolValue + 1]int16
+	br       byteReader
+	bits     bitReader
+	bw       bitWriter
+	ct       cTable      // Compression tables.
+	decTable []decSymbol // Decompression table.
+	maxCount int         // count of the most probable symbol
+
+	// Per block parameters.
+	// These can be used to override compression parameters of the block.
+	// Do not touch, unless you know what you are doing.
+
+	// Out is output buffer.
+	// If the scratch is re-used before the caller is done processing the output,
+	// set this field to nil.
+	// Otherwise the output buffer will be re-used for next Compression/Decompression step
+	// and allocation will be avoided.
+	Out []byte
+
+	// DecompressLimit limits the maximum decoded size acceptable.
+	// If > 0 decompression will stop when approximately this many bytes
+	// have been decoded.
+	// If 0, maximum size will be 2GB.
+	DecompressLimit int
+
+	symbolLen      uint16 // Length of active part of the symbol table.
+	actualTableLog uint8  // Selected tablelog.
+	zeroBits       bool   // no symbol has prob > 50%.
+	clearCount     bool   // clear count
+
+	// MaxSymbolValue will override the maximum symbol value of the next block.
+	MaxSymbolValue uint8
+
+	// TableLog will attempt to override the tablelog for the next block.
+	TableLog uint8
+}
+
+// Histogram allows the caller to populate the histogram and skip that step in the compression.
+// It also allows inspecting the histogram when compression is done.
+// To indicate that you have populated the histogram, call HistogramFinished
+// with the value of the highest populated symbol, as well as the number of entries
+// in the most populated entry. These are accepted at face value.
+// The returned slice will always be length 256.
+func (s *Scratch) Histogram() []uint32 {
+	return s.count[:]
+}
+
+// HistogramFinished can be called to indicate that the histogram has been populated.
+// maxSymbol is the index of the highest set symbol of the next data segment.
+// maxCount is the number of entries in the most populated entry.
+// These are accepted at face value.
+func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) {
+	s.maxCount = maxCount
+	s.symbolLen = uint16(maxSymbol) + 1
+	s.clearCount = maxCount != 0
+}
+
+// prepare will prepare and allocate scratch tables used for both compression and decompression.
+func (s *Scratch) prepare(in []byte) (*Scratch, error) {
+	if s == nil {
+		s = &Scratch{}
+	}
+	if s.MaxSymbolValue == 0 {
+		s.MaxSymbolValue = 255
+	}
+	if s.TableLog == 0 {
+		s.TableLog = defaultTablelog
+	}
+	if s.TableLog > maxTableLog {
+		return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog)
+	}
+	if cap(s.Out) == 0 {
+		s.Out = make([]byte, 0, len(in))
+	}
+	if s.clearCount && s.maxCount == 0 {
+		for i := range s.count {
+			s.count[i] = 0
+		}
+		s.clearCount = false
+	}
+	s.br.init(in)
+	if s.DecompressLimit == 0 {
+		// Max size 2GB.
+		s.DecompressLimit = (2 << 30) - 1
+	}
+
+	return s, nil
+}
+
+// tableStep returns the next table index.
+func tableStep(tableSize uint32) uint32 {
+	return (tableSize >> 1) + (tableSize >> 3) + 3
+}
+
+func highBits(val uint32) (n uint32) {
+	return uint32(bits.Len32(val) - 1)
+}
diff --git a/hotelReservation/vendor/github.com/klauspost/compress/gen.sh b/hotelReservation/vendor/github.com/klauspost/compress/gen.sh
new file mode 100644
index 000000000..aff942205
--- /dev/null
+++ b/hotelReservation/vendor/github.com/klauspost/compress/gen.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+cd s2/cmd/_s2sx/ || exit 1
+go generate .
diff --git a/hotelReservation/vendor/github.com/klauspost/compress/huff0/.gitignore b/hotelReservation/vendor/github.com/klauspost/compress/huff0/.gitignore
new file mode 100644
index 000000000..b3d262958
--- /dev/null
+++ b/hotelReservation/vendor/github.com/klauspost/compress/huff0/.gitignore
@@ -0,0 +1 @@
+/huff0-fuzz.zip
diff --git a/hotelReservation/vendor/github.com/klauspost/compress/huff0/README.md b/hotelReservation/vendor/github.com/klauspost/compress/huff0/README.md
new file mode 100644
index 000000000..8b6e5c663
--- /dev/null
+++ b/hotelReservation/vendor/github.com/klauspost/compress/huff0/README.md
@@ -0,0 +1,89 @@
+# Huff0 entropy compression
+
+This package provides Huff0 encoding and decoding as used in zstd.
+
+[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders) is
+a Huffman codec designed for modern CPUs, featuring OoO (Out of Order) operations on multiple ALUs
+(Arithmetic Logic Units), achieving extremely fast compression and decompression speeds.
+
+This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
+It does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders do,
+but it can be used as a secondary step for compressors (like Snappy) that do not do entropy encoding.
+
+* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0)
+
+## News
+
+This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package.
+
+This ensures that most functionality is well tested.
+
+# Usage
+
+This package provides a low level interface that allows compressing single independent blocks.
+
+Each block is separate, and there are no built-in integrity checks.
+This means that the caller should keep track of block sizes and also do checksums if needed.
+
+Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and
+[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions.
+You must provide input and will receive the output and maybe an error.
+
+These error values can be returned:
+
+| Error               | Description                                                                  |
+|---------------------|------------------------------------------------------------------------------|
+| `<nil>`             | Everything ok, output is returned                                            |
+| `ErrIncompressible` | Returned when input is judged to be too hard to compress                     |
+| `ErrUseRLE`         | Returned from the compressor when the input is a single byte value repeated  |
+| `ErrTooBig`         | Returned if the input block exceeds the maximum allowed size (128 KiB)       |
+| `(error)`           | An internal error occurred.                                                  |
+
+
+As can be seen above, some of these errors will be returned even under normal operation, so it is important to handle them.
+
+To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object
+that can be re-used for successive calls (see the sketch below).
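+
+As a hedged end-to-end sketch combining `Compress1X` with the `ReadTable`/`Decompress1X` decoding steps described further down (the payload is made up and error handling is abbreviated):
+
+```
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+
+	"github.com/klauspost/compress/huff0"
+)
+
+func main() {
+	in := bytes.Repeat([]byte("huff0 example payload "), 100)
+
+	var c huff0.Scratch
+	// With a fresh Scratch there is no previous table, so the table is
+	// emitted in front of the compressed data.
+	comp, _, err := huff0.Compress1X(in, &c)
+	if err != nil {
+		// ErrIncompressible, ErrUseRLE and ErrTooBig must be handled by
+		// encoding the block differently.
+		log.Fatal(err)
+	}
+
+	// Decoding: read the table, then decompress the remaining data.
+	var d huff0.Scratch
+	dec, remain, err := huff0.ReadTable(comp, &d)
+	if err != nil {
+		log.Fatal(err)
+	}
+	got, err := dec.Decompress1X(remain)
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(bytes.Equal(got, in)) // true
+}
+```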
+Both compression and decompression accept a `Scratch` object, and the same
+object can be used for both.
+
+Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using it
+you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
+
+The `Scratch` object will retain state that allows re-using previous tables for encoding and decoding.
+
+## Tables and re-use
+
+Huff0 allows reusing tables from the previous block to save space if that is expected to give better or faster results.
+
+The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy)
+that controls this behaviour. See the documentation for details. This can be altered between each block.
+
+Do note, however, that this information is *not* stored in the output block, so it is up to the users of the package to
+record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called,
+based on the boolean reported back from the CompressXX call.
+
+If you want to store the table separately from the data, you can access them as `OutData` and `OutTable` on the
+[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object.
+
+## Decompressing
+
+The first part of decoding is to initialize the decoding tables through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable).
+You can supply the complete block to `ReadTable`, and it will return the data part of the block,
+which can be given to the decompressor.
+
+Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X)
+or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function.
+
+For concurrently decompressing content with a fixed table, a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested; it will remain correct as long as the scratch is unchanged. The capacity of the provided destination slice indicates the expected output size.
+
+You must provide the output from the compression stage, at exactly the size you got back. If you receive an error,
+your input was likely corrupted.
+
+It is important to note that a successful decoding does *not* mean your output matches your original input.
+There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
+
+# Contributing
+
+Contributions are always welcome. Be aware that adding public functions will require good justification, and breaking
+changes will likely not be accepted. If in doubt, open an issue before writing the PR.
diff --git a/hotelReservation/vendor/github.com/klauspost/compress/huff0/bitreader.go b/hotelReservation/vendor/github.com/klauspost/compress/huff0/bitreader.go
new file mode 100644
index 000000000..a4979e886
--- /dev/null
+++ b/hotelReservation/vendor/github.com/klauspost/compress/huff0/bitreader.go
@@ -0,0 +1,329 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package huff0
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+)
+
+// bitReader reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBit32(uint32(v))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) peekBitsFast(n uint8) uint16 { + const regMask = 64 - 1 + v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 +} + +func (b *bitReader) advance(n uint8) { + b.bitsRead += n +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReaderBytes struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReaderBytes) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.advance(8 - uint8(highBit32(uint32(v)))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. 
+// There are no checks if the buffer is filled. +func (b *bitReaderBytes) peekByteFast() uint8 { + got := uint8(b.value >> 56) + return got +} + +func (b *bitReaderBytes) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderBytes) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << (b.bitsRead - 32) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read. +func (b *bitReaderBytes) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReaderBytes) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << (b.bitsRead - 32) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << (b.bitsRead - 8) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReaderBytes) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderBytes) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +// bitReaderShifted reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReaderShifted struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReaderShifted) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.advance(8 - uint8(highBit32(uint32(v)))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { + return uint16(b.value >> ((64 - n) & 63)) +} + +func (b *bitReaderShifted) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderShifted) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read. 
+func (b *bitReaderShifted) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReaderShifted) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << ((b.bitsRead - 8) & 63) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReaderShifted) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderShifted) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/hotelReservation/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/hotelReservation/vendor/github.com/klauspost/compress/huff0/bitwriter.go new file mode 100644 index 000000000..6bce4e87d --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -0,0 +1,210 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +import "fmt" + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// encSymbol will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encSymbol(ct cTable, symbol byte) { + enc := ct[symbol] + b.bitContainer |= uint64(enc.val) << (b.nBits & 63) + if false { + if enc.nBits == 0 { + panic("nbits 0") + } + } + b.nBits += enc.nBits +} + +// encTwoSymbols will add up to 32 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. 
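+// (Editor's note, illustrative: with entry A = {val: 0b101, nBits: 3} and
+// entry B = {val: 0b01, nBits: 2}, the combined value is 0b01_101 (B shifted
+// left by A's bit count), and 5 bits are appended to the container in one go.)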
+func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { + encA := ct[av] + encB := ct[bv] + sh := b.nBits & 63 + combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63)) + b.bitContainer |= combined << sh + if false { + if encA.nBits == 0 { + panic("nbitsA 0") + } + if encB.nBits == 0 { + panic("nbitsB 0") + } + } + b.nBits += encA.nBits + encB.nBits +} + +// addBits16ZeroNC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +// This is fastest if bits can be zero. +func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { + if bits == 0 { + return + } + value <<= (16 - bits) & 15 + value >>= (16 - bits) & 15 + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush will flush all pending full bytes. +// There will be at least 56 bits available for writing when this has been called. +// Using flush32 is faster, but leaves less space for writing. +func (b *bitWriter) flush() { + v := b.nBits >> 3 + switch v { + case 0: + return + case 1: + b.out = append(b.out, + byte(b.bitContainer), + ) + b.bitContainer >>= 1 << 3 + case 2: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + ) + b.bitContainer >>= 2 << 3 + case 3: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + ) + b.bitContainer >>= 3 << 3 + case 4: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + ) + b.bitContainer >>= 4 << 3 + case 5: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + ) + b.bitContainer >>= 5 << 3 + case 6: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + ) + b.bitContainer >>= 6 << 3 + case 7: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + ) + b.bitContainer >>= 7 << 3 + case 8: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + byte(b.bitContainer>>56), + ) + b.bitContainer = 0 + b.nBits = 0 + return + default: + panic(fmt.Errorf("bits (%d) > 64", b.nBits)) + } + b.nBits &= 7 +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() error { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. 
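+	// (Editor's note: the 1-bit end mark written above is how the reverse
+	// bitReader finds the true end of the stream: init() locates the highest
+	// set bit of the last byte and skips everything above it.)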
+	b.flushAlign()
+	return nil
+}
+
+// reset and continue writing by appending to out.
+func (b *bitWriter) reset(out []byte) {
+	b.bitContainer = 0
+	b.nBits = 0
+	b.out = out
+}
diff --git a/hotelReservation/vendor/github.com/klauspost/compress/huff0/bytereader.go b/hotelReservation/vendor/github.com/klauspost/compress/huff0/bytereader.go
new file mode 100644
index 000000000..50bcdf6ea
--- /dev/null
+++ b/hotelReservation/vendor/github.com/klauspost/compress/huff0/bytereader.go
@@ -0,0 +1,54 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package huff0
+
+// byteReader provides a byte reader that reads
+// little endian values from a byte stream.
+// The input stream is manually advanced.
+// The reader performs no bounds checks.
+type byteReader struct {
+	b   []byte
+	off int
+}
+
+// init will initialize the reader and set the input.
+func (b *byteReader) init(in []byte) {
+	b.b = in
+	b.off = 0
+}
+
+// advance the stream by n bytes.
+func (b *byteReader) advance(n uint) {
+	b.off += int(n)
+}
+
+// Int32 returns a little endian int32 starting at the current offset.
+func (b byteReader) Int32() int32 {
+	v3 := int32(b.b[b.off+3])
+	v2 := int32(b.b[b.off+2])
+	v1 := int32(b.b[b.off+1])
+	v0 := int32(b.b[b.off])
+	return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
+}
+
+// Uint32 returns a little endian uint32 starting at the current offset.
+func (b byteReader) Uint32() uint32 {
+	v3 := uint32(b.b[b.off+3])
+	v2 := uint32(b.b[b.off+2])
+	v1 := uint32(b.b[b.off+1])
+	v0 := uint32(b.b[b.off])
+	return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
+}
+
+// unread returns the unread portion of the input.
+func (b byteReader) unread() []byte {
+	return b.b[b.off:]
+}
+
+// remain will return the number of bytes remaining.
+func (b byteReader) remain() int {
+	return len(b.b) - b.off
+}
diff --git a/hotelReservation/vendor/github.com/klauspost/compress/huff0/compress.go b/hotelReservation/vendor/github.com/klauspost/compress/huff0/compress.go
new file mode 100644
index 000000000..8323dc053
--- /dev/null
+++ b/hotelReservation/vendor/github.com/klauspost/compress/huff0/compress.go
@@ -0,0 +1,720 @@
+package huff0
+
+import (
+	"fmt"
+	"runtime"
+	"sync"
+)
+
+// Compress1X will compress the input.
+// The output can be decoded using Decompress1X.
+// Supply a Scratch object. The scratch object contains state about re-use,
+// so when sharing across independent encodes, be sure to set the re-use policy.
+func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) {
+	s, err = s.prepare(in)
+	if err != nil {
+		return nil, false, err
+	}
+	return compress(in, s, s.compress1X)
+}
+
+// Compress4X will compress the input. The input is split into 4 independent blocks
+// and compressed similarly to Compress1X.
+// The output can be decoded using Decompress4X.
+// Supply a Scratch object. The scratch object contains state about re-use,
+// so when sharing across independent encodes, be sure to set the re-use policy.
+func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) {
+	s, err = s.prepare(in)
+	if err != nil {
+		return nil, false, err
+	}
+	if false {
+		// TODO: compress4Xp only slightly faster.
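+		// (Editor's note: this disabled branch would route large inputs to the
+		// parallel compress4Xp below; upstream leaves it off since the speedup
+		// is only slight, per the TODO above.)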
+ const parallelThreshold = 8 << 10 + if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 { + return compress(in, s, s.compress4X) + } + return compress(in, s, s.compress4Xp) + } + return compress(in, s, s.compress4X) +} + +func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) { + // Nuke previous table if we cannot reuse anyway. + if s.Reuse == ReusePolicyNone { + s.prevTable = s.prevTable[:0] + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return nil, false, ErrIncompressible + } + // One symbol, use RLE + return nil, false, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, false, ErrIncompressible + } + if s.Reuse == ReusePolicyMust && !canReuse { + // We must reuse, but we can't. + return nil, false, ErrIncompressible + } + if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse { + keepTable := s.cTable + keepTL := s.actualTableLog + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + s.cTable = keepTable + s.actualTableLog = keepTL + if err == nil && len(s.Out) < wantSize { + s.OutData = s.Out + return s.Out, true, nil + } + if s.Reuse == ReusePolicyMust { + return nil, false, ErrIncompressible + } + // Do not attempt to re-use later. + s.prevTable = s.prevTable[:0] + } + + // Calculate new table. + err = s.buildCTable() + if err != nil { + return nil, false, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + if s.Reuse == ReusePolicyAllow && canReuse { + hSize := len(s.Out) + oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen]) + newSize := s.cTable.estimateSize(s.count[:s.symbolLen]) + if oldSize <= hSize+newSize || hSize+12 >= wantSize { + // Retain cTable even if we re-use. + keepTable := s.cTable + keepTL := s.actualTableLog + + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + + // Restore ctable. + s.cTable = keepTable + s.actualTableLog = keepTL + if err != nil { + return nil, false, err + } + if len(s.Out) >= wantSize { + return nil, false, ErrIncompressible + } + s.OutData = s.Out + return s.Out, true, nil + } + } + + // Use new table + err = s.cTable.write(s) + if err != nil { + s.OutTable = nil + return nil, false, err + } + s.OutTable = s.Out + + // Compress using new table + s.Out, err = compressor(in) + if err != nil { + s.OutTable = nil + return nil, false, err + } + if len(s.Out) >= wantSize { + s.OutTable = nil + return nil, false, ErrIncompressible + } + // Move current table into previous. 
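+	// (Editor's note: the swap below keeps the just-used cTable available as
+	// prevTable for the next block's reuse checks, while recycling the old
+	// prevTable's backing array as the next cTable.)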
+ s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0] + s.OutData = s.Out[len(s.OutTable):] + return s.Out, false, nil +} + +// EstimateSizes will estimate the data sizes +func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) { + s, err = s.prepare(in) + if err != nil { + return 0, 0, 0, err + } + + // Create histogram, if none was provided. + tableSz, dataSz, reuseSz = -1, -1, -1 + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return 0, 0, 0, ErrIncompressible + } + // One symbol, use RLE + return 0, 0, 0, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return 0, 0, 0, ErrIncompressible + } + + // Calculate new table. + err = s.buildCTable() + if err != nil { + return 0, 0, 0, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + tableSz, err = s.cTable.estTableSize(s) + if err != nil { + return 0, 0, 0, err + } + if canReuse { + reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen]) + } + dataSz = s.cTable.estimateSize(s.count[:s.symbolLen]) + + // Restore + return tableSz, dataSz, reuseSz, nil +} + +func (s *Scratch) compress1X(src []byte) ([]byte, error) { + return s.compress1xDo(s.Out, src) +} + +func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) { + var bw = bitWriter{out: dst} + + // N is length divisible by 4. + n := len(src) + n -= n & 3 + cTable := s.cTable[:256] + + // Encode last bytes. + for i := len(src) & 3; i > 0; i-- { + bw.encSymbol(cTable, src[n+i-1]) + } + n -= 4 + if s.actualTableLog <= 8 { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encTwoSymbols(cTable, tmp[3], tmp[2]) + bw.encTwoSymbols(cTable, tmp[1], tmp[0]) + } + } else { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encTwoSymbols(cTable, tmp[3], tmp[2]) + bw.flush32() + bw.encTwoSymbols(cTable, tmp[1], tmp[0]) + } + } + err := bw.close() + return bw.out, err +} + +var sixZeros [6]byte + +func (s *Scratch) compress4X(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + segmentSize := (len(src) + 3) / 4 + + // Add placeholder for output length + offsetIdx := len(s.Out) + s.Out = append(s.Out, sixZeros[:]...) + + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + var err error + idx := len(s.Out) + s.Out, err = s.compress1xDo(s.Out, toDo) + if err != nil { + return nil, err + } + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + length := len(s.Out) - idx + s.Out[i*2+offsetIdx] = byte(length) + s.Out[i*2+offsetIdx+1] = byte(length >> 8) + } + } + + return s.Out, nil +} + +// compress4Xp will compress 4 streams using separate goroutines. 
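+// (Editor's note: the output layout matches compress4X above: a 6-byte header
+// holding three little-endian uint16 stream lengths; the length of the fourth
+// stream is implied by the remainder of the block.)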
+func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + // Add placeholder for output length + s.Out = s.Out[:6] + + segmentSize := (len(src) + 3) / 4 + var wg sync.WaitGroup + var errs [4]error + wg.Add(4) + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + // Separate goroutine for each block. + go func(i int) { + s.tmpOut[i], errs[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) + wg.Done() + }(i) + } + wg.Wait() + for i := 0; i < 4; i++ { + if errs[i] != nil { + return nil, errs[i] + } + o := s.tmpOut[i] + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + s.Out[i*2] = byte(len(o)) + s.Out[i*2+1] = byte(len(o) >> 8) + } + + // Write output. + s.Out = append(s.Out, o...) + } + return s.Out, nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. +func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { + reuse = true + for _, v := range in { + s.count[v]++ + } + m := uint32(0) + if len(s.prevTable) > 0 { + for i, v := range s.count[:] { + if v > m { + m = v + } + if v > 0 { + s.symbolLen = uint16(i) + 1 + if i >= len(s.prevTable) { + reuse = false + } else { + if s.prevTable[i].nBits == 0 { + reuse = false + } + } + } + } + return int(m), reuse + } + for i, v := range s.count[:] { + if v > m { + m = v + } + if v > 0 { + s.symbolLen = uint16(i) + 1 + } + } + return int(m), false +} + +func (s *Scratch) canUseTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 && c[i].nBits == 0 { + return false + } + } + return true +} + +func (s *Scratch) validateTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 { + if c[i].nBits == 0 { + return false + } + if c[i].nBits > s.actualTableLog { + return false + } + } + } + return true +} + +// minTableLog provides the minimum logSize to safely represent a distribution. 
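+// (Editor's note: it is the smaller of highBit32(remaining)+1 and
+// highBit32(symbolLen-1)+2; e.g. with all 256 symbols present the second
+// bound gives highBit32(255)+2 = 9 bits.)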
+func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBit32(uint32(s.br.remain())) + 1 + minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > tableLogMax { + tableLog = tableLogMax + } + s.actualTableLog = tableLog +} + +type cTableEntry struct { + val uint16 + nBits uint8 + // We have 8 bits extra +} + +const huffNodesMask = huffNodesLen - 1 + +func (s *Scratch) buildCTable() error { + s.optimalTableLog() + s.huffSort() + if cap(s.cTable) < maxSymbolValue+1 { + s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1) + } else { + s.cTable = s.cTable[:s.symbolLen] + for i := range s.cTable { + s.cTable[i] = cTableEntry{} + } + } + + var startNode = int16(s.symbolLen) + nonNullRank := s.symbolLen - 1 + + nodeNb := startNode + huffNode := s.nodes[1 : huffNodesLen+1] + + // This overlays the slice above, but allows "-1" index lookups. + // Different from reference implementation. + huffNode0 := s.nodes[0 : huffNodesLen+1] + + for huffNode[nonNullRank].count == 0 { + nonNullRank-- + } + + lowS := int16(nonNullRank) + nodeRoot := nodeNb + lowS - 1 + lowN := nodeNb + huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count + huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb) + nodeNb++ + lowS -= 2 + for n := nodeNb; n <= nodeRoot; n++ { + huffNode[n].count = 1 << 30 + } + // fake entry, strong barrier + huffNode0[0].count = 1 << 31 + + // create parents + for nodeNb <= nodeRoot { + var n1, n2 int16 + if huffNode0[lowS+1].count < huffNode0[lowN+1].count { + n1 = lowS + lowS-- + } else { + n1 = lowN + lowN++ + } + if huffNode0[lowS+1].count < huffNode0[lowN+1].count { + n2 = lowS + lowS-- + } else { + n2 = lowN + lowN++ + } + + huffNode[nodeNb].count = huffNode0[n1+1].count + huffNode0[n2+1].count + huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb) + nodeNb++ + } + + // distribute weights (unlimited tree height) + huffNode[nodeRoot].nbBits = 0 + for n := nodeRoot - 1; n >= startNode; n-- { + huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 + } + for n := uint16(0); n <= nonNullRank; n++ { + huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 + } + s.actualTableLog = s.setMaxHeight(int(nonNullRank)) + maxNbBits := s.actualTableLog + + // fill result into tree (val, nbBits) + if maxNbBits > tableLogMax { + return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax) + } + var nbPerRank [tableLogMax + 1]uint16 + var valPerRank [16]uint16 + for _, v := range huffNode[:nonNullRank+1] { + nbPerRank[v.nbBits]++ + } + // determine stating value per rank + { + min := uint16(0) + for n := maxNbBits; n > 0; n-- { + // get starting value within each rank + valPerRank[n] = min + min += nbPerRank[n] + min >>= 1 + } + } + + // push nbBits per symbol, symbol order + for _, v := range huffNode[:nonNullRank+1] { + s.cTable[v.symbol].nBits = v.nbBits + } + + // assign value within 
rank, symbol order + t := s.cTable[:s.symbolLen] + for n, val := range t { + nbits := val.nBits & 15 + v := valPerRank[nbits] + t[n].val = v + valPerRank[nbits] = v + 1 + } + + return nil +} + +// huffSort will sort symbols, decreasing order. +func (s *Scratch) huffSort() { + type rankPos struct { + base uint32 + current uint32 + } + + // Clear nodes + nodes := s.nodes[:huffNodesLen+1] + s.nodes = nodes + nodes = nodes[1 : huffNodesLen+1] + + // Sort into buckets based on length of symbol count. + var rank [32]rankPos + for _, v := range s.count[:s.symbolLen] { + r := highBit32(v+1) & 31 + rank[r].base++ + } + // maxBitLength is log2(BlockSizeMax) + 1 + const maxBitLength = 18 + 1 + for n := maxBitLength; n > 0; n-- { + rank[n-1].base += rank[n].base + } + for n := range rank[:maxBitLength] { + rank[n].current = rank[n].base + } + for n, c := range s.count[:s.symbolLen] { + r := (highBit32(c+1) + 1) & 31 + pos := rank[r].current + rank[r].current++ + prev := nodes[(pos-1)&huffNodesMask] + for pos > rank[r].base && c > prev.count { + nodes[pos&huffNodesMask] = prev + pos-- + prev = nodes[(pos-1)&huffNodesMask] + } + nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)} + } +} + +func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { + maxNbBits := s.actualTableLog + huffNode := s.nodes[1 : huffNodesLen+1] + //huffNode = huffNode[: huffNodesLen] + + largestBits := huffNode[lastNonNull].nbBits + + // early exit : no elt > maxNbBits + if largestBits <= maxNbBits { + return largestBits + } + totalCost := int(0) + baseCost := int(1) << (largestBits - maxNbBits) + n := uint32(lastNonNull) + + for huffNode[n].nbBits > maxNbBits { + totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits)) + huffNode[n].nbBits = maxNbBits + n-- + } + // n stops at huffNode[n].nbBits <= maxNbBits + + for huffNode[n].nbBits == maxNbBits { + n-- + } + // n end at index of smallest symbol using < maxNbBits + + // renorm totalCost + totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */ + + // repay normalized cost + { + const noSymbol = 0xF0F0F0F0 + var rankLast [tableLogMax + 2]uint32 + + for i := range rankLast[:] { + rankLast[i] = noSymbol + } + + // Get pos of last (smallest) symbol per rank + { + currentNbBits := maxNbBits + for pos := int(n); pos >= 0; pos-- { + if huffNode[pos].nbBits >= currentNbBits { + continue + } + currentNbBits = huffNode[pos].nbBits // < maxNbBits + rankLast[maxNbBits-currentNbBits] = uint32(pos) + } + } + + for totalCost > 0 { + nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1 + + for ; nBitsToDecrease > 1; nBitsToDecrease-- { + highPos := rankLast[nBitsToDecrease] + lowPos := rankLast[nBitsToDecrease-1] + if highPos == noSymbol { + continue + } + if lowPos == noSymbol { + break + } + highTotal := huffNode[highPos].count + lowTotal := 2 * huffNode[lowPos].count + if highTotal <= lowTotal { + break + } + } + // only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
+ // HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary + // FIXME: try to remove + for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) { + nBitsToDecrease++ + } + totalCost -= 1 << (nBitsToDecrease - 1) + if rankLast[nBitsToDecrease-1] == noSymbol { + // this rank is no longer empty + rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] + } + huffNode[rankLast[nBitsToDecrease]].nbBits++ + if rankLast[nBitsToDecrease] == 0 { + /* special case, reached largest symbol */ + rankLast[nBitsToDecrease] = noSymbol + } else { + rankLast[nBitsToDecrease]-- + if huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease { + rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ + } + } + } + + for totalCost < 0 { /* Sometimes, cost correction overshoot */ + if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ + for huffNode[n].nbBits == maxNbBits { + n-- + } + huffNode[n+1].nbBits-- + rankLast[1] = n + 1 + totalCost++ + continue + } + huffNode[rankLast[1]+1].nbBits-- + rankLast[1]++ + totalCost++ + } + } + return maxNbBits +} + +type nodeElt struct { + count uint32 + parent uint16 + symbol byte + nbBits uint8 +} diff --git a/hotelReservation/vendor/github.com/klauspost/compress/huff0/decompress.go b/hotelReservation/vendor/github.com/klauspost/compress/huff0/decompress.go new file mode 100644 index 000000000..9b7cc8e97 --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -0,0 +1,1371 @@ +package huff0 + +import ( + "errors" + "fmt" + "io" + + "github.com/klauspost/compress/fse" +) + +type dTable struct { + single []dEntrySingle + double []dEntryDouble +} + +// single-symbols decoding +type dEntrySingle struct { + entry uint16 +} + +// double-symbols decoding +type dEntryDouble struct { + seq uint16 + nBits uint8 + len uint8 +} + +// Uses special code for all tables that are < 8 bits. +const use8BitTables = true + +// ReadTable will read a table from the input. +// The size of the input may be larger than the table definition. +// Any content remaining after the table definition will be returned. +// If no Scratch is provided a new one is allocated. +// The returned Scratch can be used for encoding or decoding input using this table. 
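+// (Editor's note: the first header byte selects one of two weight encodings
+// parsed below: values >= 128 mean directly stored 4-bit weights, while lower
+// values give the byte size of an FSE-compressed weight stream.)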
+func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { + s, err = s.prepare(in) + if err != nil { + return s, nil, err + } + if len(in) <= 1 { + return s, nil, errors.New("input too small for table") + } + iSize := in[0] + in = in[1:] + if iSize >= 128 { + // Uncompressed + oSize := iSize - 127 + iSize = (oSize + 1) / 2 + if int(iSize) > len(in) { + return s, nil, errors.New("input too small for table") + } + for n := uint8(0); n < oSize; n += 2 { + v := in[n/2] + s.huffWeight[n] = v >> 4 + s.huffWeight[n+1] = v & 15 + } + s.symbolLen = uint16(oSize) + in = in[iSize:] + } else { + if len(in) < int(iSize) { + return s, nil, fmt.Errorf("input too small for table, want %d bytes, have %d", iSize, len(in)) + } + // FSE compressed weights + s.fse.DecompressLimit = 255 + hw := s.huffWeight[:] + s.fse.Out = hw + b, err := fse.Decompress(in[:iSize], s.fse) + s.fse.Out = nil + if err != nil { + return s, nil, err + } + if len(b) > 255 { + return s, nil, errors.New("corrupt input: output table too large") + } + s.symbolLen = uint16(len(b)) + in = in[iSize:] + } + + // collect weight stats + var rankStats [16]uint32 + weightTotal := uint32(0) + for _, v := range s.huffWeight[:s.symbolLen] { + if v > tableLogMax { + return s, nil, errors.New("corrupt input: weight too large") + } + v2 := v & 15 + rankStats[v2]++ + // (1 << (v2-1)) is slower since the compiler cannot prove that v2 isn't 0. + weightTotal += (1 << v2) >> 1 + } + if weightTotal == 0 { + return s, nil, errors.New("corrupt input: weights zero") + } + + // get last non-null symbol weight (implied, total must be 2^n) + { + tableLog := highBit32(weightTotal) + 1 + if tableLog > tableLogMax { + return s, nil, errors.New("corrupt input: tableLog too big") + } + s.actualTableLog = uint8(tableLog) + // determine last weight + { + total := uint32(1) << tableLog + rest := total - weightTotal + verif := uint32(1) << highBit32(rest) + lastWeight := highBit32(rest) + 1 + if verif != rest { + // last value must be a clean power of 2 + return s, nil, errors.New("corrupt input: last value not power of two") + } + s.huffWeight[s.symbolLen] = uint8(lastWeight) + s.symbolLen++ + rankStats[lastWeight]++ + } + } + + if (rankStats[1] < 2) || (rankStats[1]&1 != 0) { + // by construction : at least 2 elts of rank 1, must be even + return s, nil, errors.New("corrupt input: min elt size, even check failed ") + } + + // TODO: Choose between single/double symbol decoding + + // Calculate starting value for each rank + { + var nextRankStart uint32 + for n := uint8(1); n < s.actualTableLog+1; n++ { + current := nextRankStart + nextRankStart += rankStats[n] << (n - 1) + rankStats[n] = current + } + } + + // fill DTable (always full size) + tSize := 1 << tableLogMax + if len(s.dt.single) != tSize { + s.dt.single = make([]dEntrySingle, tSize) + } + cTable := s.prevTable + if cap(cTable) < maxSymbolValue+1 { + cTable = make([]cTableEntry, 0, maxSymbolValue+1) + } + cTable = cTable[:maxSymbolValue+1] + s.prevTable = cTable[:s.symbolLen] + s.prevTableLog = s.actualTableLog + + for n, w := range s.huffWeight[:s.symbolLen] { + if w == 0 { + cTable[n] = cTableEntry{ + val: 0, + nBits: 0, + } + continue + } + length := (uint32(1) << w) >> 1 + d := dEntrySingle{ + entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8), + } + + rank := &rankStats[w] + cTable[n] = cTableEntry{ + val: uint16(*rank >> (w - 1)), + nBits: uint8(d.entry), + } + + single := s.dt.single[*rank : *rank+length] + for i := range single { + single[i] = d + } + *rank += length + } + + 
return s, in, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) { + if cap(s.Out) < s.MaxDecodedSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:s.MaxDecodedSize] + s.Out, err = s.Decoder().Decompress1X(s.Out, in) + return s.Out, err +} + +// Decompress4X will decompress a 4X encoded stream. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// The length of the supplied input must match the end of a block exactly. +// The destination size of the uncompressed data must be known and provided. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) { + if dstSize > s.MaxDecodedSize { + return nil, ErrMaxDecodedSizeExceeded + } + if cap(s.Out) < dstSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:dstSize] + s.Out, err = s.Decoder().Decompress4X(s.Out, in) + return s.Out, err +} + +// Decoder will return a stateless decoder that can be used by multiple +// decompressors concurrently. +// Before this is called, the table must be initialized with ReadTable. +// The Decoder is still linked to the scratch buffer so that cannot be reused. +// However, it is safe to discard the scratch. +func (s *Scratch) Decoder() *Decoder { + return &Decoder{ + dt: s.dt, + actualTableLog: s.actualTableLog, + } +} + +// Decoder provides stateless decoding. +type Decoder struct { + dt dTable + actualTableLog uint8 +} + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress1X8Bit(dst, src) + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + dt := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + var buf [256]byte + var off uint8 + + for br.off >= 8 { + br.fillFast() + v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + // Refill + br.fillFast() + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) 
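+	// (Editor's note: the loop above decodes four symbols per iteration into a
+	// 256-byte staging buffer to avoid per-symbol append costs; the loop below
+	// drains the last few bytes one symbol at a time.)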
+ + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if false && br.bitsRead >= 32 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value = (br.value << 32) | uint64(low) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value = (br.value << 8) | uint64(br.in[br.off-1]) + br.bitsRead -= 8 + br.off-- + } + } + } + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress1X8BitExactly(dst, src) + } + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. + var buf [256]byte + var off uint8 + + switch d.actualTableLog { + case 8: + const shift = 8 - 8 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 7: + const shift = 8 - 7 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 6: + const shift = 8 - 6 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + case 5: + const shift = 8 - 5 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 4: + const shift = 8 - 4 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 3: + const shift = 8 - 3 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 2: + const shift = 8 - 2 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 1: + const shift = 8 - 1 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + default: + return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog) + } + + if len(dst)+int(off) > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) 
+ + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + shift := (8 - d.actualTableLog) & 7 + + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()>>shift] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. + var buf [256]byte + var off uint8 + + const shift = 56 + + //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. 
+func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + var buf [256]byte + var off uint8 + var decoded int + + // Decode 2 values from each decoder/loop. + const bufoff = 256 / 4 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + br[stream].advance(uint8(v.entry)) + buf[off+bufoff*stream] = uint8(v.entry >> 8) + + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v2 := single[val2&tlMask] + br[stream2].advance(uint8(v2.entry)) + buf[off+bufoff*stream2] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + br[stream].advance(uint8(v.entry)) + buf[off+bufoff*stream+1] = uint8(v.entry >> 8) + + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v2 = single[val2&tlMask] + br[stream2].advance(uint8(v2.entry)) + buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + br[stream].advance(uint8(v.entry)) + buf[off+bufoff*stream] = uint8(v.entry >> 8) + + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v2 := single[val2&tlMask] + br[stream2].advance(uint8(v2.entry)) + buf[off+bufoff*stream2] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + br[stream].advance(uint8(v.entry)) + buf[off+bufoff*stream+1] = uint8(v.entry >> 8) + + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v2 = single[val2&tlMask] + br[stream2].advance(uint8(v2.entry)) + buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) + } + + off += 2 + + if off == bufoff { + if bufoff > dstEvery { + return nil, errors.New("corruption detected: stream overrun 1") + } + copy(out, buf[:bufoff]) + copy(out[dstEvery:], buf[bufoff:bufoff*2]) + copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) + copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) + off = 0 + out = out[bufoff:] + decoded += 256 + // There must at least be 3 buffers left. 
+			if len(out) < dstEvery*3 {
+				return nil, errors.New("corruption detected: stream overrun 2")
+			}
+		}
+	}
+	if off > 0 {
+		ioff := int(off)
+		if len(out) < dstEvery*3+ioff {
+			return nil, errors.New("corruption detected: stream overrun 3")
+		}
+		copy(out, buf[:off])
+		copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
+		copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
+		copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
+		decoded += int(off) * 4
+		out = out[off:]
+	}
+
+	// Decode remaining.
+	for i := range br {
+		offset := dstEvery * i
+		br := &br[i]
+		bitsLeft := br.off*8 + uint(64-br.bitsRead)
+		for bitsLeft > 0 {
+			br.fill()
+			// Disabled manual inline of br.fill(), kept for reference:
+			if false && br.bitsRead >= 32 {
+				if br.off >= 4 {
+					v := br.in[br.off-4:]
+					v = v[:4]
+					low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+					br.value = (br.value << 32) | uint64(low)
+					br.bitsRead -= 32
+					br.off -= 4
+				} else {
+					for br.off > 0 {
+						br.value = (br.value << 8) | uint64(br.in[br.off-1])
+						br.bitsRead -= 8
+						br.off--
+					}
+				}
+			}
+			// end inline...
+			if offset >= len(out) {
+				return nil, errors.New("corruption detected: stream overrun 4")
+			}
+
+			// Read value and increment offset.
+			val := br.peekBitsFast(d.actualTableLog)
+			v := single[val&tlMask].entry
+			nBits := uint8(v)
+			br.advance(nBits)
+			bitsLeft -= uint(nBits)
+			out[offset] = uint8(v >> 8)
+			offset++
+		}
+		decoded += offset - dstEvery*i
+		err = br.close()
+		if err != nil {
+			return nil, err
+		}
+	}
+	if dstSize != decoded {
+		return nil, errors.New("corruption detected: short output block")
+	}
+	return dst, nil
+}
+
+// decompress4X8bit will decompress a 4X encoded stream with tablelog <= 8.
+// The length of the supplied input must match the end of a block exactly.
+// The *capacity* of the dst slice must match the destination size of
+// the uncompressed data exactly.
+func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
+	if d.actualTableLog == 8 {
+		return d.decompress4X8bitExactly(dst, src)
+	}
+
+	var br [4]bitReaderBytes
+	start := 6
+	for i := 0; i < 3; i++ {
+		length := int(src[i*2]) | (int(src[i*2+1]) << 8)
+		if start+length >= len(src) {
+			return nil, errors.New("truncated input (or invalid offset)")
+		}
+		err := br[i].init(src[start : start+length])
+		if err != nil {
+			return nil, err
+		}
+		start += length
+	}
+	err := br[3].init(src[start:])
+	if err != nil {
+		return nil, err
+	}
+
+	// destination, offset to match first output
+	dstSize := cap(dst)
+	dst = dst[:dstSize]
+	out := dst
+	dstEvery := (dstSize + 3) / 4
+
+	shift := (8 - d.actualTableLog) & 7
+
+	const tlSize = 1 << 8
+	single := d.dt.single[:tlSize]
+
+	// Use temp table to avoid bound checks/append penalty.
+	var buf [256]byte
+	var off uint8
+	var decoded int
+
+	// Decode 4 values from each decoder/loop.
+	const bufoff = 256 / 4
+	for {
+		if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
+			break
+		}
+
+		{
+			// Interleave 2 decodes.
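+			// Two streams are processed per block so their independent table
+			// lookups can overlap.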
+ const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + v := single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 := single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+1] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+2] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+3] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + v := single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 := single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+1] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+2] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+3] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + } + + off += 4 + + if off == bufoff { + if bufoff > dstEvery { + return nil, errors.New("corruption detected: stream overrun 1") + } + copy(out, buf[:bufoff]) + copy(out[dstEvery:], buf[bufoff:bufoff*2]) + copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) + copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) + off = 0 + out = out[bufoff:] + decoded += 256 + // There must at least be 3 buffers left. + if len(out) < dstEvery*3 { + return nil, errors.New("corruption detected: stream overrun 2") + } + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[:off]) + copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) + copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) + copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. 
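+	// The fast loop above needs at least 4 bytes in every stream; whatever is
+	// left is decoded one symbol at a time.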
+	for i := range br {
+		offset := dstEvery * i
+		br := &br[i]
+		bitsLeft := int(br.off*8) + int(64-br.bitsRead)
+		for bitsLeft > 0 {
+			if br.finished() {
+				return nil, io.ErrUnexpectedEOF
+			}
+			if br.bitsRead >= 56 {
+				if br.off >= 4 {
+					v := br.in[br.off-4:]
+					v = v[:4]
+					low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+					br.value |= uint64(low) << (br.bitsRead - 32)
+					br.bitsRead -= 32
+					br.off -= 4
+				} else {
+					for br.off > 0 {
+						br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
+						br.bitsRead -= 8
+						br.off--
+					}
+				}
+			}
+			// end inline...
+			if offset >= len(out) {
+				return nil, errors.New("corruption detected: stream overrun 4")
+			}
+
+			// Read value and increment offset.
+			v := single[br.peekByteFast()>>shift].entry
+			nBits := uint8(v)
+			br.advance(nBits)
+			bitsLeft -= int(nBits)
+			out[offset] = uint8(v >> 8)
+			offset++
+		}
+		decoded += offset - dstEvery*i
+		err = br.close()
+		if err != nil {
+			return nil, err
+		}
+	}
+	if dstSize != decoded {
+		return nil, errors.New("corruption detected: short output block")
+	}
+	return dst, nil
+}
+
+// decompress4X8bitExactly will decompress a 4X encoded stream with tablelog == 8.
+// The length of the supplied input must match the end of a block exactly.
+// The *capacity* of the dst slice must match the destination size of
+// the uncompressed data exactly.
+func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
+	var br [4]bitReaderBytes
+	start := 6
+	for i := 0; i < 3; i++ {
+		length := int(src[i*2]) | (int(src[i*2+1]) << 8)
+		if start+length >= len(src) {
+			return nil, errors.New("truncated input (or invalid offset)")
+		}
+		err := br[i].init(src[start : start+length])
+		if err != nil {
+			return nil, err
+		}
+		start += length
+	}
+	err := br[3].init(src[start:])
+	if err != nil {
+		return nil, err
+	}
+
+	// destination, offset to match first output
+	dstSize := cap(dst)
+	dst = dst[:dstSize]
+	out := dst
+	dstEvery := (dstSize + 3) / 4
+
+	const shift = 0
+	const tlSize = 1 << 8
+	const tlMask = tlSize - 1
+	single := d.dt.single[:tlSize]
+
+	// Use temp table to avoid bound checks/append penalty.
+	var buf [256]byte
+	var off uint8
+	var decoded int
+
+	// Decode 4 values from each decoder/loop.
+	const bufoff = 256 / 4
+	for {
+		if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
+			break
+		}
+
+		{
+			// Interleave 2 decodes.
+ const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + v := single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 := single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+1] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+2] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+3] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + v := single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 := single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+1] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+2] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+3] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + } + + off += 4 + + if off == bufoff { + if bufoff > dstEvery { + return nil, errors.New("corruption detected: stream overrun 1") + } + copy(out, buf[:bufoff]) + copy(out[dstEvery:], buf[bufoff:bufoff*2]) + copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) + copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) + off = 0 + out = out[bufoff:] + decoded += 256 + // There must at least be 3 buffers left. + if len(out) < dstEvery*3 { + return nil, errors.New("corruption detected: stream overrun 2") + } + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[:off]) + copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) + copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) + copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. 
+	for i := range br {
+		offset := dstEvery * i
+		br := &br[i]
+		bitsLeft := int(br.off*8) + int(64-br.bitsRead)
+		for bitsLeft > 0 {
+			if br.finished() {
+				return nil, io.ErrUnexpectedEOF
+			}
+			if br.bitsRead >= 56 {
+				if br.off >= 4 {
+					v := br.in[br.off-4:]
+					v = v[:4]
+					low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+					br.value |= uint64(low) << (br.bitsRead - 32)
+					br.bitsRead -= 32
+					br.off -= 4
+				} else {
+					for br.off > 0 {
+						br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
+						br.bitsRead -= 8
+						br.off--
+					}
+				}
+			}
+			// end inline...
+			if offset >= len(out) {
+				return nil, errors.New("corruption detected: stream overrun 4")
+			}
+
+			// Read value and increment offset.
+			v := single[br.peekByteFast()>>shift].entry
+			nBits := uint8(v)
+			br.advance(nBits)
+			bitsLeft -= int(nBits)
+			out[offset] = uint8(v >> 8)
+			offset++
+		}
+		decoded += offset - dstEvery*i
+		err = br.close()
+		if err != nil {
+			return nil, err
+		}
+	}
+	if dstSize != decoded {
+		return nil, errors.New("corruption detected: short output block")
+	}
+	return dst, nil
+}
+
+// matches will compare a decoding table to a coding table.
+// Errors are written to the writer.
+// Nothing will be written if table is ok.
+func (s *Scratch) matches(ct cTable, w io.Writer) {
+	if s == nil || len(s.dt.single) == 0 {
+		return
+	}
+	dt := s.dt.single[:1<<s.actualTableLog]
+	tablelog := s.actualTableLog
+	ok := 0
+	broken := 0
+	for sym, enc := range ct {
+		errs := 0
+		broken++
+		if enc.nBits == 0 {
+			for _, dec := range dt {
+				if uint8(dec.entry>>8) == byte(sym) {
+					fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym)
+					errs++
+					break
+				}
+			}
+			if errs == 0 {
+				broken--
+			}
+			continue
+		}
+		// Unused bits in input
+		ub := tablelog - enc.nBits
+		top := enc.val << ub
+		// decoder looks at top bits.
+		dec := dt[top]
+		if uint8(dec.entry) != enc.nBits {
+			fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry))
+			errs++
+		}
+		if uint8(dec.entry>>8) != uint8(sym) {
+			fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8))
+			errs++
+		}
+		if errs > 0 {
+			fmt.Fprintf(w, "%d errors in base, stopping\n", errs)
+			continue
+		}
+		// Ensure that all combinations are covered.
+		for i := uint16(0); i < (1 << ub); i++ {
+			vval := top | i
+			dec := dt[vval]
+			if uint8(dec.entry) != enc.nBits {
+				fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry))
+				errs++
+			}
+			if uint8(dec.entry>>8) != uint8(sym) {
+				fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8))
+				errs++
+			}
+			if errs > 20 {
+				fmt.Fprintf(w, "%d errors, stopping\n", errs)
+				break
+			}
+		}
+		if errs == 0 {
+			ok++
+			broken--
+		}
+	}
+	if broken > 0 {
+		fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok)
+	}
+}
diff --git a/hotelReservation/vendor/github.com/klauspost/compress/huff0/huff0.go b/hotelReservation/vendor/github.com/klauspost/compress/huff0/huff0.go
new file mode 100644
index 000000000..3ee00ecb4
--- /dev/null
+++ b/hotelReservation/vendor/github.com/klauspost/compress/huff0/huff0.go
@@ -0,0 +1,335 @@
+// Package huff0 provides fast Huffman encoding as used in zstd.
+//
+// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details.
+package huff0
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"math/bits"
+
+	"github.com/klauspost/compress/fse"
+)
+
+const (
+	maxSymbolValue = 255
+
+	// zstandard limits tablelog to 11, see:
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description
+	tableLogMax     = 11
+	tableLogDefault = 11
+	minTablelog     = 5
+	huffNodesLen    = 512
+
+	// BlockSizeMax is the maximum input size for a single uncompressed block.
+	BlockSizeMax = 1<<18 - 1
+)
+
+var (
+	// ErrIncompressible is returned when input is judged to be too hard to compress.
+	ErrIncompressible = errors.New("input is not compressible")
+
+	// ErrUseRLE is returned from the compressor when the input is a single byte value repeated.
+	ErrUseRLE = errors.New("input is single value repeated")
+
+	// ErrTooBig is returned if the input is too large for a single block.
+	ErrTooBig = errors.New("input too big")
+
+	// ErrMaxDecodedSizeExceeded is returned when the decoded output exceeds
+	// the configured MaxDecodedSize.
+	ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded")
+)
+
+// ReusePolicy controls if and how Huffman tables are reused between blocks.
+type ReusePolicy uint8
+
+const (
+	// ReusePolicyAllow will allow reuse if it produces smaller output.
+	ReusePolicyAllow ReusePolicy = iota
+
+	// ReusePolicyPrefer will re-use aggressively if possible.
+	// This will not check if a new table will produce smaller output,
+	// except if the current table is impossible to use or
+	// compressed output is bigger than input.
+	ReusePolicyPrefer
+
+	// ReusePolicyNone will disable re-use of tables.
+	// This is slightly faster than ReusePolicyAllow but may produce larger output.
+	ReusePolicyNone
+
+	// ReusePolicyMust must allow reuse and produce smaller output.
+	ReusePolicyMust
+)
+
+// Scratch holds state and buffers that can be reused between
+// compression and decompression operations.
+type Scratch struct {
+	count [maxSymbolValue + 1]uint32
+
+	// Per block parameters.
+	// These can be used to override compression parameters of the block.
+	// Do not touch, unless you know what you are doing.
+
+	// Out is the output buffer.
+	// If the scratch is re-used before the caller is done processing the output,
+	// set this field to nil.
+	// Otherwise the output buffer will be re-used for the next compression/decompression step
+	// and allocation will be avoided.
+	Out []byte
+
+	// OutTable will contain the table data only, if a new table has been generated.
+	// Slice of the returned data.
+	OutTable []byte
+
+	// OutData will contain the compressed data.
+	// Slice of the returned data.
+	OutData []byte
+
+	// MaxDecodedSize will set the maximum allowed output size.
+	// This value will automatically be set to BlockSizeMax if not set.
+	// Decoders will return ErrMaxDecodedSizeExceeded if this limit is exceeded.
+	MaxDecodedSize int
+
+	br byteReader
+
+	// MaxSymbolValue will override the maximum symbol value of the next block.
+	MaxSymbolValue uint8
+
+	// TableLog will attempt to override the tablelog for the next block.
+	// Must be <= 11 and >= 5.
+	TableLog uint8
+
+	// Reuse will specify the reuse policy
+	Reuse ReusePolicy
+
+	// WantLogLess allows specifying a log2 reduction that should at least be achieved,
+	// otherwise the block will be returned as incompressible.
+	// The reduction should then at least be (input size >> WantLogLess)
+	// If WantLogLess == 0 any improvement will do.
+	WantLogLess uint8
+
+	symbolLen      uint16 // Length of active part of the symbol table.
+	maxCount       int    // count of the most probable symbol
+	clearCount     bool   // clear count
+	actualTableLog uint8  // Selected tablelog.
+ prevTableLog uint8 // Tablelog for previous table + prevTable cTable // Table used for previous compression. + cTable cTable // compression table + dt dTable // decompression table + nodes []nodeElt + tmpOut [4][]byte + fse *fse.Scratch + huffWeight [maxSymbolValue + 1]byte +} + +// TransferCTable will transfer the previously used compression table. +func (s *Scratch) TransferCTable(src *Scratch) { + if cap(s.prevTable) < len(src.prevTable) { + s.prevTable = make(cTable, 0, maxSymbolValue+1) + } + s.prevTable = s.prevTable[:len(src.prevTable)] + copy(s.prevTable, src.prevTable) + s.prevTableLog = src.prevTableLog +} + +func (s *Scratch) prepare(in []byte) (*Scratch, error) { + if len(in) > BlockSizeMax { + return nil, ErrTooBig + } + if s == nil { + s = &Scratch{} + } + if s.MaxSymbolValue == 0 { + s.MaxSymbolValue = maxSymbolValue + } + if s.TableLog == 0 { + s.TableLog = tableLogDefault + } + if s.TableLog > tableLogMax || s.TableLog < minTablelog { + return nil, fmt.Errorf(" invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax) + } + if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax { + s.MaxDecodedSize = BlockSizeMax + } + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + if cap(s.Out) == 0 { + s.Out = make([]byte, 0, len(in)) + } + s.Out = s.Out[:0] + + s.OutTable = nil + s.OutData = nil + if cap(s.nodes) < huffNodesLen+1 { + s.nodes = make([]nodeElt, 0, huffNodesLen+1) + } + s.nodes = s.nodes[:0] + if s.fse == nil { + s.fse = &fse.Scratch{} + } + s.br.init(in) + + return s, nil +} + +type cTable []cTableEntry + +func (c cTable) write(s *Scratch) error { + var ( + // precomputed conversion table + bitsToWeight [tableLogMax + 1]byte + huffLog = s.actualTableLog + // last weight is not saved. + maxSymbolValue = uint8(s.symbolLen - 1) + huffWeight = s.huffWeight[:256] + ) + const ( + maxFSETableLog = 6 + ) + // convert to weight + bitsToWeight[0] = 0 + for n := uint8(1); n < huffLog+1; n++ { + bitsToWeight[n] = huffLog + 1 - n + } + + // Acquire histogram for FSE. + hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := uint8(0); n < maxSymbolValue; n++ { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. + if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + s.Out = append(s.Out, uint8(len(b))) + s.Out = append(s.Out, b...) + return nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return ErrIncompressible + } + op := s.Out + // special case, pack weights 4 bits/weight. 
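+	// A first byte of 128|(n-1) signals direct representation; a decoder reads
+	// headerByte-127 weights, and the final weight is implied.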
+ op = append(op, 128|(maxSymbolValue-1)) + // be sure it doesn't cause msan issue in final combination + huffWeight[maxSymbolValue] = 0 + for n := uint16(0); n < uint16(maxSymbolValue); n += 2 { + op = append(op, (huffWeight[n]<<4)|huffWeight[n+1]) + } + s.Out = op + return nil +} + +func (c cTable) estTableSize(s *Scratch) (sz int, err error) { + var ( + // precomputed conversion table + bitsToWeight [tableLogMax + 1]byte + huffLog = s.actualTableLog + // last weight is not saved. + maxSymbolValue = uint8(s.symbolLen - 1) + huffWeight = s.huffWeight[:256] + ) + const ( + maxFSETableLog = 6 + ) + // convert to weight + bitsToWeight[0] = 0 + for n := uint8(1); n < huffLog+1; n++ { + bitsToWeight[n] = huffLog + 1 - n + } + + // Acquire histogram for FSE. + hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := uint8(0); n < maxSymbolValue; n++ { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. + if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + sz += 1 + len(b) + return sz, nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return 0, ErrIncompressible + } + // special case, pack weights 4 bits/weight. + sz += 1 + int(maxSymbolValue/2) + return sz, nil +} + +// estimateSize returns the estimated size in bytes of the input represented in the +// histogram supplied. +func (c cTable) estimateSize(hist []uint32) int { + nbBits := uint32(7) + for i, v := range c[:len(hist)] { + nbBits += uint32(v.nBits) * hist[i] + } + return int(nbBits >> 3) +} + +// minSize returns the minimum possible size considering the shannon limit. +func (s *Scratch) minSize(total int) int { + nbBits := float64(7) + fTotal := float64(total) + for _, v := range s.count[:s.symbolLen] { + n := float64(v) + if n > 0 { + nbBits += math.Log2(fTotal/n) * n + } + } + return int(nbBits) >> 3 +} + +func highBit32(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/hotelReservation/vendor/github.com/klauspost/compress/internal/snapref/LICENSE b/hotelReservation/vendor/github.com/klauspost/compress/internal/snapref/LICENSE new file mode 100644 index 000000000..6050c10f4 --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/internal/snapref/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/hotelReservation/vendor/github.com/klauspost/compress/internal/snapref/decode.go b/hotelReservation/vendor/github.com/klauspost/compress/internal/snapref/decode.go new file mode 100644 index 000000000..40796a49d --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/internal/snapref/decode.go @@ -0,0 +1,264 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Decode handles the Snappy block format, not the Snappy stream format. 
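+//
+// DecodedLen can be used to size dst in advance so that Decode does not
+// allocate a new slice.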
+func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +// +// Reader handles the Snappy stream format, not the Snappy block format. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. +func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +func (r *Reader) fill() error { + for r.i >= r.j { + if !r.readFull(r.buf[:4], true) { + return r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.decoded[:n], false) { + return r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. 
Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return r.err + } + } + + return nil +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil +} + +// ReadByte satisfies the io.ByteReader interface. +func (r *Reader) ReadByte() (byte, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + c := r.decoded[r.i] + r.i++ + return c, nil +} diff --git a/hotelReservation/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go b/hotelReservation/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go new file mode 100644 index 000000000..77395a6b8 --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go @@ -0,0 +1,113 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset >= length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/hotelReservation/vendor/github.com/klauspost/compress/internal/snapref/encode.go b/hotelReservation/vendor/github.com/klauspost/compress/internal/snapref/encode.go new file mode 100644 index 000000000..13c6040a5 --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/internal/snapref/encode.go @@ -0,0 +1,289 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Encode handles the Snappy block format, not the Snappy stream format. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. 
That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. 
+func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +// +// Writer handles the Snappy stream format, not the Snappy block format. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. 
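+		// ("At least 12.5%" is the len(uncompressed)/8 check below: the
+		// compressed body must be smaller than 7/8 of the input.)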
+ compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. + w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. +func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/hotelReservation/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/hotelReservation/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go new file mode 100644 index 000000000..511bba65d --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -0,0 +1,236 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. +// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= len(lit) && len(lit) <= 65536 +func emitLiteral(dst, lit []byte) int { + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[0] = 60<<2 | tagLiteral + dst[1] = uint8(n) + i = 2 + default: + dst[0] = 61<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + i = 3 + } + return i + copy(dst[i:], lit) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= 65535 +// 4 <= length && length <= 65535 +func emitCopy(dst []byte, offset, length int) int { + i := 0 + // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. 
The
+	// threshold for this loop is a little higher (at 68 = 64 + 4), and the
+	// length emitted down below is a little lower (at 60 = 64 - 4), because
+	// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
+	// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
+	// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
+	// 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
+	// tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
+	// encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
+	for length >= 68 {
+		// Emit a length 64 copy, encoded as 3 bytes.
+		dst[i+0] = 63<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 64
+	}
+	if length > 64 {
+		// Emit a length 60 copy, encoded as 3 bytes.
+		dst[i+0] = 59<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 60
+	}
+	if length >= 12 || offset >= 2048 {
+		// Emit the remaining copy, encoded as 3 bytes.
+		dst[i+0] = uint8(length-1)<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		return i + 3
+	}
+	// Emit the remaining copy, encoded as 2 bytes.
+	dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+	dst[i+1] = uint8(offset)
+	return i + 2
+}
+
+// extendMatch returns the largest k such that k <= len(src) and that
+// src[i:i+k-j] and src[j:k] have the same contents.
+//
+// It assumes that:
+//	0 <= i && i < j && j <= len(src)
+func extendMatch(src []byte, i, j int) int {
+	for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
+	}
+	return j
+}
+
+func hash(u, shift uint32) uint32 {
+	return (u * 0x1e35a7bd) >> shift
+}
+
+// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//	len(dst) >= MaxEncodedLen(len(src)) &&
+//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlock(dst, src []byte) (d int) {
+	// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
+	// The table element type is uint16, as s < sLimit and sLimit < len(src)
+	// and len(src) <= maxBlockSize and maxBlockSize == 65536.
+	const (
+		maxTableSize = 1 << 14
+		// tableMask is redundant, but helps the compiler eliminate bounds
+		// checks.
+		tableMask = maxTableSize - 1
+	)
+	shift := uint32(32 - 8)
+	for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+		shift--
+	}
+	// In Go, all array elements are zero-initialized, so there is no advantage
+	// to a smaller tableSize per se. However, it matches the C++ algorithm,
+	// and in the asm versions of this code, we can get away with zeroing only
+	// the first tableSize elements.
+	var table [maxTableSize]uint16
+
+	// sLimit is when to stop looking for offset/length copies. The inputMargin
+	// lets us use a fast path for emitLiteral in the main loop, while we are
+	// looking for copies.
+	sLimit := len(src) - inputMargin
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := 0
+
+	// The encoded form must start with a literal, as there are no previous
+	// bytes to copy, so we start looking for hash matches at s == 1.
+ s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. 
+ x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/hotelReservation/vendor/github.com/klauspost/compress/internal/snapref/snappy.go b/hotelReservation/vendor/github.com/klauspost/compress/internal/snapref/snappy.go new file mode 100644 index 000000000..34d01f4aa --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/internal/snapref/snappy.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snapref implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. +// +// There are actually two Snappy formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. +// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. +package snapref + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. 
It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/hotelReservation/vendor/github.com/klauspost/compress/s2sx.mod b/hotelReservation/vendor/github.com/klauspost/compress/s2sx.mod new file mode 100644 index 000000000..2263853fc --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/s2sx.mod @@ -0,0 +1,4 @@ +module github.com/klauspost/compress + +go 1.16 + diff --git a/hotelReservation/vendor/github.com/klauspost/compress/s2sx.sum b/hotelReservation/vendor/github.com/klauspost/compress/s2sx.sum new file mode 100644 index 000000000..e69de29bb diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/README.md b/hotelReservation/vendor/github.com/klauspost/compress/zstd/README.md new file mode 100644 index 000000000..c8f0f16fc --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/README.md @@ -0,0 +1,441 @@ +# zstd + +[Zstandard](https://facebook.github.io/zstd/) is a real-time compression algorithm, providing high compression ratios. +It offers a very wide range of compression / speed trade-off, while being backed by a very fast decoder. +A high performance compression algorithm is implemented. For now focused on speed. + +This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content. + +This package is pure Go and without use of "unsafe". + +The `zstd` package is provided as open source software using a Go standard license. + +Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors. + +## Installation + +Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`. + +[![Go Reference](https://pkg.go.dev/badge/github.com/klauspost/compress/zstd.svg)](https://pkg.go.dev/github.com/klauspost/compress/zstd) + +## Compressor + +### Status: + +STABLE - there may always be subtle bugs, a wide variety of content has been tested and the library is actively +used by several projects. This library is being [fuzz-tested](https://github.com/klauspost/compress-fuzz) for all updates. + +There may still be specific combinations of data types/size/settings that could lead to edge cases, +so as always, testing is recommended. 
+
+For now, a high speed (fastest) and medium-fast (default) compressor has been implemented.
+
+* The "Fastest" compression ratio is roughly equivalent to zstd level 1.
+* The "Default" compression ratio is roughly equivalent to zstd level 3 (default).
+* The "Better" compression ratio is roughly equivalent to zstd level 7.
+* The "Best" compression ratio is roughly equivalent to zstd level 11.
+
+In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode.
+The compression ratio compared to stdlib is around level 3, but usually 3x as fast.
+
+
+### Usage
+
+An Encoder can be used for either compressing a stream via the
+`io.WriteCloser` interface supported by the Encoder or as multiple independent
+tasks via the `EncodeAll` function.
+Smaller encodes are encouraged to use the `EncodeAll` function.
+Use `NewWriter` to create a new instance that can be used for both.
+
+To create a writer with default options, do the following:
+
+```Go
+// Compress input to output.
+func Compress(in io.Reader, out io.Writer) error {
+	enc, err := zstd.NewWriter(out)
+	if err != nil {
+		return err
+	}
+	_, err = io.Copy(enc, in)
+	if err != nil {
+		enc.Close()
+		return err
+	}
+	return enc.Close()
+}
+```
+
+Now you can encode by writing data to `enc`. The output will be finished writing when `Close()` is called.
+Even if your encode fails, you should still call `Close()` to release any resources that may be held up.
+
+The above is fine for big encodes. However, whenever possible try to *reuse* the writer.
+
+To reuse the encoder, you can use the `Reset(io.Writer)` function to change to another output.
+This will allow the encoder to reuse all resources and avoid wasteful allocations.
+
+Currently stream encoding has 'light' concurrency, meaning up to 2 goroutines can be working on part
+of a stream. This is independent of `WithEncoderConcurrency(n)`, but that is likely to change
+in the future. So if you want to limit concurrency for future updates, specify the concurrency
+you would like.
+
+You can specify your desired compression level using the `WithEncoderLevel()` option. Currently only pre-defined
+compression settings can be specified.
+
+#### Future Compatibility Guarantees
+
+This will be an evolving project. When using this package it is important to note that both the compression efficiency and speed may change.
+
+The goal will be to keep the default efficiency at the default zstd (level 3).
+However, the encoding should never be assumed to remain the same,
+and you should not use hashes of compressed output for similarity checks.
+
+The Encoder can be assumed to produce the same output from the exact same code version.
+However, there may be modes in the future that break this,
+although they will not be enabled without an explicit option.
+
+This encoder is not designed to (and will probably never) output the exact same bitstream as the reference encoder.
+
+Also note that the cgo decompressor currently does not [report all errors on invalid input](https://github.com/DataDog/zstd/issues/59),
+[omits error checks](https://github.com/DataDog/zstd/issues/61), [ignores checksums](https://github.com/DataDog/zstd/issues/43)
+and seems to ignore concatenated streams, even though [it is part of the spec](https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frames).
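+
+Tying the options together, here is a minimal sketch of creating an encoder with one of the
+predefined levels listed above (`SpeedBetterCompression`, roughly zstd level 7). The helper name
+is illustrative, not part of the package:
+
+```Go
+import (
+	"io"
+
+	"github.com/klauspost/compress/zstd"
+)
+
+// newBetterWriter returns an encoder using the predefined "better" level.
+func newBetterWriter(w io.Writer) (*zstd.Encoder, error) {
+	return zstd.NewWriter(w, zstd.WithEncoderLevel(zstd.SpeedBetterCompression))
+}
+```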
+
+#### Blocks
+
+For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`.
+
+`EncodeAll` will encode all input in src and append it to dst.
+This function can be called concurrently, but each call will only run on a single goroutine.
+
+Encoded blocks can be concatenated and the result will be the combined input stream.
+Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`.
+
+Especially when encoding blocks, you should take special care to reuse the encoder.
+This will effectively make it run without allocations after a warmup period.
+To make it run completely without allocations, supply a destination buffer with space for all content.
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// Create a writer that caches compressors.
+// For this operation type we supply a nil Writer.
+var encoder, _ = zstd.NewWriter(nil)
+
+// Compress a buffer.
+// If you have a destination buffer, the allocation in the call can also be eliminated.
+func Compress(src []byte) []byte {
+	return encoder.EncodeAll(src, make([]byte, 0, len(src)))
+}
+```
+
+You can control the maximum number of concurrent encodes using the `WithEncoderConcurrency(n)`
+option when creating the writer.
+
+Using the Encoder for both a stream and individual blocks concurrently is safe.
+
+### Performance
+
+I have collected some examples comparing speed and compression against other compressors.
+
+* `file` is the input file.
+* `out` is the compressor used. `zskp` is this package. `zstd` is the Datadog cgo library. `gzstd`/`gzkp` is gzip from the standard library and from this library.
+* `level` is the compression level used. For `zskp`, level 1 is "fastest", level 2 is "default", 3 is "better", and 4 is "best".
+* `insize`/`outsize` is the input/output size.
+* `millis` is the number of milliseconds used for compression.
+* `mb/s` is megabytes (2^20 bytes) per second.
+
+```
+Silesia Corpus:
+http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip
+
+This package:
+file         out    level  insize     outsize    millis  mb/s
+silesia.tar  zskp   1      211947520  73101992   643     313.87
+silesia.tar  zskp   2      211947520  67504318   969     208.38
+silesia.tar  zskp   3      211947520  64595893   2007    100.68
+silesia.tar  zskp   4      211947520  60995370   8825    22.90
+
+cgo zstd:
+silesia.tar  zstd   1      211947520  73605392   543     371.56
+silesia.tar  zstd   3      211947520  66793289   864     233.68
+silesia.tar  zstd   6      211947520  62916450   1913    105.66
+silesia.tar  zstd   9      211947520  60212393   5063    39.92
+
+gzip, stdlib/this package:
+silesia.tar  gzstd  1      211947520  80007735   1654    122.21
+silesia.tar  gzkp   1      211947520  80136201   1152    175.45
+
+GOB stream of binary data. Highly compressible.
+https://files.klauspost.com/compress/gob-stream.7z
+
+file        out    level  insize      outsize    millis  mb/s
+gob-stream  zskp   1      1911399616  235022249  3088    590.30
+gob-stream  zskp   2      1911399616  205669791  3786    481.34
+gob-stream  zskp   3      1911399616  175034659  9636    189.17
+gob-stream  zskp   4      1911399616  165609838  50369   36.19
+
+gob-stream  zstd   1      1911399616  249810424  2637    691.26
+gob-stream  zstd   3      1911399616  208192146  3490    522.31
+gob-stream  zstd   6      1911399616  193632038  6687    272.56
+gob-stream  zstd   9      1911399616  177620386  16175   112.70
+
+gob-stream  gzstd  1      1911399616  357382641  10251   177.82
+gob-stream  gzkp   1      1911399616  359753026  5438    335.20
+
+The test data for the Large Text Compression Benchmark is the first
+10^9 bytes of the English Wikipedia dump on Mar. 3, 2006.
+http://mattmahoney.net/dc/textdata.html
+
+file    out    level  insize      outsize    millis  mb/s
+enwik9  zskp   1      1000000000  343848582  3609    264.18
+enwik9  zskp   2      1000000000  317276632  5746    165.97
+enwik9  zskp   3      1000000000  292243069  12162   78.41
+enwik9  zskp   4      1000000000  262183768  82837   11.51
+
+enwik9  zstd   1      1000000000  358072021  3110    306.65
+enwik9  zstd   3      1000000000  313734672  4784    199.35
+enwik9  zstd   6      1000000000  295138875  10290   92.68
+enwik9  zstd   9      1000000000  278348700  28549   33.40
+
+enwik9  gzstd  1      1000000000  382578136  9604    99.30
+enwik9  gzkp   1      1000000000  383825945  6544    145.73
+
+Highly compressible JSON file.
+https://files.klauspost.com/compress/github-june-2days-2019.json.zst
+
+file                         out    level  insize      outsize     millis  mb/s
+github-june-2days-2019.json  zskp   1      6273951764  699045015   10620   563.40
+github-june-2days-2019.json  zskp   2      6273951764  617881763   11687   511.96
+github-june-2days-2019.json  zskp   3      6273951764  524340691   34043   175.75
+github-june-2days-2019.json  zskp   4      6273951764  470320075   170190  35.16
+
+github-june-2days-2019.json  zstd   1      6273951764  766284037   8450    708.00
+github-june-2days-2019.json  zstd   3      6273951764  661889476   10927   547.57
+github-june-2days-2019.json  zstd   6      6273951764  642756859   22996   260.18
+github-june-2days-2019.json  zstd   9      6273951764  601974523   52413   114.16
+
+github-june-2days-2019.json  gzstd  1      6273951764  1164400847  29948   199.79
+github-june-2days-2019.json  gzkp   1      6273951764  1125417694  21788   274.61
+
+VM Image, Linux mint with a few installed applications:
+https://files.klauspost.com/compress/rawstudio-mint14.7z
+
+file                  out    level  insize      outsize     millis  mb/s
+rawstudio-mint14.tar  zskp   1      8558382592  3667489370  20210   403.84
+rawstudio-mint14.tar  zskp   2      8558382592  3364592300  31873   256.07
+rawstudio-mint14.tar  zskp   3      8558382592  3158085214  77675   105.08
+rawstudio-mint14.tar  zskp   4      8558382592  2965110639  857750  9.52
+
+rawstudio-mint14.tar  zstd   1      8558382592  3609250104  17136   476.27
+rawstudio-mint14.tar  zstd   3      8558382592  3341679997  29262   278.92
+rawstudio-mint14.tar  zstd   6      8558382592  3235846406  77904   104.77
+rawstudio-mint14.tar  zstd   9      8558382592  3160778861  140946  57.91
+
+rawstudio-mint14.tar  gzstd  1      8558382592  3926257486  57722   141.40
+rawstudio-mint14.tar  gzkp   1      8558382592  3962605659  45113   180.92
+
+CSV data:
+https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst
+
+file                   out    level  insize      outsize    millis  mb/s
+nyc-taxi-data-10M.csv  zskp   1      3325605752  641339945  8925    355.35
+nyc-taxi-data-10M.csv  zskp   2      3325605752  591748091  11268   281.44
+nyc-taxi-data-10M.csv  zskp   3      3325605752  530289687  25239   125.66
+nyc-taxi-data-10M.csv  zskp   4      3325605752  476268884  135958  23.33
+
+nyc-taxi-data-10M.csv  zstd   1      3325605752  687399637  8233    385.18
+nyc-taxi-data-10M.csv  zstd   3      3325605752  598514411  10065   315.07
+nyc-taxi-data-10M.csv  zstd   6      3325605752  570522953  20038   158.27
+nyc-taxi-data-10M.csv  zstd   9      3325605752  517554797  64565   49.12
+
+nyc-taxi-data-10M.csv  gzstd  1      3325605752  928656485  23876   132.83
+nyc-taxi-data-10M.csv  gzkp   1      3325605752  922257165  16780   189.00
+```
+
+## Decompressor
+
+Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
+
+This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
+kindly supplied by [fuzzit.dev](https://fuzzit.dev/).
+The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder,
+or run it past its limits with ANY input provided.
+
+### Usage
+
+The package has been designed for two main usages: big streams of data, and smaller in-memory buffers.
+Both of them are accessed by creating a `Decoder`.
+
+For streaming use, a simple setup could look like this:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+func Decompress(in io.Reader, out io.Writer) error {
+	d, err := zstd.NewReader(in)
+	if err != nil {
+		return err
+	}
+	defer d.Close()
+
+	// Copy content...
+	_, err = io.Copy(out, d)
+	return err
+}
+```
+
+It is important to call the "Close" function when you no longer need the Reader, in order to stop its running goroutines.
+See "Allocation-less operation" below.
+
+For decoding buffers, it could look something like this:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// Create a reader that caches decompressors.
+// For this operation type we supply a nil Reader.
+var decoder, _ = zstd.NewReader(nil)
+
+// Decompress a buffer. We don't supply a destination buffer,
+// so it will be allocated by the decoder.
+func Decompress(src []byte) ([]byte, error) {
+	return decoder.DecodeAll(src, nil)
+}
+```
+
+Both of these cases should provide the functionality needed.
+The decoder can be used for *concurrent* decompression of multiple buffers.
+It will only allow a certain number of concurrent operations to run.
+To tweak that yourself, use the `WithDecoderConcurrency(n)` option when creating the decoder.
+
+### Dictionaries
+
+Data compressed with [dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression) can be decompressed.
+
+Dictionaries are added individually to Decoders.
+Dictionaries are generated by the `zstd --train` command and contain an initial state for the decoder.
+To add a dictionary, use the `WithDecoderDicts(dicts ...[]byte)` option with the dictionary data.
+Several dictionaries can be added at once.
+
+The dictionary will be used automatically for the data that specifies it.
+A re-used Decoder will still contain the dictionaries registered.
+
+When registering multiple dictionaries with the same ID, the last one will be used.
+
+It is possible to use dictionaries when compressing data.
+
+To enable a dictionary, use `WithEncoderDict(dict []byte)`. Here only one dictionary will be used
+and it will likely be used even if it doesn't improve compression.
+
+The same dictionary must then be used to decompress the content.
+
+For any real gains, the dictionary should be built with similar data.
+If an unsuitable dictionary is used, the output may be slightly larger than using no dictionary.
+Use the [zstd commandline tool](https://github.com/facebook/zstd/releases) to build a dictionary from sample data.
+For information see [zstd dictionary information](https://github.com/facebook/zstd#the-case-for-small-data-compression).
+
+For now there is a fixed startup performance penalty for compressing content with dictionaries.
+This will likely be improved over time. Just be sure to test performance when implementing.
+A sketch of wiring up dictionaries is shown after the benchmarks below.
+
+### Allocation-less operation
+
+The decoder has been designed to operate without allocations after a warmup.
+
+This means that you should *store* the decoder for best performance.
+To re-use a stream decoder, use `Reset(r io.Reader) error` to switch to another stream.
+A decoder can safely be re-used even if the previous stream failed.
+
+To release the resources, you must call the `Close()` function on a decoder.
+After this it can *no longer be reused*, but all running goroutines will be stopped.
+So you *must* use this if you will no longer need the Reader.
+
+For decompressing smaller buffers, a single decoder can be used.
+When decoding buffers, you can supply a destination slice with length 0 and your expected capacity.
+In this case no unneeded allocations should be made.
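+
+To make the buffer path concrete, here is a minimal sketch of allocation-less buffer decoding using
+the `DecodeAll` API described above. The helper name is illustrative, not part of the package:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+var decoder, _ = zstd.NewReader(nil)
+
+// decompressInto appends the decompressed content of src to dst.
+// DecodeAll appends to the supplied slice, so passing dst[:0] with
+// enough capacity lets the decoder run without extra allocations.
+func decompressInto(dst, src []byte) ([]byte, error) {
+	return decoder.DecodeAll(src, dst[:0])
+}
+```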
+
+### Concurrency
+
+The buffer decoder does everything on the same goroutine and does nothing concurrently.
+It can, however, decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that.
+
+The stream decoder operates in several stages:
+
+* One goroutine reads input and splits the input into several block decoders.
+* A number of decoders will decode blocks.
+* A goroutine coordinates these blocks and sends history from one to the next.
+
+So effectively this also means the decoder will "read ahead" and prepare data to always be available for output.
+
+Since "blocks" are quite dependent on the output of the previous block, stream decoding will only have limited concurrency.
+
+In practice this means that concurrency is often limited to utilizing about 2 cores effectively.
+
+
+### Benchmarks
+
+These are some examples of performance compared to the [datadog cgo library](https://github.com/DataDog/zstd).
+
+The first two are streaming decodes and the last are smaller inputs.
+
+```
+BenchmarkDecoderSilesia-8        3   385000067 ns/op   550.51 MB/s    5498 B/op   8 allocs/op
+BenchmarkDecoderSilesiaCgo-8     6   197666567 ns/op  1072.25 MB/s  270672 B/op   8 allocs/op
+
+BenchmarkDecoderEnwik9-8         1  2027001600 ns/op   493.34 MB/s   10496 B/op  18 allocs/op
+BenchmarkDecoderEnwik9Cgo-8      2   979499200 ns/op  1020.93 MB/s  270672 B/op   8 allocs/op
+
+Concurrent performance:
+
+BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-16          28915    42469 ns/op    4340.07 MB/s   114 B/op  0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-16     116505     9965 ns/op   11900.16 MB/s    16 B/op  0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-16        8952   134272 ns/op    3588.70 MB/s   915 B/op  0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-16         11820   102538 ns/op    4161.90 MB/s   594 B/op  0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-16       34782    34184 ns/op    3661.88 MB/s    60 B/op  0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-16        27712    43447 ns/op    3500.58 MB/s    99 B/op  0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-16           62826    18750 ns/op   21845.10 MB/s   104 B/op  0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-16    631545     1794 ns/op   57078.74 MB/s     2 B/op  0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-16   1690140      712 ns/op  172938.13 MB/s     1 B/op  0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-16           10432   113593 ns/op    6180.73 MB/s  1143 B/op  0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/html.zst-16              113206    10671 ns/op    9596.27 MB/s    15 B/op  0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-16    1530615      779 ns/op    5229.49 MB/s     0 B/op  0 allocs/op
+
+BenchmarkDecoder_DecodeAllParallelCgo/kppkn.gtb.zst-16       65217    16192 ns/op   11383.34 MB/s    46 B/op  0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/geo.protodata.zst-16  292671     4039 ns/op   29363.19 MB/s     6 B/op  0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/plrabn12.txt.zst-16    26314    46021 ns/op   10470.43 MB/s   293 B/op  0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/lcet10.txt.zst-16      33897    34900 ns/op   12227.96 MB/s   205 B/op  0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/asyoulik.txt.zst-16   104348    11433 ns/op   10949.01 MB/s    20 B/op  0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/alice29.txt.zst-16     75949    15510 ns/op    9805.60 MB/s    32 B/op  0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/html_x_4.zst-16       173910     6756 ns/op   60624.29 MB/s    37 B/op  0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/paper-100k.pdf.zst-16 923076     1339 ns/op   76474.87 MB/s     1 B/op  0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/fireworks.jpeg.zst-16 922920     1351 ns/op   91102.57 MB/s     2 B/op  0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/urls.10K.zst-16        27649    43618 ns/op   16096.19 MB/s   407 B/op  0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/html.zst-16           279073     4160 ns/op   24614.18 MB/s     6 B/op  0 allocs/op
+BenchmarkDecoder_DecodeAllParallelCgo/comp-data.bin.zst-16  749938     1579 ns/op    2581.71 MB/s     0 B/op  0 allocs/op
+```
+
+This reflects the performance around May 2020, but this may be out of date.
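+
+As mentioned in the Dictionaries section above, here is a minimal sketch of wiring a dictionary into
+both a decoder and an encoder via the `WithDecoderDicts` and `WithEncoderDict` options. The file name
+and helper are illustrative assumptions, not part of the package:
+
+```Go
+import (
+	"os"
+
+	"github.com/klauspost/compress/zstd"
+)
+
+// newDictCodecs loads a dictionary produced by `zstd --train` and
+// returns a decoder and an encoder that use it.
+func newDictCodecs() (*zstd.Decoder, *zstd.Encoder, error) {
+	dict, err := os.ReadFile("sample.dict") // example path
+	if err != nil {
+		return nil, nil, err
+	}
+	// The decoder selects the dictionary by the ID stored in each frame.
+	dec, err := zstd.NewReader(nil, zstd.WithDecoderDicts(dict))
+	if err != nil {
+		return nil, nil, err
+	}
+	// The encoder always uses the single dictionary supplied here.
+	enc, err := zstd.NewWriter(nil, zstd.WithEncoderDict(dict))
+	if err != nil {
+		dec.Close()
+		return nil, nil, err
+	}
+	return dec, enc, nil
+}
+```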
+
+## Zstd inside ZIP files
+
+It is possible to use zstandard to compress individual files inside zip archives.
+While this isn't widely supported, it can be useful for internal files.
+
+To support the compression and decompression of these files, you must register a compressor and decompressor.
+
+It is highly recommended to register the (de)compressors on individual zip Readers/Writers and NOT to
+use the global registration functions. The main reason for this is that two registrations from
+different packages will result in a panic.
+
+It is a good idea to only have a single compressor and decompressor, since they can be used for multiple zip
+files concurrently, and using a single instance will allow reusing some resources.
+
+See [this example](https://pkg.go.dev/github.com/klauspost/compress/zstd#example-ZipCompressor) for
+how to compress and decompress files inside zip archives.
+
+# Contributions
+
+Contributions are always welcome.
+For new features/fixes, remember to add tests, and for performance enhancements, include benchmarks.
+
+For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan).
+
+This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare.
diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/bitreader.go b/hotelReservation/vendor/github.com/klauspost/compress/zstd/bitreader.go
new file mode 100644
index 000000000..854458537
--- /dev/null
+++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/bitreader.go
@@ -0,0 +1,136 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+	"math/bits"
+)
+
+// bitReader reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReader struct {
+	in       []byte
+	off      uint   // next byte to read is at in[off - 1]
+	value    uint64 // Maybe use [16]byte, but shifting is awkward.
+	bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
+func (b *bitReader) init(in []byte) error {
+	if len(in) < 1 {
+		return errors.New("corrupt stream: too short")
+	}
+	b.in = in
+	b.off = uint(len(in))
+	// The highest bit of the last byte indicates where to start
+	v := in[len(in)-1]
+	if v == 0 {
+		return errors.New("corrupt stream, did not find end of stream")
+	}
+	b.bitsRead = 64
+	b.value = 0
+	if len(in) >= 8 {
+		b.fillFastStart()
+	} else {
+		b.fill()
+		b.fill()
+	}
+	b.bitsRead += 8 - uint8(highBits(uint32(v)))
+	return nil
+}
+
+// getBits will return n bits. n can be 0.
+func (b *bitReader) getBits(n uint8) int { + if n == 0 /*|| b.bitsRead >= 64 */ { + return 0 + } + return b.getBitsFast(n) +} + +// getBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) getBitsFast(n uint8) int { + const regMask = 64 - 1 + v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return int(v) +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + // 2 bounds checks. + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off >= 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +// overread returns true if more bits have been requested than is on the stream. +func (b *bitReader) overread() bool { + return b.bitsRead > 64 +} + +// remain returns the number of bits remaining. +func (b *bitReader) remain() uint { + return b.off*8 + 64 - uint(b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +func highBits(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/hotelReservation/vendor/github.com/klauspost/compress/zstd/bitwriter.go new file mode 100644 index 000000000..303ae90f9 --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/bitwriter.go @@ -0,0 +1,169 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. 
+var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +var bitMask32 = [32]uint32{ + 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, + 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, + 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, + 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, +} // up to 32 bits + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits32NC will add up to 32 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits32NC(value uint32, bits uint8) { + b.bitContainer |= uint64(value&bitMask32[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush will flush all pending full bytes. +// There will be at least 56 bits available for writing when this has been called. +// Using flush32 is faster, but leaves less space for writing. +func (b *bitWriter) flush() { + v := b.nBits >> 3 + switch v { + case 0: + case 1: + b.out = append(b.out, + byte(b.bitContainer), + ) + case 2: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + ) + case 3: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + ) + case 4: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + ) + case 5: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + ) + case 6: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + ) + case 7: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + ) + case 8: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + byte(b.bitContainer>>56), + ) + default: + panic(fmt.Errorf("bits (%d) > 64", b.nBits)) + } + b.bitContainer >>= v << 3 + b.nBits &= 7 +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. 
+func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() error { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() + return nil +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/blockdec.go b/hotelReservation/vendor/github.com/klauspost/compress/zstd/blockdec.go new file mode 100644 index 000000000..8a98c4562 --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -0,0 +1,736 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "io" + "sync" + + "github.com/klauspost/compress/huff0" + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +type blockType uint8 + +//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex + +const ( + blockTypeRaw blockType = iota + blockTypeRLE + blockTypeCompressed + blockTypeReserved +) + +type literalsBlockType uint8 + +const ( + literalsBlockRaw literalsBlockType = iota + literalsBlockRLE + literalsBlockCompressed + literalsBlockTreeless +) + +const ( + // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) + maxCompressedBlockSize = 128 << 10 + + // Maximum possible block size (all Raw+Uncompressed). + maxBlockSize = (1 << 21) - 1 + + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#literals_section_header + maxCompressedLiteralSize = 1 << 18 + maxRLELiteralSize = 1 << 20 + maxMatchLen = 131074 + maxSequences = 0x7f00 + 0xffff + + // We support slightly less than the reference decoder to be able to + // use ints on 32 bit archs. + maxOffsetBits = 30 +) + +var ( + huffDecoderPool = sync.Pool{New: func() interface{} { + return &huff0.Scratch{} + }} + + fseDecoderPool = sync.Pool{New: func() interface{} { + return &fseDecoder{} + }} +) + +type blockDec struct { + // Raw source data of the block. + data []byte + dataStorage []byte + + // Destination of the decoded data. + dst []byte + + // Buffer for literals data. + literalBuf []byte + + // Window size of the block. + WindowSize uint64 + + history chan *history + input chan struct{} + result chan decodeOutput + sequenceBuf []seq + err error + decWG sync.WaitGroup + + // Frame to use for singlethreaded decoding. + // Should not be used by the decoder itself since parent may be another frame. + localFrame *frameDec + + // Block is RLE, this is the size. + RLESize uint32 + tmp [4]byte + + Type blockType + + // Is this the last block of a frame? + Last bool + + // Use less memory + lowMem bool +} + +func (b *blockDec) String() string { + if b == nil { + return "" + } + return fmt.Sprintf("Steam Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize) +} + +func newBlockDec(lowMem bool) *blockDec { + b := blockDec{ + lowMem: lowMem, + result: make(chan decodeOutput, 1), + input: make(chan struct{}, 1), + history: make(chan *history, 1), + } + b.decWG.Add(1) + go b.startDecoder() + return &b +} + +// reset will reset the block. 
+// Input must be a start of a block and will be at the end of the block when returned. +func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { + b.WindowSize = windowSize + tmp, err := br.readSmall(3) + if err != nil { + println("Reading block header:", err) + return err + } + bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) + b.Last = bh&1 != 0 + b.Type = blockType((bh >> 1) & 3) + // find size. + cSize := int(bh >> 3) + maxSize := maxBlockSize + switch b.Type { + case blockTypeReserved: + return ErrReservedBlockType + case blockTypeRLE: + b.RLESize = uint32(cSize) + if b.lowMem { + maxSize = cSize + } + cSize = 1 + case blockTypeCompressed: + if debugDecoder { + println("Data size on stream:", cSize) + } + b.RLESize = 0 + maxSize = maxCompressedBlockSize + if windowSize < maxCompressedBlockSize && b.lowMem { + maxSize = int(windowSize) + } + if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize { + if debugDecoder { + printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrCompressedSizeTooBig + } + case blockTypeRaw: + b.RLESize = 0 + // We do not need a destination for raw blocks. + maxSize = -1 + default: + panic("Invalid block type") + } + + // Read block data. + if cap(b.dataStorage) < cSize { + if b.lowMem || cSize > maxCompressedBlockSize { + b.dataStorage = make([]byte, 0, cSize) + } else { + b.dataStorage = make([]byte, 0, maxCompressedBlockSize) + } + } + if cap(b.dst) <= maxSize { + b.dst = make([]byte, 0, maxSize+1) + } + b.data, err = br.readBig(cSize, b.dataStorage) + if err != nil { + if debugDecoder { + println("Reading block:", err, "(", cSize, ")", len(b.data)) + printf("%T", br) + } + return err + } + return nil +} + +// sendEOF will make the decoder send EOF on this frame. +func (b *blockDec) sendErr(err error) { + b.Last = true + b.Type = blockTypeReserved + b.err = err + b.input <- struct{}{} +} + +// Close will release resources. +// Closed blockDec cannot be reset. +func (b *blockDec) Close() { + close(b.input) + close(b.history) + close(b.result) + b.decWG.Wait() +} + +// decodeAsync will prepare decoding the block when it receives input. +// This will separate output and history. +func (b *blockDec) startDecoder() { + defer b.decWG.Done() + for range b.input { + //println("blockDec: Got block input") + switch b.Type { + case blockTypeRLE: + if cap(b.dst) < int(b.RLESize) { + if b.lowMem { + b.dst = make([]byte, b.RLESize) + } else { + b.dst = make([]byte, maxBlockSize) + } + } + o := decodeOutput{ + d: b, + b: b.dst[:b.RLESize], + err: nil, + } + v := b.data[0] + for i := range o.b { + o.b[i] = v + } + hist := <-b.history + hist.append(o.b) + b.result <- o + case blockTypeRaw: + o := decodeOutput{ + d: b, + b: b.data, + err: nil, + } + hist := <-b.history + hist.append(o.b) + b.result <- o + case blockTypeCompressed: + b.dst = b.dst[:0] + err := b.decodeCompressed(nil) + o := decodeOutput{ + d: b, + b: b.dst, + err: err, + } + if debugDecoder { + println("Decompressed to", len(b.dst), "bytes, error:", err) + } + b.result <- o + case blockTypeReserved: + // Used for returning errors. + <-b.history + b.result <- decodeOutput{ + d: b, + b: nil, + err: b.err, + } + default: + panic("Invalid block type") + } + if debugDecoder { + println("blockDec: Finished block") + } + } +} + +// decodeAsync will prepare decoding the block when it receives the history. +// If history is provided, it will not fetch it from the channel. 
+func (b *blockDec) decodeBuf(hist *history) error { + switch b.Type { + case blockTypeRLE: + if cap(b.dst) < int(b.RLESize) { + if b.lowMem { + b.dst = make([]byte, b.RLESize) + } else { + b.dst = make([]byte, maxBlockSize) + } + } + b.dst = b.dst[:b.RLESize] + v := b.data[0] + for i := range b.dst { + b.dst[i] = v + } + hist.appendKeep(b.dst) + return nil + case blockTypeRaw: + hist.appendKeep(b.data) + return nil + case blockTypeCompressed: + saved := b.dst + b.dst = hist.b + hist.b = nil + err := b.decodeCompressed(hist) + if debugDecoder { + println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) + } + hist.b = b.dst + b.dst = saved + return err + case blockTypeReserved: + // Used for returning errors. + return b.err + default: + panic("Invalid block type") + } +} + +// decodeCompressed will start decompressing a block. +// If no history is supplied the decoder will decodeAsync as much as possible +// before fetching from blockDec.history +func (b *blockDec) decodeCompressed(hist *history) error { + in := b.data + delayedHistory := hist == nil + + if delayedHistory { + // We must always grab history. + defer func() { + if hist == nil { + <-b.history + } + }() + } + // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header + if len(in) < 2 { + return ErrBlockTooSmall + } + litType := literalsBlockType(in[0] & 3) + var litRegenSize int + var litCompSize int + sizeFormat := (in[0] >> 2) & 3 + var fourStreams bool + switch litType { + case literalsBlockRaw, literalsBlockRLE: + switch sizeFormat { + case 0, 2: + // Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte. + litRegenSize = int(in[0] >> 3) + in = in[1:] + case 1: + // Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes. + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + in = in[2:] + case 3: + // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. + if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return ErrBlockTooSmall + } + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) + in = in[3:] + } + case literalsBlockCompressed, literalsBlockTreeless: + switch sizeFormat { + case 0, 1: + // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). 
+ if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + litRegenSize = int(n & 1023) + litCompSize = int(n >> 10) + fourStreams = sizeFormat == 1 + in = in[3:] + case 2: + fourStreams = true + if len(in) < 4 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + litRegenSize = int(n & 16383) + litCompSize = int(n >> 14) + in = in[4:] + case 3: + fourStreams = true + if len(in) < 5 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) + litRegenSize = int(n & 262143) + litCompSize = int(n >> 18) + in = in[5:] + } + } + if debugDecoder { + println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) + } + var literals []byte + var huff *huff0.Scratch + switch litType { + case literalsBlockRaw: + if len(in) < litRegenSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) + return ErrBlockTooSmall + } + literals = in[:litRegenSize] + in = in[litRegenSize:] + //printf("Found %d uncompressed literals\n", litRegenSize) + case literalsBlockRLE: + if len(in) < 1 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) + return ErrBlockTooSmall + } + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, litRegenSize) + } else { + if litRegenSize > maxCompressedLiteralSize { + // Exceptional + b.literalBuf = make([]byte, litRegenSize) + } else { + b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize) + + } + } + } + literals = b.literalBuf[:litRegenSize] + v := in[0] + for i := range literals { + literals[i] = v + } + in = in[1:] + if debugDecoder { + printf("Found %d RLE compressed literals\n", litRegenSize) + } + case literalsBlockTreeless: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return ErrBlockTooSmall + } + // Store compressed literals, so we defer decoding until we get history. + literals = in[:litCompSize] + in = in[litCompSize:] + if debugDecoder { + printf("Found %d compressed literals\n", litCompSize) + } + case literalsBlockCompressed: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return ErrBlockTooSmall + } + literals = in[:litCompSize] + in = in[litCompSize:] + huff = huffDecoderPool.Get().(*huff0.Scratch) + var err error + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize) + } else { + b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) + } + } + if huff == nil { + huff = &huff0.Scratch{} + } + huff, literals, err = huff0.ReadTable(literals, huff) + if err != nil { + println("reading huffman table:", err) + return err + } + // Use our out buffer. 
+ if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + if err != nil { + println("decoding compressed literals:", err) + return err + } + // Make sure we don't leak our literals buffer + if len(literals) != litRegenSize { + return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + if debugDecoder { + printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) + } + } + + // Decode Sequences + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section + if len(in) < 1 { + return ErrBlockTooSmall + } + seqHeader := in[0] + nSeqs := 0 + switch { + case seqHeader == 0: + in = in[1:] + case seqHeader < 128: + nSeqs = int(seqHeader) + in = in[1:] + case seqHeader < 255: + if len(in) < 2 { + return ErrBlockTooSmall + } + nSeqs = int(seqHeader-128)<<8 | int(in[1]) + in = in[2:] + case seqHeader == 255: + if len(in) < 3 { + return ErrBlockTooSmall + } + nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) + in = in[3:] + } + // Allocate sequences + if cap(b.sequenceBuf) < nSeqs { + if b.lowMem { + b.sequenceBuf = make([]seq, nSeqs) + } else { + // Allocate max + b.sequenceBuf = make([]seq, nSeqs, maxSequences) + } + } else { + // Reuse buffer + b.sequenceBuf = b.sequenceBuf[:nSeqs] + } + var seqs = &sequenceDecs{} + if nSeqs > 0 { + if len(in) < 1 { + return ErrBlockTooSmall + } + br := byteReader{b: in, off: 0} + compMode := br.Uint8() + br.advance(1) + if debugDecoder { + printf("Compression modes: 0b%b", compMode) + } + for i := uint(0); i < 3; i++ { + mode := seqCompMode((compMode >> (6 - i*2)) & 3) + if debugDecoder { + println("Table", tableIndex(i), "is", mode) + } + var seq *sequenceDec + switch tableIndex(i) { + case tableLiteralLengths: + seq = &seqs.litLengths + case tableOffsets: + seq = &seqs.offsets + case tableMatchLengths: + seq = &seqs.matchLengths + default: + panic("unknown table") + } + switch mode { + case compModePredefined: + seq.fse = &fsePredef[i] + case compModeRLE: + if br.remain() < 1 { + return ErrBlockTooSmall + } + v := br.Uint8() + br.advance(1) + dec := fseDecoderPool.Get().(*fseDecoder) + symb, err := decSymbolValue(v, symbolTableX[i]) + if err != nil { + printf("RLE Transform table (%v) error: %v", tableIndex(i), err) + return err + } + dec.setRLE(symb) + seq.fse = dec + if debugDecoder { + printf("RLE set to %+v, code: %v", symb, v) + } + case compModeFSE: + println("Reading table for", tableIndex(i)) + dec := fseDecoderPool.Get().(*fseDecoder) + err := dec.readNCount(&br, uint16(maxTableSymbol[i])) + if err != nil { + println("Read table error:", err) + return err + } + err = dec.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debugDecoder { + println("Read table ok", "symbolLen:", dec.symbolLen) + } + seq.fse = dec + case compModeRepeat: + seq.repeat = true + } + if br.overread() { + return io.ErrUnexpectedEOF + } + } + in = br.unread() + } + + // Wait for history. + // All time spent after this is critical since it is strictly sequential. + if hist == nil { + hist = <-b.history + if hist.error { + return ErrDecoderClosed + } + } + + // Decode treeless literal block. + if litType == literalsBlockTreeless { + // TODO: We could send the history early WITHOUT the stream history. + // This would allow decoding treeless literals before the byte history is available. 
+ // Silencia stats: Treeless 4393, with: 32775, total: 37168, 11% treeless. + // So not much obvious gain here. + + if hist.huffTree == nil { + return errors.New("literal block was treeless, but no history was defined") + } + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize) + } else { + b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) + } + } + var err error + // Use our out buffer. + huff = hist.huffTree + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + // Make sure we don't leak our literals buffer + if err != nil { + println("decompressing literals:", err) + return err + } + if len(literals) != litRegenSize { + return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + } else { + if hist.huffTree != nil && huff != nil { + if hist.dict == nil || hist.dict.litEnc != hist.huffTree { + huffDecoderPool.Put(hist.huffTree) + } + hist.huffTree = nil + } + } + if huff != nil { + hist.huffTree = huff + } + if debugDecoder { + println("Final literals:", len(literals), "hash:", xxhash.Sum64(literals), "and", nSeqs, "sequences.") + } + + if nSeqs == 0 { + // Decompressed content is defined entirely as Literals Section content. + b.dst = append(b.dst, literals...) + if delayedHistory { + hist.append(literals) + } + return nil + } + + seqs, err := seqs.mergeHistory(&hist.decoders) + if err != nil { + return err + } + if debugDecoder { + println("History merged ok") + } + br := &bitReader{} + if err := br.init(in); err != nil { + return err + } + + // TODO: Investigate if sending history without decoders are faster. + // This would allow the sequences to be decoded async and only have to construct stream history. + // If only recent offsets were not transferred, this would be an obvious win. + // Also, if first 3 sequences don't reference recent offsets, all sequences can be decoded. + + hbytes := hist.b + if len(hbytes) > hist.windowSize { + hbytes = hbytes[len(hbytes)-hist.windowSize:] + // We do not need history any more. + if hist.dict != nil { + hist.dict.content = nil + } + } + + if err := seqs.initialize(br, hist, literals, b.dst); err != nil { + println("initializing sequences:", err) + return err + } + + err = seqs.decode(nSeqs, br, hbytes) + if err != nil { + return err + } + if !br.finished() { + return fmt.Errorf("%d extra bits on block, should be 0", br.remain()) + } + + err = br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + if len(b.data) > maxCompressedBlockSize { + return fmt.Errorf("compressed block size too large (%d)", len(b.data)) + } + // Set output and release references. + b.dst = seqs.out + seqs.out, seqs.literals, seqs.hist = nil, nil, nil + + if !delayedHistory { + // If we don't have delayed history, no need to update. + hist.recentOffsets = seqs.prevOffset + return nil + } + if b.Last { + // if last block we don't care about history. 
+ println("Last block, no history returned") + hist.b = hist.b[:0] + return nil + } + hist.append(b.dst) + hist.recentOffsets = seqs.prevOffset + if debugDecoder { + println("Finished block with literals:", len(literals), "and", nSeqs, "sequences.") + } + + return nil +} diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/blockenc.go b/hotelReservation/vendor/github.com/klauspost/compress/zstd/blockenc.go new file mode 100644 index 000000000..3df185ee4 --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -0,0 +1,871 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math" + "math/bits" + + "github.com/klauspost/compress/huff0" +) + +type blockEnc struct { + size int + literals []byte + sequences []seq + coders seqCoders + litEnc *huff0.Scratch + dictLitEnc *huff0.Scratch + wr bitWriter + + extraLits int + output []byte + recentOffsets [3]uint32 + prevRecentOffsets [3]uint32 + + last bool + lowMem bool +} + +// init should be used once the block has been created. +// If called more than once, the effect is the same as calling reset. +func (b *blockEnc) init() { + if b.lowMem { + // 1K literals + if cap(b.literals) < 1<<10 { + b.literals = make([]byte, 0, 1<<10) + } + const defSeqs = 20 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + // 1K + if cap(b.output) < 1<<10 { + b.output = make([]byte, 0, 1<<10) + } + } else { + if cap(b.literals) < maxCompressedBlockSize { + b.literals = make([]byte, 0, maxCompressedBlockSize) + } + const defSeqs = 200 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + if cap(b.output) < maxCompressedBlockSize { + b.output = make([]byte, 0, maxCompressedBlockSize) + } + } + + if b.coders.mlEnc == nil { + b.coders.mlEnc = &fseEncoder{} + b.coders.mlPrev = &fseEncoder{} + b.coders.ofEnc = &fseEncoder{} + b.coders.ofPrev = &fseEncoder{} + b.coders.llEnc = &fseEncoder{} + b.coders.llPrev = &fseEncoder{} + } + b.litEnc = &huff0.Scratch{WantLogLess: 4} + b.reset(nil) +} + +// initNewEncode can be used to reset offsets and encoders to the initial state. +func (b *blockEnc) initNewEncode() { + b.recentOffsets = [3]uint32{1, 4, 8} + b.litEnc.Reuse = huff0.ReusePolicyNone + b.coders.setPrev(nil, nil, nil) +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) reset(prev *blockEnc) { + b.extraLits = 0 + b.literals = b.literals[:0] + b.size = 0 + b.sequences = b.sequences[:0] + b.output = b.output[:0] + b.last = false + if prev != nil { + b.recentOffsets = prev.prevRecentOffsets + } + b.dictLitEnc = nil +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) swapEncoders(prev *blockEnc) { + b.coders.swap(&prev.coders) + b.litEnc, prev.litEnc = prev.litEnc, b.litEnc +} + +// blockHeader contains the information for a block header. +type blockHeader uint32 + +// setLast sets the 'last' indicator on a block. 
+func (h *blockHeader) setLast(b bool) { + if b { + *h = *h | 1 + } else { + const mask = (1 << 24) - 2 + *h = *h & mask + } +} + +// setSize will store the compressed size of a block. +func (h *blockHeader) setSize(v uint32) { + const mask = 7 + *h = (*h)&mask | blockHeader(v<<3) +} + +// setType sets the block type. +func (h *blockHeader) setType(t blockType) { + const mask = 1 | (((1 << 24) - 1) ^ 7) + *h = (*h & mask) | blockHeader(t<<1) +} + +// appendTo will append the block header to a slice. +func (h blockHeader) appendTo(b []byte) []byte { + return append(b, uint8(h), uint8(h>>8), uint8(h>>16)) +} + +// String returns a string representation of the block. +func (h blockHeader) String() string { + return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1) +} + +// literalsHeader contains literals header information. +type literalsHeader uint64 + +// setType can be used to set the type of literal block. +func (h *literalsHeader) setType(t literalsBlockType) { + const mask = math.MaxUint64 - 3 + *h = (*h & mask) | literalsHeader(t) +} + +// setSize can be used to set a single size, for uncompressed and RLE content. +func (h *literalsHeader) setSize(regenLen int) { + inBits := bits.Len32(uint32(regenLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case inBits < 5: + lh |= (uint64(regenLen) << 3) | (1 << 60) + if debugEncoder { + got := int(lh>>3) & 0xff + if got != regenLen { + panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)")) + } + } + case inBits < 12: + lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60) + case inBits < 20: + lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60) + default: + panic(fmt.Errorf("internal error: block too big (%d)", regenLen)) + } + *h = literalsHeader(lh) +} + +// setSizes will set the size of a compressed literals section and the input length. +func (h *literalsHeader) setSizes(compLen, inLen int, single bool) { + compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case compBits <= 10 && inBits <= 10: + if !single { + lh |= 1 << 2 + } + lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) + if debugEncoder { + const mmask = (1 << 24) - 1 + n := (lh >> 4) & mmask + if int(n&1023) != inLen { + panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits)) + } + if int(n>>10) != compLen { + panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits)) + } + } + case compBits <= 14 && inBits <= 14: + lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + case compBits <= 18 && inBits <= 18: + lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + default: + panic("internal error: block too big") + } + *h = literalsHeader(lh) +} + +// appendTo will append the literals header to a byte slice. 
+func (h literalsHeader) appendTo(b []byte) []byte {
+	size := uint8(h >> 60)
+	switch size {
+	case 1:
+		b = append(b, uint8(h))
+	case 2:
+		b = append(b, uint8(h), uint8(h>>8))
+	case 3:
+		b = append(b, uint8(h), uint8(h>>8), uint8(h>>16))
+	case 4:
+		b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24))
+	case 5:
+		b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32))
+	default:
+		panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size))
+	}
+	return b
+}
+
+// size returns the output size with currently set values.
+func (h literalsHeader) size() int {
+	return int(h >> 60)
+}
+
+func (h literalsHeader) String() string {
+	return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%x, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60)
+}
+
+// pushOffsets will push the recent offsets to the backup store.
+func (b *blockEnc) pushOffsets() {
+	b.prevRecentOffsets = b.recentOffsets
+}
+
+// popOffsets will restore the recent offsets from the backup store.
+func (b *blockEnc) popOffsets() {
+	b.recentOffsets = b.prevRecentOffsets
+}
+
+// matchOffset will adjust recent offsets and return the adjusted one,
+// if it matches a previous offset.
+func (b *blockEnc) matchOffset(offset, lits uint32) uint32 {
+	// Check if offset is one of the recent offsets.
+	// Adjusts the output offset accordingly.
+	// Gives a tiny bit of compression, typically around 1%.
+	if true {
+		if lits > 0 {
+			switch offset {
+			case b.recentOffsets[0]:
+				offset = 1
+			case b.recentOffsets[1]:
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset = 2
+			case b.recentOffsets[2]:
+				b.recentOffsets[2] = b.recentOffsets[1]
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset = 3
+			default:
+				b.recentOffsets[2] = b.recentOffsets[1]
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset += 3
+			}
+		} else {
+			switch offset {
+			case b.recentOffsets[1]:
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset = 1
+			case b.recentOffsets[2]:
+				b.recentOffsets[2] = b.recentOffsets[1]
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset = 2
+			case b.recentOffsets[0] - 1:
+				b.recentOffsets[2] = b.recentOffsets[1]
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset = 3
+			default:
+				b.recentOffsets[2] = b.recentOffsets[1]
+				b.recentOffsets[1] = b.recentOffsets[0]
+				b.recentOffsets[0] = offset
+				offset += 3
+			}
+		}
+	} else {
+		offset += 3
+	}
+	return offset
+}
+
+// encodeRaw can be used to set the output to a raw representation of supplied bytes.
+func (b *blockEnc) encodeRaw(a []byte) {
+	var bh blockHeader
+	bh.setLast(b.last)
+	bh.setSize(uint32(len(a)))
+	bh.setType(blockTypeRaw)
+	b.output = bh.appendTo(b.output[:0])
+	b.output = append(b.output, a...)
+	if debugEncoder {
+		println("Adding RAW block, length", len(a), "last:", b.last)
+	}
+}
+
+// encodeRawTo will write the raw block header and content to dst and return the result.
+func (b *blockEnc) encodeRawTo(dst, src []byte) []byte {
+	var bh blockHeader
+	bh.setLast(b.last)
+	bh.setSize(uint32(len(src)))
+	bh.setType(blockTypeRaw)
+	dst = bh.appendTo(dst)
+	dst = append(dst, src...)
+	if debugEncoder {
+		println("Adding RAW block, length", len(src), "last:", b.last)
+	}
+	return dst
+}
+
+// encodeLits can be used if the block is only litLen.
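The matchOffset helper above implements zstd's repeat-offset codes: values 1-3 refer to the three most recently used offsets (with the table rotated on use, and a shifted meaning when the sequence carries no literals), while a real offset is transmitted as offset+3. A hedged sketch of the inverse, decoder-side mapping for the simple case with literals:

// decodeOffset is an illustrative inverse of matchOffset for sequences
// with litLen > 0; it is not part of the vendored code.
func decodeOffset(code uint32, recent *[3]uint32) uint32 {
	switch code {
	case 1:
		return recent[0] // most recent offset, table unchanged
	case 2:
		recent[0], recent[1] = recent[1], recent[0]
		return recent[0]
	case 3:
		off := recent[2]
		recent[0], recent[1], recent[2] = off, recent[0], recent[1]
		return off
	default:
		off := code - 3 // real offsets are shifted past the 3 repeat codes
		recent[0], recent[1], recent[2] = off, recent[0], recent[1]
		return off
	}
}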
+func (b *blockEnc) encodeLits(lits []byte, raw bool) error { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(lits))) + + // Don't compress extremely small blocks + if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw { + if debugEncoder { + println("Adding RAW block, length", len(lits), "last:", b.last) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits...) + return nil + } + + var ( + out []byte + reUsed, single bool + err error + ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(lits) >= 1024 { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(lits, b.litEnc) + } else if len(lits) > 32 { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(lits, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + + switch err { + case huff0.ErrIncompressible: + if debugEncoder { + println("Adding RAW block, length", len(lits), "last:", b.last) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits...) + return nil + case huff0.ErrUseRLE: + if debugEncoder { + println("Adding RLE block, length", len(lits)) + } + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits[0]) + return nil + case nil: + default: + return err + } + // Compressed... + // Now, allow reuse + b.litEnc.Reuse = huff0.ReusePolicyAllow + bh.setType(blockTypeCompressed) + var lh literalsHeader + if reUsed { + if debugEncoder { + println("Reused tree, compressed to", len(out)) + } + lh.setType(literalsBlockTreeless) + } else { + if debugEncoder { + println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + } + // Set sizes + lh.setSizes(len(out), len(lits), single) + bh.setSize(uint32(len(out) + lh.size() + 1)) + + // Write block headers. + b.output = bh.appendTo(b.output) + b.output = lh.appendTo(b.output) + // Add compressed data. + b.output = append(b.output, out...) + // No sequences. + b.output = append(b.output, 0) + return nil +} + +// fuzzFseEncoder can be used to fuzz the FSE encoder. +func fuzzFseEncoder(data []byte) int { + if len(data) > maxSequences || len(data) < 2 { + return 0 + } + enc := fseEncoder{} + hist := enc.Histogram()[:256] + maxSym := uint8(0) + for i, v := range data { + v = v & 63 + data[i] = v + hist[v]++ + if v > maxSym { + maxSym = v + } + } + if maxSym == 0 { + // All 0 + return 0 + } + maxCount := func(a []uint32) int { + var max uint32 + for _, v := range a { + if v > max { + max = v + } + } + return int(max) + } + cnt := maxCount(hist[:maxSym]) + if cnt == len(data) { + // RLE + return 0 + } + enc.HistogramFinished(maxSym, cnt) + err := enc.normalizeCount(len(data)) + if err != nil { + return 0 + } + _, err = enc.writeCount(nil) + if err != nil { + panic(err) + } + return 1 +} + +// encode will encode the block and append the output in b.output. +// Previous offset codes must be pushed if more blocks are expected. +func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { + if len(b.sequences) == 0 { + return b.encodeLits(b.literals, rawAllLits) + } + // We want some difference to at least account for the headers. 
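encodeLits above settles on a literals mode by size: tiny blocks are emitted raw, blocks up to 1 KiB go through a single huff0 stream, and anything larger uses the four-stream variant; huff0 itself may answer with ErrIncompressible or ErrUseRLE, which map to the raw and RLE literal block types. A hedged sketch of just that decision, using the same huff0 calls:

import "github.com/klauspost/compress/huff0"

// chooseLitMode mirrors the size thresholds used by encodeLits (sketch only).
func chooseLitMode(lits []byte, s *huff0.Scratch) (out []byte, reused, single bool, err error) {
	switch {
	case len(lits) >= 1024:
		out, reused, err = huff0.Compress4X(lits, s) // 4 interleaved streams
	case len(lits) > 32:
		single = true
		out, reused, err = huff0.Compress1X(lits, s) // single stream
	default:
		err = huff0.ErrIncompressible // too small, store raw
	}
	return
}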
+ saved := b.size - len(b.literals) - (b.size >> 5) + if saved < 16 { + if org == nil { + return errIncompressible + } + b.popOffsets() + return b.encodeLits(org, rawAllLits) + } + + var bh blockHeader + var lh literalsHeader + bh.setLast(b.last) + bh.setType(blockTypeCompressed) + // Store offset of the block header. Needed when we know the size. + bhOffset := len(b.output) + b.output = bh.appendTo(b.output) + + var ( + out []byte + reUsed, single bool + err error + ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(b.literals) >= 1024 && !raw { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) + } else if len(b.literals) > 32 && !raw { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + + switch err { + case huff0.ErrIncompressible: + lh.setType(literalsBlockRaw) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals...) + if debugEncoder { + println("Adding literals RAW, length", len(b.literals)) + } + case huff0.ErrUseRLE: + lh.setType(literalsBlockRLE) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals[0]) + if debugEncoder { + println("Adding literals RLE") + } + case nil: + // Compressed litLen... + if reUsed { + if debugEncoder { + println("reused tree") + } + lh.setType(literalsBlockTreeless) + } else { + if debugEncoder { + println("new tree, size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + if debugEncoder { + _, _, err := huff0.ReadTable(out, nil) + if err != nil { + panic(err) + } + } + } + lh.setSizes(len(out), len(b.literals), single) + if debugEncoder { + printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) + println("Adding literal header:", lh) + } + b.output = lh.appendTo(b.output) + b.output = append(b.output, out...) + b.litEnc.Reuse = huff0.ReusePolicyAllow + if debugEncoder { + println("Adding literals compressed") + } + default: + if debugEncoder { + println("Adding literals ERROR:", err) + } + return err + } + // Sequence compression + + // Write the number of sequences + switch { + case len(b.sequences) < 128: + b.output = append(b.output, uint8(len(b.sequences))) + case len(b.sequences) < 0x7f00: // TODO: this could be wrong + n := len(b.sequences) + b.output = append(b.output, 128+uint8(n>>8), uint8(n)) + default: + n := len(b.sequences) - 0x7f00 + b.output = append(b.output, 255, uint8(n), uint8(n>>8)) + } + if debugEncoder { + println("Encoding", len(b.sequences), "sequences") + } + b.genCodes() + llEnc := b.coders.llEnc + ofEnc := b.coders.ofEnc + mlEnc := b.coders.mlEnc + err = llEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = ofEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = mlEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + + // Choose the best compression mode for each type. + // Will evaluate the new vs predefined and previous. + chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) { + // See if predefined/previous is better + hist := cur.count[:cur.symbolLen] + nSize := cur.approxSize(hist) + cur.maxHeaderSize() + predefSize := preDef.approxSize(hist) + prevSize := prev.approxSize(hist) + + // Add a small penalty for new encoders. 
+ // Don't bother with extremely small (<2 byte gains). + nSize = nSize + (nSize+2*8*16)>>4 + switch { + case predefSize <= prevSize && predefSize <= nSize || forcePreDef: + if debugEncoder { + println("Using predefined", predefSize>>3, "<=", nSize>>3) + } + return preDef, compModePredefined + case prevSize <= nSize: + if debugEncoder { + println("Using previous", prevSize>>3, "<=", nSize>>3) + } + return prev, compModeRepeat + default: + if debugEncoder { + println("Using new, predef", predefSize>>3, ". previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes") + println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen]) + } + return cur, compModeFSE + } + } + + // Write compression mode + var mode uint8 + if llEnc.useRLE { + mode |= uint8(compModeRLE) << 6 + llEnc.setRLE(b.sequences[0].llCode) + if debugEncoder { + println("llEnc.useRLE") + } + } else { + var m seqCompMode + llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths]) + mode |= uint8(m) << 6 + } + if ofEnc.useRLE { + mode |= uint8(compModeRLE) << 4 + ofEnc.setRLE(b.sequences[0].ofCode) + if debugEncoder { + println("ofEnc.useRLE") + } + } else { + var m seqCompMode + ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets]) + mode |= uint8(m) << 4 + } + + if mlEnc.useRLE { + mode |= uint8(compModeRLE) << 2 + mlEnc.setRLE(b.sequences[0].mlCode) + if debugEncoder { + println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen) + } + } else { + var m seqCompMode + mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths]) + mode |= uint8(m) << 2 + } + b.output = append(b.output, mode) + if debugEncoder { + printf("Compression modes: 0b%b", mode) + } + b.output, err = llEnc.writeCount(b.output) + if err != nil { + return err + } + start := len(b.output) + b.output, err = ofEnc.writeCount(b.output) + if err != nil { + return err + } + if false { + println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount) + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen) + for i, v := range ofEnc.norm[:ofEnc.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v) + } + } + b.output, err = mlEnc.writeCount(b.output) + if err != nil { + return err + } + + // Maybe in block? + wr := &b.wr + wr.reset(b.output) + + var ll, of, ml cState + + // Current sequence + seq := len(b.sequences) - 1 + s := b.sequences[seq] + llEnc.setBits(llBitsTable[:]) + mlEnc.setBits(mlBitsTable[:]) + ofEnc.setBits(nil) + + llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256] + + // We have 3 bounds checks here (and in the loop). + // Since we are iterating backwards it is kinda hard to avoid. + llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] + ll.init(wr, &llEnc.ct, llB) + of.init(wr, &ofEnc.ct, ofB) + wr.flush32() + ml.init(wr, &mlEnc.ct, mlB) + + // Each of these lookups also generates a bounds check. 
+	wr.addBits32NC(s.litLen, llB.outBits)
+	wr.addBits32NC(s.matchLen, mlB.outBits)
+	wr.flush32()
+	wr.addBits32NC(s.offset, ofB.outBits)
+	if debugSequences {
+		println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB)
+	}
+	seq--
+	if llEnc.maxBits+mlEnc.maxBits+ofEnc.maxBits <= 32 {
+		// No need to flush (common)
+		for seq >= 0 {
+			s = b.sequences[seq]
+			wr.flush32()
+			llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode]
+			// tablelog max is 8 for all.
+			of.encode(ofB)
+			ml.encode(mlB)
+			ll.encode(llB)
+			wr.flush32()
+
+			// We checked that all can stay within 32 bits
+			wr.addBits32NC(s.litLen, llB.outBits)
+			wr.addBits32NC(s.matchLen, mlB.outBits)
+			wr.addBits32NC(s.offset, ofB.outBits)
+
+			if debugSequences {
+				println("Encoded seq", seq, s)
+			}
+
+			seq--
+		}
+	} else {
+		for seq >= 0 {
+			s = b.sequences[seq]
+			wr.flush32()
+			llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode]
+			// tablelog max is below 8 for each.
+			of.encode(ofB)
+			ml.encode(mlB)
+			ll.encode(llB)
+			wr.flush32()
+
+			// ml+ll = max 32 bits total
+			wr.addBits32NC(s.litLen, llB.outBits)
+			wr.addBits32NC(s.matchLen, mlB.outBits)
+			wr.flush32()
+			wr.addBits32NC(s.offset, ofB.outBits)
+
+			if debugSequences {
+				println("Encoded seq", seq, s)
+			}
+
+			seq--
+		}
+	}
+	ml.flush(mlEnc.actualTableLog)
+	of.flush(ofEnc.actualTableLog)
+	ll.flush(llEnc.actualTableLog)
+	err = wr.close()
+	if err != nil {
+		return err
+	}
+	b.output = wr.out
+
+	if len(b.output)-3-bhOffset >= b.size {
+		// Maybe even add a bigger margin.
+		b.litEnc.Reuse = huff0.ReusePolicyNone
+		return errIncompressible
+	}
+
+	// Size is output minus block header.
+	bh.setSize(uint32(len(b.output)-bhOffset) - 3)
+	if debugEncoder {
+		println("Rewriting block header", bh)
+	}
+	_ = bh.appendTo(b.output[bhOffset:bhOffset])
+	b.coders.setPrev(llEnc, mlEnc, ofEnc)
+	return nil
+}
+
+var errIncompressible = errors.New("incompressible")
+
+func (b *blockEnc) genCodes() {
+	if len(b.sequences) == 0 {
+		// nothing to do
+		return
+	}
+
+	if len(b.sequences) > math.MaxUint16 {
+		panic("can only encode up to 64K sequences")
+	}
+	// No bounds checks after here:
+	llH := b.coders.llEnc.Histogram()[:256]
+	ofH := b.coders.ofEnc.Histogram()[:256]
+	mlH := b.coders.mlEnc.Histogram()[:256]
+	for i := range llH {
+		llH[i] = 0
+	}
+	for i := range ofH {
+		ofH[i] = 0
+	}
+	for i := range mlH {
+		mlH[i] = 0
+	}
+
+	var llMax, ofMax, mlMax uint8
+	for i, seq := range b.sequences {
+		v := llCode(seq.litLen)
+		seq.llCode = v
+		llH[v]++
+		if v > llMax {
+			llMax = v
+		}
+
+		v = ofCode(seq.offset)
+		seq.ofCode = v
+		ofH[v]++
+		if v > ofMax {
+			ofMax = v
+		}
+
+		v = mlCode(seq.matchLen)
+		seq.mlCode = v
+		mlH[v]++
+		if v > mlMax {
+			mlMax = v
+			if debugAsserts && mlMax > maxMatchLengthSymbol {
+				panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen))
+			}
+		}
+		b.sequences[i] = seq
+	}
+	maxCount := func(a []uint32) int {
+		var max uint32
+		for _, v := range a {
+			if v > max {
+				max = v
+			}
+		}
+		return int(max)
+	}
+	if debugAsserts && mlMax > maxMatchLengthSymbol {
+		panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax))
+	}
+	if debugAsserts && ofMax > maxOffsetBits {
+		panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax))
+	}
+	if debugAsserts && llMax > maxLiteralLengthSymbol {
+		panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax))
+	}
+
+	b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1]))
b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1])) + b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1])) +} diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/blocktype_string.go b/hotelReservation/vendor/github.com/klauspost/compress/zstd/blocktype_string.go new file mode 100644 index 000000000..01a01e486 --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/blocktype_string.go @@ -0,0 +1,85 @@ +// Code generated by "stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT. + +package zstd + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[blockTypeRaw-0] + _ = x[blockTypeRLE-1] + _ = x[blockTypeCompressed-2] + _ = x[blockTypeReserved-3] +} + +const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved" + +var _blockType_index = [...]uint8{0, 12, 24, 43, 60} + +func (i blockType) String() string { + if i >= blockType(len(_blockType_index)-1) { + return "blockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _blockType_name[_blockType_index[i]:_blockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[literalsBlockRaw-0] + _ = x[literalsBlockRLE-1] + _ = x[literalsBlockCompressed-2] + _ = x[literalsBlockTreeless-3] +} + +const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless" + +var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76} + +func (i literalsBlockType) String() string { + if i >= literalsBlockType(len(_literalsBlockType_index)-1) { + return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[compModePredefined-0] + _ = x[compModeRLE-1] + _ = x[compModeFSE-2] + _ = x[compModeRepeat-3] +} + +const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat" + +var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54} + +func (i seqCompMode) String() string { + if i >= seqCompMode(len(_seqCompMode_index)-1) { + return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+	var x [1]struct{}
+	_ = x[tableLiteralLengths-0]
+	_ = x[tableOffsets-1]
+	_ = x[tableMatchLengths-2]
+}
+
+const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths"
+
+var _tableIndex_index = [...]uint8{0, 19, 31, 48}
+
+func (i tableIndex) String() string {
+	if i >= tableIndex(len(_tableIndex_index)-1) {
+		return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]]
+}
diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/hotelReservation/vendor/github.com/klauspost/compress/zstd/bytebuf.go
new file mode 100644
index 000000000..aab71c6cf
--- /dev/null
+++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/bytebuf.go
@@ -0,0 +1,130 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+)
+
+type byteBuffer interface {
+	// Read up to 8 bytes.
+	// Returns io.ErrUnexpectedEOF if this cannot be satisfied.
+	readSmall(n int) ([]byte, error)
+
+	// Read >8 bytes.
+	// MAY use the destination slice.
+	readBig(n int, dst []byte) ([]byte, error)
+
+	// Read a single byte.
+	readByte() (byte, error)
+
+	// Skip n bytes.
+	skipN(n int) error
+}
+
+// in-memory buffer
+type byteBuf []byte
+
+func (b *byteBuf) readSmall(n int) ([]byte, error) {
+	if debugAsserts && n > 8 {
+		panic(fmt.Errorf("small read > 8 (%d). use readBig", n))
+	}
+	bb := *b
+	if len(bb) < n {
+		return nil, io.ErrUnexpectedEOF
+	}
+	r := bb[:n]
+	*b = bb[n:]
+	return r, nil
+}
+
+func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) {
+	bb := *b
+	if len(bb) < n {
+		return nil, io.ErrUnexpectedEOF
+	}
+	r := bb[:n]
+	*b = bb[n:]
+	return r, nil
+}
+
+func (b *byteBuf) remain() []byte {
+	return *b
+}
+
+func (b *byteBuf) readByte() (byte, error) {
+	bb := *b
+	if len(bb) < 1 {
+		// Returning a nil error here would silently yield a zero byte on an
+		// exhausted buffer; report the truncation instead.
+		return 0, io.ErrUnexpectedEOF
+	}
+	r := bb[0]
+	*b = bb[1:]
+	return r, nil
+}
+
+func (b *byteBuf) skipN(n int) error {
+	bb := *b
+	if len(bb) < n {
+		return io.ErrUnexpectedEOF
+	}
+	*b = bb[n:]
+	return nil
+}
+
+// wrapper around a reader.
+type readerWrapper struct {
+	r   io.Reader
+	tmp [8]byte
+}
+
+func (r *readerWrapper) readSmall(n int) ([]byte, error) {
+	if debugAsserts && n > 8 {
+		panic(fmt.Errorf("small read > 8 (%d). use readBig", n))
+	}
+	n2, err := io.ReadFull(r.r, r.tmp[:n])
+	// We only really care about the actual bytes read.
+ if err != nil { + if err == io.EOF { + return nil, io.ErrUnexpectedEOF + } + if debugDecoder { + println("readSmall: got", n2, "want", n, "err", err) + } + return nil, err + } + return r.tmp[:n], nil +} + +func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { + if cap(dst) < n { + dst = make([]byte, n) + } + n2, err := io.ReadFull(r.r, dst[:n]) + if err == io.EOF && n > 0 { + err = io.ErrUnexpectedEOF + } + return dst[:n2], err +} + +func (r *readerWrapper) readByte() (byte, error) { + n2, err := r.r.Read(r.tmp[:1]) + if err != nil { + return 0, err + } + if n2 != 1 { + return 0, io.ErrUnexpectedEOF + } + return r.tmp[0], nil +} + +func (r *readerWrapper) skipN(n int) error { + n2, err := io.CopyN(ioutil.Discard, r.r, int64(n)) + if n2 != int64(n) { + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/bytereader.go b/hotelReservation/vendor/github.com/klauspost/compress/zstd/bytereader.go new file mode 100644 index 000000000..2c4fca17f --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/bytereader.go @@ -0,0 +1,88 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// init will initialize the reader and set the input. +func (b *byteReader) init(in []byte) { + b.b = in + b.off = 0 +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// overread returns whether we have advanced too far. +func (b *byteReader) overread() bool { + return b.off > len(b.b) +} + +// Int32 returns a little endian int32 starting at current offset. +func (b byteReader) Int32() int32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := int32(b2[3]) + v2 := int32(b2[2]) + v1 := int32(b2[1]) + v0 := int32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint8 returns the next byte +func (b *byteReader) Uint8() uint8 { + v := b.b[b.off] + return v +} + +// Uint32 returns a little endian uint32 starting at current offset. +func (b byteReader) Uint32() uint32 { + if r := b.remain(); r < 4 { + // Very rare + v := uint32(0) + for i := 1; i <= r; i++ { + v = (v << 8) | uint32(b.b[len(b.b)-i]) + } + return v + } + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint32NC returns a little endian uint32 starting at current offset. +// The caller must be sure if there are at least 4 bytes left. +func (b byteReader) Uint32NC() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. 
+func (b byteReader) remain() int {
+	return len(b.b) - b.off
+}
diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/hotelReservation/vendor/github.com/klauspost/compress/zstd/decodeheader.go
new file mode 100644
index 000000000..69736e8d4
--- /dev/null
+++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/decodeheader.go
@@ -0,0 +1,202 @@
+// Copyright 2020+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+
+package zstd
+
+import (
+	"bytes"
+	"errors"
+	"io"
+)
+
+// HeaderMaxSize is the maximum size of a Frame and Block Header.
+// If less is sent to Header.Decode it *may* still contain enough information.
+const HeaderMaxSize = 14 + 3
+
+// Header contains information about the first frame and block within that.
+type Header struct {
+	// WindowSize is the window of data to keep while decoding.
+	// Will only be set if SingleSegment is false.
+	WindowSize uint64
+
+	// Frame content size.
+	// Expected size of the entire frame.
+	FrameContentSize uint64
+
+	// Dictionary ID.
+	// If 0, no dictionary.
+	DictionaryID uint32
+
+	// First block information.
+	FirstBlock struct {
+		// OK will be set if first block could be decoded.
+		OK bool
+
+		// Is this the last block of a frame?
+		Last bool
+
+		// Is the data compressed?
+		// If true CompressedSize will be populated.
+		// Unfortunately DecompressedSize cannot be determined
+		// without decoding the blocks.
+		Compressed bool
+
+		// DecompressedSize is the expected decompressed size of the block.
+		// Will be 0 if it cannot be determined.
+		DecompressedSize int
+
+		// CompressedSize of the data in the block.
+		// Does not include the block header.
+		// Will be equal to DecompressedSize if not Compressed.
+		CompressedSize int
+	}
+
+	// Skippable will be true if the frame is meant to be skipped.
+	// No other information will be populated.
+	Skippable bool
+
+	// If set there is a checksum present for the block content.
+	HasCheckSum bool
+
+	// If this is true FrameContentSize will have a valid value
+	HasFCS bool
+
+	SingleSegment bool
+}
+
+// Decode the header from the beginning of the stream.
+// This will decode the frame header and the first block header if enough bytes are provided.
+// It is recommended to provide at least HeaderMaxSize bytes.
+// If the frame header cannot be read an error will be returned.
+// If there isn't enough input, io.ErrUnexpectedEOF is returned.
+// The FirstBlock.OK will indicate if enough information was available to decode the first block header.
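Since decodeheader.go is new in this vendor drop, a quick usage sketch may help orientation (illustrative only; the file name is an assumption): Header.Decode lets callers inspect a frame cheaply before deciding to decompress it.

package main

import (
	"fmt"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	f, err := os.Open("data.zst") // hypothetical input
	if err != nil {
		panic(err)
	}
	defer f.Close()

	buf := make([]byte, zstd.HeaderMaxSize)
	n, _ := f.Read(buf)

	var h zstd.Header
	if err := h.Decode(buf[:n]); err != nil {
		panic(err)
	}
	fmt.Println("skippable:", h.Skippable, "window:", h.WindowSize, "hasFCS:", h.HasFCS)
}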
+func (h *Header) Decode(in []byte) error { + if len(in) < 4 { + return io.ErrUnexpectedEOF + } + b, in := in[:4], in[4:] + if !bytes.Equal(b, frameMagic) { + if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 { + return ErrMagicMismatch + } + *h = Header{Skippable: true} + return nil + } + if len(in) < 1 { + return io.ErrUnexpectedEOF + } + + // Clear output + *h = Header{} + fhd, in := in[0], in[1:] + h.SingleSegment = fhd&(1<<5) != 0 + h.HasCheckSum = fhd&(1<<2) != 0 + + if fhd&(1<<3) != 0 { + return errors.New("reserved bit set on frame header") + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + if !h.SingleSegment { + if len(in) < 1 { + return io.ErrUnexpectedEOF + } + var wd byte + wd, in = in[0], in[1:] + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + h.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + if len(in) < int(size) { + return io.ErrUnexpectedEOF + } + b, in = in[:size], in[size:] + if b == nil { + return io.ErrUnexpectedEOF + } + switch size { + case 1: + h.DictionaryID = uint32(b[0]) + case 2: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if h.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + + if fcsSize > 0 { + h.HasFCS = true + if len(in) < fcsSize { + return io.ErrUnexpectedEOF + } + b, in = in[:fcsSize], in[fcsSize:] + if b == nil { + return io.ErrUnexpectedEOF + } + switch fcsSize { + case 1: + h.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. + h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + h.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + } + + // Frame Header done, we will not fail from now on. + if len(in) < 3 { + return nil + } + tmp := in[:3] + bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) + h.FirstBlock.Last = bh&1 != 0 + blockType := blockType((bh >> 1) & 3) + // find size. 
+	cSize := int(bh >> 3)
+	switch blockType {
+	case blockTypeReserved:
+		return nil
+	case blockTypeRLE:
+		h.FirstBlock.Compressed = true
+		h.FirstBlock.DecompressedSize = cSize
+		h.FirstBlock.CompressedSize = 1
+	case blockTypeCompressed:
+		h.FirstBlock.Compressed = true
+		h.FirstBlock.CompressedSize = cSize
+	case blockTypeRaw:
+		h.FirstBlock.DecompressedSize = cSize
+		h.FirstBlock.CompressedSize = cSize
+	default:
+		panic("Invalid block type")
+	}
+
+	h.FirstBlock.OK = true
+	return nil
+}
diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/decoder.go b/hotelReservation/vendor/github.com/klauspost/compress/zstd/decoder.go
new file mode 100644
index 000000000..f430f58b5
--- /dev/null
+++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -0,0 +1,555 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"errors"
+	"io"
+	"sync"
+)
+
+// Decoder provides decoding of zstandard streams.
+// The decoder has been designed to operate without allocations after a warmup.
+// This means that you should store the decoder for best performance.
+// To re-use a stream decoder, use the Reset(r io.Reader) error to switch to another stream.
+// A decoder can safely be re-used even if the previous stream failed.
+// To release the resources, you must call the Close() function on a decoder.
+type Decoder struct {
+	o decoderOptions
+
+	// Unreferenced decoders, ready for use.
+	decoders chan *blockDec
+
+	// Streams ready to be decoded.
+	stream chan decodeStream
+
+	// Current read position used for Reader functionality.
+	current decoderState
+
+	// Custom dictionaries.
+	// Always uses copies.
+	dicts map[uint32]dict
+
+	// streamWg is the waitgroup for all streams
+	streamWg sync.WaitGroup
+}
+
+// decoderState is used for maintaining state when the decoder
+// is used for streaming.
+type decoderState struct {
+	// current block being written to stream.
+	decodeOutput
+
+	// output in order to be written to stream.
+	output chan decodeOutput
+
+	// cancel remaining output.
+	cancel chan struct{}
+
+	flushed bool
+}
+
+var (
+	// Check the interfaces we want to support.
+	_ = io.WriterTo(&Decoder{})
+	_ = io.Reader(&Decoder{})
+)
+
+// NewReader creates a new decoder.
+// A nil Reader can be provided in which case Reset can be used to start a decode.
+//
+// A Decoder can be used in two modes:
+//
+// 1) As a stream, or
+// 2) For stateless decoding using DecodeAll.
+//
+// Only a single stream can be decoded concurrently, but the same decoder
+// can run multiple concurrent stateless decodes. It is even possible to
+// use stateless decodes while a stream is being decoded.
+//
+// The Reset function can be used to initiate a new stream, which will considerably
+// reduce the allocations normally caused by NewReader.
+func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
+	initPredefined()
+	var d Decoder
+	d.o.setDefault()
+	for _, o := range opts {
+		err := o(&d.o)
+		if err != nil {
+			return nil, err
+		}
+	}
+	d.current.output = make(chan decodeOutput, d.o.concurrent)
+	d.current.flushed = true
+
+	if r == nil {
+		d.current.err = ErrDecoderNilInput
+	}
+
+	// Transfer option dicts.
+	d.dicts = make(map[uint32]dict, len(d.o.dicts))
+	for _, dc := range d.o.dicts {
+		d.dicts[dc.id] = dc
+	}
+	d.o.dicts = nil
+
+	// Create decoders
+	d.decoders = make(chan *blockDec, d.o.concurrent)
+	for i := 0; i < d.o.concurrent; i++ {
+		dec := newBlockDec(d.o.lowMem)
+		dec.localFrame = newFrameDec(d.o)
+		d.decoders <- dec
+	}
+
+	if r == nil {
+		return &d, nil
+	}
+	return &d, d.Reset(r)
+}
+
+// Read bytes from the decompressed stream into p.
+// Returns the number of bytes read and any error that occurred.
+// When the stream is done, io.EOF will be returned.
+func (d *Decoder) Read(p []byte) (int, error) {
+	var n int
+	for {
+		if len(d.current.b) > 0 {
+			filled := copy(p, d.current.b)
+			p = p[filled:]
+			d.current.b = d.current.b[filled:]
+			n += filled
+		}
+		if len(p) == 0 {
+			break
+		}
+		if len(d.current.b) == 0 {
+			// We have an error and no more data
+			if d.current.err != nil {
+				break
+			}
+			if !d.nextBlock(n == 0) {
+				return n, nil
+			}
+		}
+	}
+	if len(d.current.b) > 0 {
+		if debugDecoder {
+			println("returning", n, "still bytes left:", len(d.current.b))
+		}
+		// Only return error at end of block
+		return n, nil
+	}
+	if d.current.err != nil {
+		d.drainOutput()
+	}
+	if debugDecoder {
+		println("returning", n, d.current.err, len(d.decoders))
+	}
+	return n, d.current.err
+}
+
+// Reset will reset the decoder to the supplied stream after the current has finished processing.
+// Note that this functionality cannot be used after Close has been called.
+// Reset can be called with a nil reader to release references to the previous reader.
+// After being called with a nil reader, no other operations than Reset or DecodeAll or Close
+// should be used.
+func (d *Decoder) Reset(r io.Reader) error {
+	if d.current.err == ErrDecoderClosed {
+		return d.current.err
+	}
+
+	d.drainOutput()
+
+	if r == nil {
+		d.current.err = ErrDecoderNilInput
+		if len(d.current.b) > 0 {
+			d.current.b = d.current.b[:0]
+		}
+		d.current.flushed = true
+		return nil
+	}
+
+	// If bytes buffer and < 5MB, do sync decoding anyway.
+	if bb, ok := r.(byter); ok && bb.Len() < 5<<20 {
+		bb2 := bb
+		if debugDecoder {
+			println("*bytes.Buffer detected, doing sync decode, len:", bb.Len())
+		}
+		b := bb2.Bytes()
+		var dst []byte
+		if cap(d.current.b) > 0 {
+			dst = d.current.b
+		}
+
+		dst, err := d.DecodeAll(b, dst[:0])
+		if err == nil {
+			err = io.EOF
+		}
+		d.current.b = dst
+		d.current.err = err
+		d.current.flushed = true
+		if debugDecoder {
+			println("sync decode to", len(dst), "bytes, err:", err)
+		}
+		return nil
+	}
+
+	if d.stream == nil {
+		d.stream = make(chan decodeStream, 1)
+		d.streamWg.Add(1)
+		go d.startStreamDecoder(d.stream)
+	}
+
+	// Remove current block.
+	d.current.decodeOutput = decodeOutput{}
+	d.current.err = nil
+	d.current.cancel = make(chan struct{})
+	d.current.flushed = false
+	d.current.d = nil
+
+	d.stream <- decodeStream{
+		r:      r,
+		output: d.current.output,
+		cancel: d.current.cancel,
+	}
+	return nil
+}
+
+// drainOutput will drain the output until errEndOfStream is sent.
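To tie NewReader, DecodeAll and Reset together, a hedged usage sketch (the file name and option value are assumptions, not from this patch):

package main

import (
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

var someFrame []byte // stands in for a compressed zstd frame

func main() {
	// One decoder, reused: stateless DecodeAll plus a streaming Reset.
	dec, err := zstd.NewReader(nil, zstd.WithDecoderConcurrency(4))
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	if out, err := dec.DecodeAll(someFrame, nil); err == nil {
		_ = out // stateless decode of an in-memory frame
	}

	f, err := os.Open("data.zst") // hypothetical input
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if err := dec.Reset(f); err != nil {
		panic(err)
	}
	if _, err := io.Copy(os.Stdout, dec); err != nil { // Decoder implements io.WriterTo
		panic(err)
	}
}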
+func (d *Decoder) drainOutput() {
+	if d.current.cancel != nil {
+		println("cancelling current")
+		close(d.current.cancel)
+		d.current.cancel = nil
+	}
+	if d.current.d != nil {
+		if debugDecoder {
+			printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders))
+		}
+		d.decoders <- d.current.d
+		d.current.d = nil
+		d.current.b = nil
+	}
+	if d.current.output == nil || d.current.flushed {
+		println("current already flushed")
+		return
+	}
+	for v := range d.current.output {
+		if v.d != nil {
+			if debugDecoder {
+				printf("re-adding decoder %p", v.d)
+			}
+			d.decoders <- v.d
+		}
+		if v.err == errEndOfStream {
+			println("current flushed")
+			d.current.flushed = true
+			return
+		}
+	}
+}
+
+// WriteTo writes data to w until there's no more data to write or when an error occurs.
+// The return value n is the number of bytes written.
+// Any error encountered during the write is also returned.
+func (d *Decoder) WriteTo(w io.Writer) (int64, error) {
+	var n int64
+	for {
+		if len(d.current.b) > 0 {
+			n2, err2 := w.Write(d.current.b)
+			n += int64(n2)
+			if err2 != nil && (d.current.err == nil || d.current.err == io.EOF) {
+				d.current.err = err2
+			} else if n2 != len(d.current.b) {
+				d.current.err = io.ErrShortWrite
+			}
+		}
+		if d.current.err != nil {
+			break
+		}
+		d.nextBlock(true)
+	}
+	err := d.current.err
+	if err != nil {
+		d.drainOutput()
+	}
+	if err == io.EOF {
+		err = nil
+	}
+	return n, err
+}
+
+// DecodeAll allows stateless decoding of a blob of bytes.
+// Output will be appended to dst, so if the destination size is known
+// you can pre-allocate the destination slice to avoid allocations.
+// DecodeAll can be used concurrently.
+// The Decoder concurrency limits will be respected.
+func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
+	if d.current.err == ErrDecoderClosed {
+		return dst, ErrDecoderClosed
+	}
+
+	// Grab a block decoder and frame decoder.
+	block := <-d.decoders
+	frame := block.localFrame
+	defer func() {
+		if debugDecoder {
+			printf("re-adding decoder: %p", block)
+		}
+		frame.rawInput = nil
+		frame.bBuf = nil
+		d.decoders <- block
+	}()
+	frame.bBuf = input
+
+	for {
+		frame.history.reset()
+		err := frame.reset(&frame.bBuf)
+		if err == io.EOF {
+			if debugDecoder {
+				println("frame reset return EOF")
+			}
+			return dst, nil
+		}
+		if frame.DictionaryID != nil {
+			dict, ok := d.dicts[*frame.DictionaryID]
+			if !ok {
+				return nil, ErrUnknownDictionary
+			}
+			frame.history.setDict(&dict)
+		}
+		if err != nil {
+			return dst, err
+		}
+		if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
+			return dst, ErrDecoderSizeExceeded
+		}
+		if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 {
+			// Never preallocate more than 1 GB up front.
+			if cap(dst)-len(dst) < int(frame.FrameContentSize) {
+				dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize))
+				copy(dst2, dst)
+				dst = dst2
+			}
+		}
+		if cap(dst) == 0 {
+			// Allocate len(input) * 2 by default if nothing is provided
+			// and we didn't get frame content size.
+			size := len(input) * 2
+			// Cap to 1 MB.
+			if size > 1<<20 {
+				size = 1 << 20
+			}
+			if uint64(size) > d.o.maxDecodedSize {
+				size = int(d.o.maxDecodedSize)
+			}
+			dst = make([]byte, 0, size)
+		}
+
+		dst, err = frame.runDecoder(dst, block)
+		if err != nil {
+			return dst, err
+		}
+		if len(frame.bBuf) == 0 {
+			if debugDecoder {
+				println("frame dbuf empty")
+			}
+			break
+		}
+	}
+	return dst, nil
+}
+
+// nextBlock returns the next block.
+// If an error occurs d.err will be set.
+// Optionally the function can block for new output.
+// If non-blocking mode is used the returned boolean will be false
+// if no data was available without blocking.
+func (d *Decoder) nextBlock(blocking bool) (ok bool) {
+	if d.current.d != nil {
+		if debugDecoder {
+			printf("re-adding current decoder %p", d.current.d)
+		}
+		d.decoders <- d.current.d
+		d.current.d = nil
+	}
+	if d.current.err != nil {
+		// Keep error state.
+		return blocking
+	}
+
+	if blocking {
+		d.current.decodeOutput = <-d.current.output
+	} else {
+		select {
+		case d.current.decodeOutput = <-d.current.output:
+		default:
+			return false
+		}
+	}
+	if debugDecoder {
+		println("got", len(d.current.b), "bytes, error:", d.current.err)
+	}
+	return true
+}
+
+// Close will release all resources.
+// It is NOT possible to reuse the decoder after this.
+func (d *Decoder) Close() {
+	if d.current.err == ErrDecoderClosed {
+		return
+	}
+	d.drainOutput()
+	if d.stream != nil {
+		close(d.stream)
+		d.streamWg.Wait()
+		d.stream = nil
+	}
+	if d.decoders != nil {
+		close(d.decoders)
+		for dec := range d.decoders {
+			dec.Close()
+		}
+		d.decoders = nil
+	}
+	if d.current.d != nil {
+		d.current.d.Close()
+		d.current.d = nil
+	}
+	d.current.err = ErrDecoderClosed
+}
+
+// IOReadCloser returns the decoder as an io.ReadCloser for convenience.
+// Any changes to the decoder will be reflected, so the returned ReadCloser
+// can be reused along with the decoder.
+// io.WriterTo is also supported by the returned ReadCloser.
+func (d *Decoder) IOReadCloser() io.ReadCloser {
+	return closeWrapper{d: d}
+}
+
+// closeWrapper wraps a function call as a closer.
+type closeWrapper struct {
+	d *Decoder
+}
+
+// WriteTo forwards WriteTo calls to the decoder.
+func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) {
+	return c.d.WriteTo(w)
+}
+
+// Read forwards read calls to the decoder.
+func (c closeWrapper) Read(p []byte) (n int, err error) {
+	return c.d.Read(p)
+}
+
+// Close closes the decoder.
+func (c closeWrapper) Close() error {
+	c.d.Close()
+	return nil
+}
+
+type decodeOutput struct {
+	d   *blockDec
+	b   []byte
+	err error
+}
+
+type decodeStream struct {
+	r io.Reader
+
+	// Blocks ready to be written to output.
+	output chan decodeOutput
+
+	// cancel reading from the input
+	cancel chan struct{}
+}
+
+// errEndOfStream indicates that everything from the stream was read.
+var errEndOfStream = errors.New("end-of-stream")
+
+// Create Decoder:
+// Spawn n block decoders. These accept tasks to decode a block.
+// Create goroutine that handles stream processing, this will send history to decoders as they are available.
+// Decoders update the history as they decode.
+// When a block is returned:
+// a) history is sent to the next decoder,
+// b) content written to CRC.
+// c) return data to WRITER.
+// d) wait for next block to return data.
+// Once the output has been written, the block decoders are returned to the pool for re-use.
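The lifecycle described above boils down to goroutines exchanging decodeOutput values over a channel, with errEndOfStream as the completion sentinel. A stripped-down sketch of that hand-off pattern (illustrative only, not the vendored implementation):

package main

import (
	"errors"
	"fmt"
)

var errEndOfStream = errors.New("end-of-stream")

type decodeOutput struct {
	b   []byte
	err error
}

func main() {
	output := make(chan decodeOutput, 4)

	// Producer: stands in for the frame decoder goroutine.
	go func() {
		for _, s := range []string{"hello ", "world"} {
			output <- decodeOutput{b: []byte(s)}
		}
		output <- decodeOutput{err: errEndOfStream} // signals a fully flushed stream
	}()

	// Consumer: stands in for Read/WriteTo draining blocks in order.
	for v := range output {
		if v.err == errEndOfStream {
			break
		}
		fmt.Print(string(v.b))
	}
	fmt.Println()
}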
+func (d *Decoder) startStreamDecoder(inStream chan decodeStream) { + defer d.streamWg.Done() + frame := newFrameDec(d.o) + for stream := range inStream { + if debugDecoder { + println("got new stream") + } + br := readerWrapper{r: stream.r} + decodeStream: + for { + frame.history.reset() + err := frame.reset(&br) + if debugDecoder && err != nil { + println("Frame decoder returned", err) + } + if err == nil && frame.DictionaryID != nil { + dict, ok := d.dicts[*frame.DictionaryID] + if !ok { + err = ErrUnknownDictionary + } else { + frame.history.setDict(&dict) + } + } + if err != nil { + stream.output <- decodeOutput{ + err: err, + } + break + } + if debugDecoder { + println("starting frame decoder") + } + + // This goroutine will forward history between frames. + frame.frameDone.Add(1) + frame.initAsync() + + go frame.startDecoder(stream.output) + decodeFrame: + // Go through all blocks of the frame. + for { + dec := <-d.decoders + select { + case <-stream.cancel: + if !frame.sendErr(dec, io.EOF) { + // To not let the decoder dangle, send it back. + stream.output <- decodeOutput{d: dec} + } + break decodeStream + default: + } + err := frame.next(dec) + switch err { + case io.EOF: + // End of current frame, no error + println("EOF on next block") + break decodeFrame + case nil: + continue + default: + println("block decoder returned", err) + break decodeStream + } + } + // All blocks have started decoding, check if there are more frames. + println("waiting for done") + frame.frameDone.Wait() + println("done waiting...") + } + frame.frameDone.Wait() + println("Sending EOS") + stream.output <- decodeOutput{err: errEndOfStream} + } +} diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/hotelReservation/vendor/github.com/klauspost/compress/zstd/decoder_options.go new file mode 100644 index 000000000..95cc9b8b8 --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -0,0 +1,102 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "runtime" +) + +// DOption is an option for creating a decoder. +type DOption func(*decoderOptions) error + +// options retains accumulated state of multiple options. +type decoderOptions struct { + lowMem bool + concurrent int + maxDecodedSize uint64 + maxWindowSize uint64 + dicts []dict +} + +func (o *decoderOptions) setDefault() { + *o = decoderOptions{ + // use less ram: true for now, but may change. + lowMem: true, + concurrent: runtime.GOMAXPROCS(0), + maxWindowSize: MaxWindowSize, + } + o.maxDecodedSize = 1 << 63 +} + +// WithDecoderLowmem will set whether to use a lower amount of memory, +// but possibly have to allocate more while running. +func WithDecoderLowmem(b bool) DOption { + return func(o *decoderOptions) error { o.lowMem = b; return nil } +} + +// WithDecoderConcurrency will set the concurrency, +// meaning the maximum number of decoders to run concurrently. +// The value supplied must be at least 1. +// By default this will be set to GOMAXPROCS. +func WithDecoderConcurrency(n int) DOption { + return func(o *decoderOptions) error { + if n <= 0 { + return errors.New("concurrency must be at least 1") + } + o.concurrent = n + return nil + } +} + +// WithDecoderMaxMemory allows to set a maximum decoded size for in-memory +// non-streaming operations or maximum window size for streaming operations. 
+// This can be used to control memory usage of potentially hostile content.
+// Maximum and default is 1 << 63 bytes.
+func WithDecoderMaxMemory(n uint64) DOption {
+	return func(o *decoderOptions) error {
+		if n == 0 {
+			return errors.New("WithDecoderMaxMemory must be at least 1")
+		}
+		if n > 1<<63 {
+			return errors.New("WithDecoderMaxMemory must be less than 1 << 63")
+		}
+		o.maxDecodedSize = n
+		return nil
+	}
+}
+
+// WithDecoderDicts allows registering one or more dictionaries for the decoder.
+// If several dictionaries with the same ID are provided, the last one will be used.
+func WithDecoderDicts(dicts ...[]byte) DOption {
+	return func(o *decoderOptions) error {
+		for _, b := range dicts {
+			d, err := loadDict(b)
+			if err != nil {
+				return err
+			}
+			o.dicts = append(o.dicts, *d)
+		}
+		return nil
+	}
+}
+
+// WithDecoderMaxWindow allows setting a maximum window size for decodes.
+// This allows rejecting packets that will cause big memory usage.
+// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting.
+// If WithDecoderMaxMemory is set to a lower value, that will be used.
+// Default is 512MB, Maximum is ~3.75 TB as per zstandard spec.
+func WithDecoderMaxWindow(size uint64) DOption {
+	return func(o *decoderOptions) error {
+		if size < MinWindowSize {
+			return errors.New("WithMaxWindowSize must be at least 1KB, 1024 bytes")
+		}
+		if size > (1<<41)+7*(1<<38) {
+			return errors.New("WithMaxWindowSize must be less than (1<<41) + 7*(1<<38) ~ 3.75TB")
+		}
+		o.maxWindowSize = size
+		return nil
+	}
+}
diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/dict.go b/hotelReservation/vendor/github.com/klauspost/compress/zstd/dict.go
new file mode 100644
index 000000000..a36ae83ef
--- /dev/null
+++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/dict.go
@@ -0,0 +1,122 @@
+package zstd
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/klauspost/compress/huff0"
+)
+
+type dict struct {
+	id uint32
+
+	litEnc              *huff0.Scratch
+	llDec, ofDec, mlDec sequenceDec
+	//llEnc, ofEnc, mlEnc []*fseEncoder
+	offsets [3]int
+	content []byte
+}
+
+var dictMagic = [4]byte{0x37, 0xa4, 0x30, 0xec}
+
+// ID returns the dictionary id or 0 if d is nil.
+func (d *dict) ID() uint32 {
+	if d == nil {
+		return 0
+	}
+	return d.id
+}
+
+// DictContentSize returns the dictionary content size or 0 if d is nil.
+func (d *dict) DictContentSize() int {
+	if d == nil {
+		return 0
+	}
+	return len(d.content)
+}
+
+// Load a dictionary as described in
+// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
+func loadDict(b []byte) (*dict, error) {
+	// Check static field size.
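loadDict below walks the dictionary layout from the zstd spec: a 4-byte magic (0x37 0xa4 0x30 0xec), a 4-byte little-endian ID, a huff0 literal table, three FSE tables, three recent offsets, then the raw content. A hedged, stand-alone sketch of validating just the fixed header fields:

package main

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
)

// parseDictHeader checks only the magic and ID; it is an illustration,
// not the vendored loadDict.
func parseDictHeader(b []byte) (uint32, error) {
	if len(b) < 8 {
		return 0, errors.New("dictionary too short")
	}
	if !bytes.Equal(b[:4], []byte{0x37, 0xa4, 0x30, 0xec}) {
		return 0, errors.New("bad dictionary magic")
	}
	id := binary.LittleEndian.Uint32(b[4:8])
	if id == 0 {
		return 0, errors.New("dictionaries cannot have ID 0")
	}
	return id, nil
}

func main() {
	id, err := parseDictHeader([]byte{0x37, 0xa4, 0x30, 0xec, 1, 0, 0, 0})
	fmt.Println(id, err) // 1 <nil>
}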
+ if len(b) <= 8+(3*4) { + return nil, io.ErrUnexpectedEOF + } + d := dict{ + llDec: sequenceDec{fse: &fseDecoder{}}, + ofDec: sequenceDec{fse: &fseDecoder{}}, + mlDec: sequenceDec{fse: &fseDecoder{}}, + } + if !bytes.Equal(b[:4], dictMagic[:]) { + return nil, ErrMagicMismatch + } + d.id = binary.LittleEndian.Uint32(b[4:8]) + if d.id == 0 { + return nil, errors.New("dictionaries cannot have ID 0") + } + + // Read literal table + var err error + d.litEnc, b, err = huff0.ReadTable(b[8:], nil) + if err != nil { + return nil, err + } + d.litEnc.Reuse = huff0.ReusePolicyMust + + br := byteReader{ + b: b, + off: 0, + } + readDec := func(i tableIndex, dec *fseDecoder) error { + if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil { + return err + } + if br.overread() { + return io.ErrUnexpectedEOF + } + err = dec.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debugDecoder || debugEncoder { + println("Read table ok", "symbolLen:", dec.symbolLen) + } + // Set decoders as predefined so they aren't reused. + dec.preDefined = true + return nil + } + + if err := readDec(tableOffsets, d.ofDec.fse); err != nil { + return nil, err + } + if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil { + return nil, err + } + if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil { + return nil, err + } + if br.remain() < 12 { + return nil, io.ErrUnexpectedEOF + } + + d.offsets[0] = int(br.Uint32()) + br.advance(4) + d.offsets[1] = int(br.Uint32()) + br.advance(4) + d.offsets[2] = int(br.Uint32()) + br.advance(4) + if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 { + return nil, errors.New("invalid offset in dictionary") + } + d.content = make([]byte, br.remain()) + copy(d.content, br.unread()) + if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) { + return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets) + } + + return &d, nil +} diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/enc_base.go b/hotelReservation/vendor/github.com/klauspost/compress/zstd/enc_base.go new file mode 100644 index 000000000..295cd602a --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -0,0 +1,178 @@ +package zstd + +import ( + "fmt" + "math/bits" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +const ( + dictShardBits = 6 +) + +type fastBase struct { + // cur is the offset at the start of hist + cur int32 + // maximum offset. Should be at least 2x block size. + maxMatchOff int32 + hist []byte + crc *xxhash.Digest + tmp [8]byte + blk *blockEnc + lastDictID uint32 + lowMem bool +} + +// CRC returns the underlying CRC writer. +func (e *fastBase) CRC() *xxhash.Digest { + return e.crc +} + +// AppendCRC will append the CRC to the destination slice and return it. +func (e *fastBase) AppendCRC(dst []byte) []byte { + crc := e.crc.Sum(e.tmp[:0]) + dst = append(dst, crc[7], crc[6], crc[5], crc[4]) + return dst +} + +// WindowSize returns the window size of the encoder, +// or a window size small enough to contain the input size, if > 0. +func (e *fastBase) WindowSize(size int64) int32 { + if size > 0 && size < int64(e.maxMatchOff) { + b := int32(1) << uint(bits.Len(uint(size))) + // Keep minimum window. + if b < 1024 { + b = 1024 + } + return b + } + return e.maxMatchOff +} + +// Block returns the current block. 
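WindowSize above rounds a known content size up to a power of two (never below 1 KiB) so short inputs do not pay for the encoder's full match window. A quick stand-alone illustration of that rounding:

package main

import (
	"fmt"
	"math/bits"
)

// roundWindow mirrors the power-of-two rounding in fastBase.WindowSize
// for the case 0 < size < maxMatchOff (sketch only).
func roundWindow(size int64) int32 {
	b := int32(1) << uint(bits.Len(uint(size)))
	if b < 1024 {
		b = 1024 // keep the minimum window
	}
	return b
}

func main() {
	for _, s := range []int64{100, 1500, 100000} {
		fmt.Println(s, "->", roundWindow(s))
	}
}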
+func (e *fastBase) Block() *blockEnc {
+	return e.blk
+}
+
+func (e *fastBase) addBlock(src []byte) int32 {
+	if debugAsserts && e.cur > bufferReset {
+		panic(fmt.Sprintf("e.cur (%d) > buffer reset (%d)", e.cur, bufferReset))
+	}
+	// check if we have space already
+	if len(e.hist)+len(src) > cap(e.hist) {
+		if cap(e.hist) == 0 {
+			e.ensureHist(len(src))
+		} else {
+			if cap(e.hist) < int(e.maxMatchOff+maxCompressedBlockSize) {
+				panic(fmt.Errorf("unexpected buffer cap %d, want at least %d with window %d", cap(e.hist), e.maxMatchOff+maxCompressedBlockSize, e.maxMatchOff))
+			}
+			// Move down
+			offset := int32(len(e.hist)) - e.maxMatchOff
+			copy(e.hist[0:e.maxMatchOff], e.hist[offset:])
+			e.cur += offset
+			e.hist = e.hist[:e.maxMatchOff]
+		}
+	}
+	s := int32(len(e.hist))
+	e.hist = append(e.hist, src...)
+	return s
+}
+
+// ensureHist will ensure that history can keep at least this many bytes.
+func (e *fastBase) ensureHist(n int) {
+	if cap(e.hist) >= n {
+		return
+	}
+	l := e.maxMatchOff
+	if (e.lowMem && e.maxMatchOff > maxCompressedBlockSize) || e.maxMatchOff <= maxCompressedBlockSize {
+		l += maxCompressedBlockSize
+	} else {
+		l += e.maxMatchOff
+	}
+	// Make it at least 1MB.
+	if l < 1<<20 && !e.lowMem {
+		l = 1 << 20
+	}
+	// Make it at least the requested size.
+	if l < int32(n) {
+		l = int32(n)
+	}
+	e.hist = make([]byte, 0, l)
+}
+
+// UseBlock will replace the block with the provided one,
+// but transfer recent offsets from the previous.
+func (e *fastBase) UseBlock(enc *blockEnc) {
+	enc.reset(e.blk)
+	e.blk = enc
+}
+
+func (e *fastBase) matchlenNoHist(s, t int32, src []byte) int32 {
+	// Extend the match to be as long as possible.
+	return int32(matchLen(src[s:], src[t:]))
+}
+
+func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
+	if debugAsserts {
+		if s < 0 {
+			err := fmt.Sprintf("s (%d) < 0", s)
+			panic(err)
+		}
+		if t < 0 {
+			err := fmt.Sprintf("t (%d) < 0", t)
+			panic(err)
+		}
+		if s-t > e.maxMatchOff {
+			err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff)
+			panic(err)
+		}
+		if len(src)-int(s) > maxCompressedBlockSize {
+			panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize))
+		}
+	}
+
+	// Extend the match to be as long as possible.
+	return int32(matchLen(src[s:], src[t:]))
+}
+
+// Reset the encoding table.
+func (e *fastBase) resetBase(d *dict, singleBlock bool) {
+	if e.blk == nil {
+		e.blk = &blockEnc{lowMem: e.lowMem}
+		e.blk.init()
+	} else {
+		e.blk.reset(nil)
+	}
+	e.blk.initNewEncode()
+	if e.crc == nil {
+		e.crc = xxhash.New()
+	} else {
+		e.crc.Reset()
+	}
+	if d != nil {
+		low := e.lowMem
+		if singleBlock {
+			e.lowMem = true
+		}
+		e.ensureHist(d.DictContentSize() + maxCompressedBlockSize)
+		e.lowMem = low
+	}
+
+	// We offset current position so everything will be out of reach.
+	// If above reset line, history will be purged.
+	if e.cur < bufferReset {
+		e.cur += e.maxMatchOff + int32(len(e.hist))
+	}
+	e.hist = e.hist[:0]
+	if d != nil {
+		// Set offsets (currently not used)
+		for i, off := range d.offsets {
+			e.blk.recentOffsets[i] = uint32(off)
+			e.blk.prevRecentOffsets[i] = e.blk.recentOffsets[i]
+		}
+		// Transfer litenc.
+		e.blk.dictLitEnc = d.litEnc
+		e.hist = append(e.hist, d.content...)
+	}
+}
diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/enc_best.go b/hotelReservation/vendor/github.com/klauspost/compress/zstd/enc_best.go
new file mode 100644
index 000000000..96028ecd8
--- /dev/null
+++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/enc_best.go
@@ -0,0 +1,558 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/klauspost/compress"
+)
+
+const (
+	bestLongTableBits = 22                     // Bits used in the long match table
+	bestLongTableSize = 1 << bestLongTableBits // Size of the table
+	bestLongLen       = 8                      // Bytes used for table hash
+
+	// Note: Increasing the short table bits or making the hash shorter
+	// can actually lead to compression degradation since it will 'steal' more from the
+	// long match table and match offsets are quite big.
+	// This greatly depends on the type of input.
+	bestShortTableBits = 18                      // Bits used in the short match table
+	bestShortTableSize = 1 << bestShortTableBits // Size of the table
+	bestShortLen       = 4                       // Bytes used for table hash
+
+)
+
+type match struct {
+	offset int32
+	s      int32
+	length int32
+	rep    int32
+	est    int32
+}
+
+const highScore = 25000
+
+// estBits will estimate output bits from predefined tables.
+func (m *match) estBits(bitsPerByte int32) {
+	mlc := mlCode(uint32(m.length - zstdMinMatch))
+	var ofc uint8
+	if m.rep < 0 {
+		ofc = ofCode(uint32(m.s-m.offset) + 3)
+	} else {
+		ofc = ofCode(uint32(m.rep))
+	}
+	// Cost, excluding
+	ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc]
+
+	// Add cost of match encoding...
+	m.est = int32(ofTT.outBits + mlTT.outBits)
+	m.est += int32(ofTT.deltaNbBits>>16 + mlTT.deltaNbBits>>16)
+	// Subtract savings compared to literal encoding...
+	m.est -= (m.length * bitsPerByte) >> 10
+	if m.est > 0 {
+		// Unlikely gain..
+		m.length = 0
+		m.est = highScore
+	}
+}
+
+// bestFastEncoder uses 2 tables, one for short matches (4 bytes) and one for long matches.
+// The long match table contains the previous entry with the same hash,
+// effectively making it a "chain" of length 2.
+// When we find a long match we choose between the two values and select the longest.
+// When we find a short match, after checking the long, we check if we can find a long at n+1
+// and that it is longer (lazy matching).
+type bestFastEncoder struct {
+	fastBase
+	table         [bestShortTableSize]prevEntry
+	longTable     [bestLongTableSize]prevEntry
+	dictTable     []prevEntry
+	dictLongTable []prevEntry
+}
+
+// Encode improves compression...
+func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
+	const (
+		// Input margin is the number of bytes we read (8)
+		// and the maximum we will read ahead (4)
+		inputMargin            = 8 + 4
+		minNonLiteralBlockSize = 16
+	)
+
+	// Protect against e.cur wraparound.
+	for e.cur >= bufferReset {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = prevEntry{}
+			}
+			for i := range e.longTable[:] {
+				e.longTable[i] = prevEntry{}
+			}
+			e.cur = e.maxMatchOff
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + v2 := e.table[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.table[i] = prevEntry{ + offset: v, + prev: v2, + } + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Use this to estimate literal cost. + // Scaled by 10 bits. + bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src)) + // Huffman can never go < 1 bit/byte + if bitsPerByte < 1024 { + bitsPerByte = 1024 + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + const kSearchStrength = 10 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + offset3 := int32(blk.recentOffsets[2]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + _ = addLiterals + + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + bestOf := func(a, b match) match { + if a.est+(a.s-b.s)*bitsPerByte>>10 < b.est+(b.s-a.s)*bitsPerByte>>10 { + return a + } + return b + } + const goodEnough = 100 + + nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) + nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + matchAt := func(offset int32, s int32, first uint32, rep int32) match { + if s-offset >= e.maxMatchOff || load3232(src, offset) != first { + return match{s: s, est: highScore} + } + if debugAsserts { + if !bytes.Equal(src[s:s+4], src[offset:offset+4]) { + panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) + } + } + m := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep} + m.estBits(bitsPerByte) + return m + } + + best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) + best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) + best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1)) + + if canRepeat && best.length < goodEnough { + cv32 := uint32(cv >> 8) + spp := s + 1 + best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1)) + best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2)) + best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3)) + if best.length > 0 { + cv32 = uint32(cv >> 24) + spp += 2 + best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1)) + best = bestOf(best, 
matchAt(spp-offset2, spp, cv32, 2)) + best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3)) + } + } + // Load next and check... + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset} + e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset} + + // Look far ahead, unless we have a really long match already... + if best.length < goodEnough { + // No match found, move forward on input, no need to check forward... + if best.length < 4 { + s += 1 + (s-nextEmit)>>(kSearchStrength-1) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + continue + } + + s++ + candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)] + cv = load6432(src, s) + cv2 := load6432(src, s+1) + candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)] + candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] + + // Short at s+1 + best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) + // Long at s+1, s+2 + best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)) + best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) + best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1)) + best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1)) + if false { + // Short at s+3. + // Too often worse... + best = bestOf(best, matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1)) + } + // See if we can find a better match by checking where the current best ends. + // Use that offset to see if we can find a better full match. + if sAt := best.s + best.length; sAt < sLimit { + nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) + candidateEnd := e.longTable[nextHashL] + if pos := candidateEnd.offset - e.cur - best.length; pos >= 0 { + bestEnd := bestOf(best, matchAt(pos, best.s, load3232(src, best.s), -1)) + if pos := candidateEnd.prev - e.cur - best.length; pos >= 0 { + bestEnd = bestOf(bestEnd, matchAt(pos, best.s, load3232(src, best.s), -1)) + } + best = bestEnd + } + } + } + + if debugAsserts { + if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) { + panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length])) + } + } + + // We have a match, we can store the forward value + if best.rep > 0 { + s = best.s + var seq seq + seq.matchLen = uint32(best.length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := best.s + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + repIndex := best.offset + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = uint32(best.rep) + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + s = best.s + best.length + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, best.length) + + } + break encodeLoop + } + // Index skipped... 
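+			// Every position covered by the repeat match is hashed into both tables,
+			// keeping the evicted entry as .prev, so later matches can start inside
+			// this match.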
+ off := index0 + e.cur + for index0 < s-1 { + cv0 := load6432(src, index0) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + off++ + index0++ + } + switch best.rep { + case 2: + offset1, offset2 = offset2, offset1 + case 3: + offset1, offset2, offset3 = offset3, offset1, offset2 + } + cv = load6432(src, s) + continue + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + s = best.s + t := best.offset + offset1, offset2, offset3 = s-t, offset1, offset2 + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := best.length + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + index0 := s - l + 1 + // every entry + for index0 < s-1 { + cv0 := load6432(src, index0) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + index0++ + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) + nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: e.table[nextHashS].offset} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + blk.recentOffsets[2] = uint32(offset3) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. 
+// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) + e.Encode(blk, src) +} + +// Reset will reset and set a dictionary if not nil +func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]prevEntry, len(e.table)) + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff; i < end; i += 4 { + const hashLog = bestShortTableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, bestShortLen) // 0 -> 4 + nextHash1 := hashLen(cv>>8, hashLog, bestShortLen) // 1 -> 5 + nextHash2 := hashLen(cv>>16, hashLog, bestShortLen) // 2 -> 6 + nextHash3 := hashLen(cv>>24, hashLog, bestShortLen) // 3 -> 7 + e.dictTable[nextHash] = prevEntry{ + prev: e.dictTable[nextHash].offset, + offset: i, + } + e.dictTable[nextHash1] = prevEntry{ + prev: e.dictTable[nextHash1].offset, + offset: i + 1, + } + e.dictTable[nextHash2] = prevEntry{ + prev: e.dictTable[nextHash2].offset, + offset: i + 2, + } + e.dictTable[nextHash3] = prevEntry{ + prev: e.dictTable[nextHash3].offset, + offset: i + 3, + } + } + e.lastDictID = d.id + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]prevEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + h := hashLen(cv, bestLongTableBits, bestLongLen) + e.dictLongTable[h] = prevEntry{ + offset: e.maxMatchOff, + prev: e.dictLongTable[h].offset, + } + + end := int32(len(d.content)) - 8 + e.maxMatchOff + off := 8 // First to read + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[off]) << 56) + h := hashLen(cv, bestLongTableBits, bestLongLen) + e.dictLongTable[h] = prevEntry{ + offset: i, + prev: e.dictLongTable[h].offset, + } + off++ + } + } + e.lastDictID = d.id + } + // Reset table to initial state + copy(e.longTable[:], e.dictLongTable) + + e.cur = e.maxMatchOff + // Reset table to initial state + copy(e.table[:], e.dictTable) +} diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/enc_better.go b/hotelReservation/vendor/github.com/klauspost/compress/zstd/enc_better.go new file mode 100644 index 000000000..602c05ee0 --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -0,0 +1,1237 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +const ( + betterLongTableBits = 19 // Bits used in the long match table + betterLongTableSize = 1 << betterLongTableBits // Size of the table + betterLongLen = 8 // Bytes used for table hash + + // Note: Increasing the short table bits or making the hash shorter + // can actually lead to compression degradation since it will 'steal' more from the + // long match table and match offsets are quite big. + // This greatly depends on the type of input. 
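+	//
+	// As a rough sizing note: 1<<19 prevEntry slots at 8 bytes each put the long
+	// table at 4 MiB, while the 1<<13-entry short table is only 64 KiB.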
+ betterShortTableBits = 13 // Bits used in the short match table + betterShortTableSize = 1 << betterShortTableBits // Size of the table + betterShortLen = 5 // Bytes used for table hash + + betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table + betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard + + betterShortTableShardCnt = 1 << (betterShortTableBits - dictShardBits) // Number of shards in the table + betterShortTableShardSize = betterShortTableSize / betterShortTableShardCnt // Size of an individual shard +) + +type prevEntry struct { + offset int32 + prev int32 +} + +// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type betterFastEncoder struct { + fastBase + table [betterShortTableSize]tableEntry + longTable [betterLongTableSize]prevEntry +} + +type betterFastEncoderDict struct { + betterFastEncoder + dictTable []tableEntry + dictLongTable []prevEntry + shortTableShardDirty [betterShortTableShardCnt]bool + longTableShardDirty [betterLongTableShardCnt]bool + allDirty bool +} + +// Encode improves compression... +func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = prevEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) 
+		s.litLen = uint32(until - nextEmit)
+	}
+	if debugEncoder {
+		println("recent offsets:", blk.recentOffsets)
+	}
+
+encodeLoop:
+	for {
+		var t int32
+		// We allow the encoder to optionally turn off repeat offsets across blocks
+		canRepeat := len(blk.sequences) > 2
+		var matched int32
+
+		for {
+			if debugAsserts && canRepeat && offset1 == 0 {
+				panic("offset0 was 0")
+			}
+
+			nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
+			nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
+			candidateL := e.longTable[nextHashL]
+			candidateS := e.table[nextHashS]
+
+			const repOff = 1
+			repIndex := s - offset1 + repOff
+			off := s + e.cur
+			e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset}
+			e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
+
+			if canRepeat {
+				if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
+					// Consider history as well.
+					var seq seq
+					length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+
+					seq.matchLen = uint32(length - zstdMinMatch)
+
+					// We might be able to match backwards.
+					// Extend as long as we can.
+					start := s + repOff
+					// We end the search early, so we don't risk 0 literals
+					// and have to do special offset treatment.
+					startLimit := nextEmit + 1
+
+					tMin := s - e.maxMatchOff
+					if tMin < 0 {
+						tMin = 0
+					}
+					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+						repIndex--
+						start--
+						seq.matchLen++
+					}
+					addLiterals(&seq, start)
+
+					// rep 0
+					seq.offset = 1
+					if debugSequences {
+						println("repeat sequence", seq, "next s:", s)
+					}
+					blk.sequences = append(blk.sequences, seq)
+
+					// Index match start+1 (long) -> s - 1
+					index0 := s + repOff
+					s += length + repOff
+
+					nextEmit = s
+					if s >= sLimit {
+						if debugEncoder {
+							println("repeat ended", s, length)
+
+						}
+						break encodeLoop
+					}
+					// Index skipped...
+					for index0 < s-1 {
+						cv0 := load6432(src, index0)
+						cv1 := cv0 >> 8
+						h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
+						off := index0 + e.cur
+						e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+						e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)}
+						index0 += 2
+					}
+					cv = load6432(src, s)
+					continue
+				}
+				const repOff2 = 1
+
+				// We deviate from the reference encoder and also check offset 2.
+				// Still slower and not much better, so disabled.
+				// repIndex = s - offset2 + repOff2
+				if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
+					// Consider history as well.
+					var seq seq
+					length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
+
+					seq.matchLen = uint32(length - zstdMinMatch)
+
+					// We might be able to match backwards.
+					// Extend as long as we can.
+					start := s + repOff2
+					// We end the search early, so we don't risk 0 literals
+					// and have to do special offset treatment.
+					startLimit := nextEmit + 1
+
+					tMin := s - e.maxMatchOff
+					if tMin < 0 {
+						tMin = 0
+					}
+					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+						repIndex--
+						start--
+						seq.matchLen++
+					}
+					addLiterals(&seq, start)
+
+					// rep 2
+					seq.offset = 2
+					if debugSequences {
+						println("repeat sequence 2", seq, "next s:", s)
+					}
+					blk.sequences = append(blk.sequences, seq)
+
+					index0 := s + repOff2
+					s += length + repOff2
+					nextEmit = s
+					if s >= sLimit {
+						if debugEncoder {
+							println("repeat ended", s, length)
+
+						}
+						break encodeLoop
+					}
+
+					// Index skipped...
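+					// Positions are indexed two at a time: the long table records the even
+					// position and the short table the one after it, halving update cost
+					// inside long repeats.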
+ for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. + if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. 
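+				// candidateL.prev is the previous occupant of this hash bucket, giving an
+				// effective chain of length 2: one extra compare that can recover an
+				// older, longer match.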
+ matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // Try to find a better match by searching for a long match at the end of the current best match + if s+matched < sLimit { + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) + cv := load3232(src, s) + candidateL := e.longTable[nextHashL] + coffsetL := candidateL.offset - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("long match at end-of-match") + } + } + } + + // Check prev long... + if true { + coffsetL = candidateL.prev - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("prev long match at end-of-match") + } + } + } + } + } + // A match has been found. Update recent offsets. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + index0 := s - l + 1 + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + + // We have at least 4 byte match. + // No need to check backwards. 
We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) + e.Encode(blk, src) +} + +// Encode improves compression... +func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = prevEntry{} + } + e.cur = e.maxMatchOff + e.allDirty = true + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.allDirty = true + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) 
+		s.litLen = uint32(until - nextEmit)
+	}
+	if debugEncoder {
+		println("recent offsets:", blk.recentOffsets)
+	}
+
+encodeLoop:
+	for {
+		var t int32
+		// We allow the encoder to optionally turn off repeat offsets across blocks
+		canRepeat := len(blk.sequences) > 2
+		var matched int32
+
+		for {
+			if debugAsserts && canRepeat && offset1 == 0 {
+				panic("offset0 was 0")
+			}
+
+			nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
+			nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
+			candidateL := e.longTable[nextHashL]
+			candidateS := e.table[nextHashS]
+
+			const repOff = 1
+			repIndex := s - offset1 + repOff
+			off := s + e.cur
+			e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset}
+			e.markLongShardDirty(nextHashL)
+			e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
+			e.markShortShardDirty(nextHashS)
+
+			if canRepeat {
+				if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
+					// Consider history as well.
+					var seq seq
+					length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+
+					seq.matchLen = uint32(length - zstdMinMatch)
+
+					// We might be able to match backwards.
+					// Extend as long as we can.
+					start := s + repOff
+					// We end the search early, so we don't risk 0 literals
+					// and have to do special offset treatment.
+					startLimit := nextEmit + 1
+
+					tMin := s - e.maxMatchOff
+					if tMin < 0 {
+						tMin = 0
+					}
+					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+						repIndex--
+						start--
+						seq.matchLen++
+					}
+					addLiterals(&seq, start)
+
+					// rep 0
+					seq.offset = 1
+					if debugSequences {
+						println("repeat sequence", seq, "next s:", s)
+					}
+					blk.sequences = append(blk.sequences, seq)
+
+					// Index match start+1 (long) -> s - 1
+					index0 := s + repOff
+					s += length + repOff
+
+					nextEmit = s
+					if s >= sLimit {
+						if debugEncoder {
+							println("repeat ended", s, length)
+
+						}
+						break encodeLoop
+					}
+					// Index skipped...
+					for index0 < s-1 {
+						cv0 := load6432(src, index0)
+						cv1 := cv0 >> 8
+						h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
+						off := index0 + e.cur
+						e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+						e.markLongShardDirty(h0)
+						h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
+						e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
+						e.markShortShardDirty(h1)
+						index0 += 2
+					}
+					cv = load6432(src, s)
+					continue
+				}
+				const repOff2 = 1
+
+				// We deviate from the reference encoder and also check offset 2.
+				// Still slower and not much better, so disabled.
+				// repIndex = s - offset2 + repOff2
+				if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
+					// Consider history as well.
+					var seq seq
+					length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
+
+					seq.matchLen = uint32(length - zstdMinMatch)
+
+					// We might be able to match backwards.
+					// Extend as long as we can.
+					start := s + repOff2
+					// We end the search early, so we don't risk 0 literals
+					// and have to do special offset treatment.
+					startLimit := nextEmit + 1
+
+					tMin := s - e.maxMatchOff
+					if tMin < 0 {
+						tMin = 0
+					}
+					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+						repIndex--
+						start--
+						seq.matchLen++
+					}
+					addLiterals(&seq, start)
+
+					// rep 2
+					seq.offset = 2
+					if debugSequences {
+						println("repeat sequence 2", seq, "next s:", s)
+					}
+					blk.sequences = append(blk.sequences, seq)
+
+					index0 := s + repOff2
+					s += length + repOff2
+					nextEmit = s
+					if s >= sLimit {
+						if debugEncoder {
+							println("repeat ended", s, length)
+
+						}
+						break encodeLoop
+					}
+
+					// Index skipped...
+					for index0 < s-1 {
+						cv0 := load6432(src, index0)
+						cv1 := cv0 >> 8
+						h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
+						off := index0 + e.cur
+						e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
+						e.markLongShardDirty(h0)
+						h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
+						e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
+						e.markShortShardDirty(h1)
+						index0 += 2
+					}
+					cv = load6432(src, s)
+					// Swap offsets
+					offset1, offset2 = offset2, offset1
+					continue
+				}
+			}
+			// Find the offsets of our two matches.
+			coffsetL := candidateL.offset - e.cur
+			coffsetLP := candidateL.prev - e.cur
+
+			// Check if we have a long match.
+			if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
+				// Found a long match, at least 8 bytes.
+				matched = e.matchlen(s+8, coffsetL+8, src) + 8
+				t = coffsetL
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debugMatches {
+					println("long match")
+				}
+
+				if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) {
+					// Found a long match, at least 8 bytes.
+					prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8
+					if prevMatch > matched {
+						matched = prevMatch
+						t = coffsetLP
+					}
+					if debugAsserts && s <= t {
+						panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+					}
+					if debugAsserts && s-t > e.maxMatchOff {
+						panic("s - t >e.maxMatchOff")
+					}
+					if debugMatches {
+						println("long match")
+					}
+				}
+				break
+			}
+
+			// Check if we have a long match on prev.
+			if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) {
+				// Found a long match, at least 8 bytes.
+				matched = e.matchlen(s+8, coffsetLP+8, src) + 8
+				t = coffsetLP
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debugMatches {
+					println("long match")
+				}
+				break
+			}
+
+			coffsetS := candidateS.offset - e.cur
+
+			// Check if we have a short match.
+			if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
+				// found a regular match
+				matched = e.matchlen(s+4, coffsetS+4, src) + 4
+
+				// See if we can find a long match at s+1
+				const checkAt = 1
+				cv := load6432(src, s+checkAt)
+				nextHashL = hashLen(cv, betterLongTableBits, betterLongLen)
+				candidateL = e.longTable[nextHashL]
+				coffsetL = candidateL.offset - e.cur
+
+				// We can store it, since we have at least a 4 byte match.
+				e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset}
+				e.markLongShardDirty(nextHashL)
+				if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
+					// Found a long match, at least 8 bytes.
+ matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // Try to find a better match by searching for a long match at the end of the current best match + if s+matched < sLimit { + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) + cv := load3232(src, s) + candidateL := e.longTable[nextHashL] + coffsetL := candidateL.offset - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("long match at end-of-match") + } + } + } + + // Check prev long... + if true { + coffsetL = candidateL.prev - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("prev long match at end-of-match") + } + } + } + } + } + // A match has been found. Update recent offsets. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + index0 := s - l + 1 + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShortShardDirty(nextHashS) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+		blk.extraLits = len(src) - int(nextEmit)
+	}
+	blk.recentOffsets[0] = uint32(offset1)
+	blk.recentOffsets[1] = uint32(offset2)
+	if debugEncoder {
+		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+	}
+}
+
+// Reset will reset and set a dictionary if not nil
+func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) {
+	e.resetBase(d, singleBlock)
+	if d != nil {
+		panic("betterFastEncoder: Reset with dict")
+	}
+}
+
+// Reset will reset and set a dictionary if not nil
+func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) {
+	e.resetBase(d, singleBlock)
+	if d == nil {
+		return
+	}
+	// Init or copy dict table
+	if len(e.dictTable) != len(e.table) || d.id != e.lastDictID {
+		if len(e.dictTable) != len(e.table) {
+			e.dictTable = make([]tableEntry, len(e.table))
+		}
+		end := int32(len(d.content)) - 8 + e.maxMatchOff
+		for i := e.maxMatchOff; i < end; i += 4 {
+			const hashLog = betterShortTableBits
+
+			cv := load6432(d.content, i-e.maxMatchOff)
+			nextHash := hashLen(cv, hashLog, betterShortLen)      // 0 -> 4
+			nextHash1 := hashLen(cv>>8, hashLog, betterShortLen)  // 1 -> 5
+			nextHash2 := hashLen(cv>>16, hashLog, betterShortLen) // 2 -> 6
+			nextHash3 := hashLen(cv>>24, hashLog, betterShortLen) // 3 -> 7
+			e.dictTable[nextHash] = tableEntry{
+				val:    uint32(cv),
+				offset: i,
+			}
+			e.dictTable[nextHash1] = tableEntry{
+				val:    uint32(cv >> 8),
+				offset: i + 1,
+			}
+			e.dictTable[nextHash2] = tableEntry{
+				val:    uint32(cv >> 16),
+				offset: i + 2,
+			}
+			e.dictTable[nextHash3] = tableEntry{
+				val:    uint32(cv >> 24),
+				offset: i + 3,
+			}
+		}
+		e.lastDictID = d.id
+		e.allDirty = true
+	}
+
+	// Init or copy dict long table
+	if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID {
+		if len(e.dictLongTable) != len(e.longTable) {
+			e.dictLongTable = make([]prevEntry, len(e.longTable))
+		}
+		if len(d.content) >= 8 {
+			cv := load6432(d.content, 0)
+			h := hashLen(cv, betterLongTableBits, betterLongLen)
+			e.dictLongTable[h] = prevEntry{
+				offset: e.maxMatchOff,
+				prev:   e.dictLongTable[h].offset,
+			}
+
+			end := int32(len(d.content)) - 8 + e.maxMatchOff
+			off := 8 // First to read
+			for i := e.maxMatchOff + 1; i < end; i++ {
+				cv = cv>>8 | (uint64(d.content[off]) << 56)
+				h := hashLen(cv, betterLongTableBits, betterLongLen)
+				e.dictLongTable[h] = prevEntry{
+					offset: i,
+					prev:   e.dictLongTable[h].offset,
+				}
+				off++
+			}
+		}
+		e.lastDictID = d.id
+		e.allDirty = true
+	}
+
+	// Reset table to initial state
+	{
+		dirtyShardCnt := 0
+		if !e.allDirty {
+			for i := range e.shortTableShardDirty {
+				if e.shortTableShardDirty[i] {
+					dirtyShardCnt++
+				}
+			}
+		}
+		const shardCnt = betterShortTableShardCnt
+		const shardSize = betterShortTableShardSize
+		if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
+			copy(e.table[:], e.dictTable)
+			for i := range e.shortTableShardDirty {
+				e.shortTableShardDirty[i] = false
+			}
+		} else {
+			for i := range e.shortTableShardDirty {
+				if !e.shortTableShardDirty[i] {
+					continue
+				}
+
+				copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+				e.shortTableShardDirty[i] = false
+			}
+		}
+	}
+	{
+		dirtyShardCnt := 0
+		if !e.allDirty {
+			for i := range e.longTableShardDirty {
+				if e.longTableShardDirty[i] {
+					dirtyShardCnt++
+				}
+			}
+		}
+		const shardCnt = betterLongTableShardCnt
+		const shardSize = betterLongTableShardSize
+		if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
+			copy(e.longTable[:], e.dictLongTable)
+			for i := range e.longTableShardDirty {
+				e.longTableShardDirty[i] = false
+			}
+		} else {
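+			// Only shards touched since the last reset are re-copied from the
+			// dictionary snapshot; with few dirty shards this is much cheaper than
+			// copying the whole table.
+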
for i := range e.longTableShardDirty {
+				if !e.longTableShardDirty[i] {
+					continue
+				}
+
+				copy(e.longTable[i*shardSize:(i+1)*shardSize], e.dictLongTable[i*shardSize:(i+1)*shardSize])
+				e.longTableShardDirty[i] = false
+			}
+		}
+	}
+	e.cur = e.maxMatchOff
+	e.allDirty = false
+}
+
+func (e *betterFastEncoderDict) markLongShardDirty(entryNum uint32) {
+	e.longTableShardDirty[entryNum/betterLongTableShardSize] = true
+}
+
+func (e *betterFastEncoderDict) markShortShardDirty(entryNum uint32) {
+	e.shortTableShardDirty[entryNum/betterShortTableShardSize] = true
+}
diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/hotelReservation/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
new file mode 100644
index 000000000..d6b310424
--- /dev/null
+++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
@@ -0,0 +1,1124 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import "fmt"
+
+const (
+	dFastLongTableBits = 17                      // Bits used in the long match table
+	dFastLongTableSize = 1 << dFastLongTableBits // Size of the table
+	dFastLongTableMask = dFastLongTableSize - 1  // Mask for table indices. Redundant, but can eliminate bounds checks.
+	dFastLongLen       = 8                       // Bytes used for table hash
+
+	dLongTableShardCnt  = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table
+	dLongTableShardSize = dFastLongTableSize / tableShardCnt        // Size of an individual shard
+
+	dFastShortTableBits = tableBits                // Bits used in the short match table
+	dFastShortTableSize = 1 << dFastShortTableBits // Size of the table
+	dFastShortTableMask = dFastShortTableSize - 1  // Mask for table indices. Redundant, but can eliminate bounds checks.
+	dFastShortLen       = 5                        // Bytes used for table hash
+
+)
+
+type doubleFastEncoder struct {
+	fastEncoder
+	longTable [dFastLongTableSize]tableEntry
+}
+
+type doubleFastEncoderDict struct {
+	fastEncoderDict
+	longTable           [dFastLongTableSize]tableEntry
+	dictLongTable       []tableEntry
+	longTableShardDirty [dLongTableShardCnt]bool
+}
+
+// Encode mimics functionality in zstd_dfast.c
+func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) {
+	const (
+		// Input margin is the number of bytes we read (8)
+		// and the maximum we will read ahead (2)
+		inputMargin            = 8 + 2
+		minNonLiteralBlockSize = 16
+	)
+
+	// Protect against e.cur wraparound.
+	for e.cur >= bufferReset {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = tableEntry{}
+			}
+			for i := range e.longTable[:] {
+				e.longTable[i] = tableEntry{}
+			}
+			e.cur = e.maxMatchOff
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+		for i := range e.table[:] {
+			v := e.table[i].offset
+			if v < minOff {
+				v = 0
+			} else {
+				v = v - e.cur + e.maxMatchOff
+			}
+			e.table[i].offset = v
+		}
+		for i := range e.longTable[:] {
+			v := e.longTable[i].offset
+			if v < minOff {
+				v = 0
+			} else {
+				v = v - e.cur + e.maxMatchOff
+			}
+			e.longTable[i].offset = v
+		}
+		e.cur = e.maxMatchOff
+		break
+	}
+
+	s := e.addBlock(src)
+	blk.size = len(src)
+	if len(src) < minNonLiteralBlockSize {
+		blk.extraLits = len(src)
+		blk.literals = blk.literals[:len(src)]
+		copy(blk.literals, src)
+		return
+	}
+
+	// Override src
+	src = e.hist
+	sLimit := int32(len(src)) - inputMargin
+	// stepSize is the number of bytes to skip on every main loop iteration.
+	// It should be >= 1.
+	const stepSize = 1
+
+	const kSearchStrength = 8
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := s
+	cv := load6432(src, s)
+
+	// Relative offsets
+	offset1 := int32(blk.recentOffsets[0])
+	offset2 := int32(blk.recentOffsets[1])
+
+	addLiterals := func(s *seq, until int32) {
+		if until == nextEmit {
+			return
+		}
+		blk.literals = append(blk.literals, src[nextEmit:until]...)
+		s.litLen = uint32(until - nextEmit)
+	}
+	if debugEncoder {
+		println("recent offsets:", blk.recentOffsets)
+	}
+
+encodeLoop:
+	for {
+		var t int32
+		// We allow the encoder to optionally turn off repeat offsets across blocks
+		canRepeat := len(blk.sequences) > 2
+
+		for {
+			if debugAsserts && canRepeat && offset1 == 0 {
+				panic("offset0 was 0")
+			}
+
+			nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
+			nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+			candidateL := e.longTable[nextHashL]
+			candidateS := e.table[nextHashS]
+
+			const repOff = 1
+			repIndex := s - offset1 + repOff
+			entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+			e.longTable[nextHashL] = entry
+			e.table[nextHashS] = entry
+
+			if canRepeat {
+				if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
+					// Consider history as well.
+					var seq seq
+					length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+
+					seq.matchLen = uint32(length - zstdMinMatch)
+
+					// We might be able to match backwards.
+					// Extend as long as we can.
+					start := s + repOff
+					// We end the search early, so we don't risk 0 literals
+					// and have to do special offset treatment.
+					startLimit := nextEmit + 1
+
+					tMin := s - e.maxMatchOff
+					if tMin < 0 {
+						tMin = 0
+					}
+					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+						repIndex--
+						start--
+						seq.matchLen++
+					}
+					addLiterals(&seq, start)
+
+					// rep 0
+					seq.offset = 1
+					if debugSequences {
+						println("repeat sequence", seq, "next s:", s)
+					}
+					blk.sequences = append(blk.sequences, seq)
+					s += length + repOff
+					nextEmit = s
+					if s >= sLimit {
+						if debugEncoder {
+							println("repeat ended", s, length)
+
+						}
+						break encodeLoop
+					}
+					cv = load6432(src, s)
+					continue
+				}
+			}
+			// Find the offsets of our two matches.
+			coffsetL := s - (candidateL.offset - e.cur)
+			coffsetS := s - (candidateS.offset - e.cur)
+
+			// Check if we have a long match.
+			if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+				// Found a long match, likely at least 8 bytes.
+				// Reference encoder checks all 8 bytes, we only check 4,
+				// but the likelihood of both the first 4 bytes and the hash matching should be enough.
+				t = candidateL.offset - e.cur
+				if debugAsserts && s <= t {
+					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
+				}
+				if debugAsserts && s-t > e.maxMatchOff {
+					panic("s - t >e.maxMatchOff")
+				}
+				if debugMatches {
+					println("long match")
+				}
+				break
+			}
+
+			// Check if we have a short match.
+			if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
+				// found a regular match
+				// See if we can find a long match at s+1
+				const checkAt = 1
+				cv := load6432(src, s+checkAt)
+				nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen)
+				candidateL = e.longTable[nextHashL]
+				coffsetL = s - (candidateL.offset - e.cur) + checkAt
+
+				// We can store it, since we have at least a 4 byte match.
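+				// Storing the s+1 probe is done unconditionally: the short match already
+				// guarantees progress, and the extra long-table entry helps later searches.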
+ e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. 
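+			// In the zstd format a repeat code with zero literals shifts the repeat
+			// numbering by one, so the offset code 1 emitted above selects what this
+			// encoder tracks as offset2; swapping keeps the local state consistent.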
+ offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + if e.cur >= bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + for { + + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if len(blk.sequences) > 2 { + if load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + //length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:])) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
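+					// (A zero-literal repeat would shift the repeat-code numbering, which
+					// this encoder only handles in the dedicated offset-2 loop below.)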
+ startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d). cur: %d", s, t, e.cur)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + // Extend the 4-byte match as long as possible. + //l := e.matchlen(s+4, t+4, src) + 4 + l := int32(matchLen(src[s+4:], src[t+4:])) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 + + cv = load6432(src, s) + + if len(blk.sequences) <= 2 { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv1>>8, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + //l := 4 + e.matchlen(s+4, o2+4, src) + l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < bufferReset { + e.cur += int32(len(src)) + } +} + +// Encode will encode the content, with a dictionary if initialized for it. +func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.markAllShardsDirty() + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v < minOff {
+ v = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ }
+ e.table[i].offset = v
+ }
+ for i := range e.longTable[:] {
+ v := e.longTable[i].offset
+ if v < minOff {
+ v = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ }
+ e.longTable[i].offset = v
+ }
+ e.markAllShardsDirty()
+ e.cur = e.maxMatchOff
+ break
+ }
+
+ s := e.addBlock(src)
+ blk.size = len(src)
+ if len(src) < minNonLiteralBlockSize {
+ blk.extraLits = len(src)
+ blk.literals = blk.literals[:len(src)]
+ copy(blk.literals, src)
+ return
+ }
+
+ // Override src
+ src = e.hist
+ sLimit := int32(len(src)) - inputMargin
+ // stepSize is the number of bytes to skip on every main loop iteration.
+ // It should be >= 1.
+ const stepSize = 1
+
+ const kSearchStrength = 8
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := s
+ cv := load6432(src, s)
+
+ // Relative offsets
+ offset1 := int32(blk.recentOffsets[0])
+ offset2 := int32(blk.recentOffsets[1])
+
+ addLiterals := func(s *seq, until int32) {
+ if until == nextEmit {
+ return
+ }
+ blk.literals = append(blk.literals, src[nextEmit:until]...)
+ s.litLen = uint32(until - nextEmit)
+ }
+ if debugEncoder {
+ println("recent offsets:", blk.recentOffsets)
+ }
+
+encodeLoop:
+ for {
+ var t int32
+ // We allow the encoder to optionally turn off repeat offsets across blocks
+ canRepeat := len(blk.sequences) > 2
+
+ for {
+ if debugAsserts && canRepeat && offset1 == 0 {
+ panic("offset0 was 0")
+ }
+
+ nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
+ nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+ candidateL := e.longTable[nextHashL]
+ candidateS := e.table[nextHashS]
+
+ const repOff = 1
+ repIndex := s - offset1 + repOff
+ entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.longTable[nextHashL] = entry
+ e.markLongShardDirty(nextHashL)
+ e.table[nextHashS] = entry
+ e.markShardDirty(nextHashS)
+
+ if canRepeat {
+ if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
+ // Consider history as well.
+ var seq seq
+ length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+
+ seq.matchLen = uint32(length - zstdMinMatch)
+
+ // We might be able to match backwards.
+ // Extend as long as we can.
+ start := s + repOff
+ // We end the search early, so we don't risk 0 literals
+ // and have to do special offset treatment.
+ startLimit := nextEmit + 1
+
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
+ repIndex--
+ start--
+ seq.matchLen++
+ }
+ addLiterals(&seq, start)
+
+ // rep 0
+ seq.offset = 1
+ if debugSequences {
+ println("repeat sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ s += length + repOff
+ nextEmit = s
+ if s >= sLimit {
+ if debugEncoder {
+ println("repeat ended", s, length)
+
+ }
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ continue
+ }
+ }
+ // Find the offsets of our two matches.
+ coffsetL := s - (candidateL.offset - e.cur)
+ coffsetS := s - (candidateS.offset - e.cur)
+
+ // Check if we have a long match.
+ if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
+ // Found a long match, likely at least 8 bytes.
+ // Reference encoder checks all 8 bytes, we only check 4,
+ // but the likelihood of both the first 4 bytes and the hash matching should be enough.
+ t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + e.markLongShardDirty(nextHashL) + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ }
+ seq.offset = uint32(s-t) + 3
+ s += l
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+ nextEmit = s
+ if s >= sLimit {
+ break encodeLoop
+ }
+
+ // Index match start+1 (long) and start+2 (short)
+ index0 := s - l + 1
+ // Index match end-2 (long) and end-1 (short)
+ index1 := s - 2
+
+ cv0 := load6432(src, index0)
+ cv1 := load6432(src, index1)
+ te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
+ te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
+ longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen)
+ longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen)
+ e.longTable[longHash1] = te0
+ e.longTable[longHash2] = te1
+ e.markLongShardDirty(longHash1)
+ e.markLongShardDirty(longHash2)
+ cv0 >>= 8
+ cv1 >>= 8
+ te0.offset++
+ te1.offset++
+ te0.val = uint32(cv0)
+ te1.val = uint32(cv1)
+ hashVal1 := hashLen(cv0, dFastShortTableBits, dFastShortLen)
+ hashVal2 := hashLen(cv1, dFastShortTableBits, dFastShortLen)
+ e.table[hashVal1] = te0
+ e.markShardDirty(hashVal1)
+ e.table[hashVal2] = te1
+ e.markShardDirty(hashVal2)
+
+ cv = load6432(src, s)
+
+ if !canRepeat {
+ continue
+ }
+
+ // Check offset 2
+ for {
+ o2 := s - offset2
+ if load3232(src, o2) != uint32(cv) {
+ // Do regular search
+ break
+ }
+
+ // Store this, since we have it.
+ nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
+ nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+
+ // We have at least 4 byte match.
+ // No need to check backwards. We come straight from a match
+ l := 4 + e.matchlen(s+4, o2+4, src)
+
+ entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.longTable[nextHashL] = entry
+ e.markLongShardDirty(nextHashL)
+ e.table[nextHashS] = entry
+ e.markShardDirty(nextHashS)
+ seq.matchLen = uint32(l) - zstdMinMatch
+ seq.litLen = 0
+
+ // Since litlen is always 0, this is offset 1.
+ seq.offset = 1
+ s += l
+ nextEmit = s
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+
+ // Swap offset 1 and 2.
+ offset1, offset2 = offset2, offset1
+ if s >= sLimit {
+ // Finished
+ break encodeLoop
+ }
+ cv = load6432(src, s)
+ }
+ }
+
+ if int(nextEmit) < len(src) {
+ blk.literals = append(blk.literals, src[nextEmit:]...)
+ blk.extraLits = len(src) - int(nextEmit)
+ }
+ blk.recentOffsets[0] = uint32(offset1)
+ blk.recentOffsets[1] = uint32(offset2)
+ if debugEncoder {
+ println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+ }
+ // If we encoded more than 64K, mark all shards dirty.
+ if len(src) > 64<<10 {
+ e.markAllShardsDirty()
+ }
+}
+
+// Reset will reset and set a dictionary if not nil
+func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) {
+ e.fastEncoder.Reset(d, singleBlock)
+ if d != nil {
+ panic("doubleFastEncoder: Reset with dict not supported")
+ }
+}
+
+// Reset will reset and set a dictionary if not nil
+func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
+ allDirty := e.allDirty
+ e.fastEncoderDict.Reset(d, singleBlock)
+ if d == nil {
+ return
+ }
+
+ // Init or copy dict table
+ if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID {
+ if len(e.dictLongTable) != len(e.longTable) {
+ e.dictLongTable = make([]tableEntry, len(e.longTable))
+ }
+ if len(d.content) >= 8 {
+ cv := load6432(d.content, 0)
+ e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{
+ val: uint32(cv),
+ offset: e.maxMatchOff,
+ }
+ end := int32(len(d.content)) - 8 + e.maxMatchOff
+ for i := e.maxMatchOff + 1; i < end; i++ {
+ cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56)
+ e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{
+ val: uint32(cv),
+ offset: i,
+ }
+ }
+ }
+ e.lastDictID = d.id
+ e.allDirty = true
+ }
+ // Reset table to initial state
+ e.cur = e.maxMatchOff
+
+ dirtyShardCnt := 0
+ if !allDirty {
+ for i := range e.longTableShardDirty {
+ if e.longTableShardDirty[i] {
+ dirtyShardCnt++
+ }
+ }
+ }
+
+ if allDirty || dirtyShardCnt > dLongTableShardCnt/2 {
+ copy(e.longTable[:], e.dictLongTable)
+ for i := range e.longTableShardDirty {
+ e.longTableShardDirty[i] = false
+ }
+ return
+ }
+ for i := range e.longTableShardDirty {
+ if !e.longTableShardDirty[i] {
+ continue
+ }
+
+ copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
+ e.longTableShardDirty[i] = false
+ }
+}
+
+func (e *doubleFastEncoderDict) markLongShardDirty(entryNum uint32) {
+ e.longTableShardDirty[entryNum/dLongTableShardSize] = true
+}
diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/hotelReservation/vendor/github.com/klauspost/compress/zstd/enc_fast.go
new file mode 100644
index 000000000..f2502629b
--- /dev/null
+++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/enc_fast.go
@@ -0,0 +1,1019 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "fmt"
+ "math"
+ "math/bits"
+)
+
+const (
+ tableBits = 15 // Bits used in the table
+ tableSize = 1 << tableBits // Size of the table
+ tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table
+ tableShardSize = tableSize / tableShardCnt // Size of an individual shard
+ tableFastHashLen = 6
+ tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
+ maxMatchLength = 131074
+)
+
+type tableEntry struct {
+ val uint32
+ offset int32
+}
+
+type fastEncoder struct {
+ fastBase
+ table [tableSize]tableEntry
+}
+
+type fastEncoderDict struct {
+ fastEncoder
+ dictTable []tableEntry
+ tableShardDirty [tableShardCnt]bool
+ allDirty bool
+}
+
+// Encode mimics functionality in zstd_fast.c
+func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
+ const (
+ inputMargin = 8
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+
+ // Protect against e.cur wraparound.
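+ // (Table offsets are stored relative to e.cur. Rather than always
+ // clearing the table when e.cur approaches bufferReset, entries that
+ // are still within e.maxMatchOff of the current history are rebased
+ // onto a fresh e.cur below, and older entries are zeroed so they can
+ // no longer produce a match.)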
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ e.cur = e.maxMatchOff
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v < minOff {
+ v = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ }
+ e.table[i].offset = v
+ }
+ e.cur = e.maxMatchOff
+ break
+ }
+
+ s := e.addBlock(src)
+ blk.size = len(src)
+ if len(src) < minNonLiteralBlockSize {
+ blk.extraLits = len(src)
+ blk.literals = blk.literals[:len(src)]
+ copy(blk.literals, src)
+ return
+ }
+
+ // Override src
+ src = e.hist
+ sLimit := int32(len(src)) - inputMargin
+ // stepSize is the number of bytes to skip on every main loop iteration.
+ // It should be >= 2.
+ const stepSize = 2
+
+ // TEMPLATE
+ const hashLog = tableBits
+ // seems global, but would be nice to tweak.
+ const kSearchStrength = 7
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := s
+ cv := load6432(src, s)
+
+ // Relative offsets
+ offset1 := int32(blk.recentOffsets[0])
+ offset2 := int32(blk.recentOffsets[1])
+
+ addLiterals := func(s *seq, until int32) {
+ if until == nextEmit {
+ return
+ }
+ blk.literals = append(blk.literals, src[nextEmit:until]...)
+ s.litLen = uint32(until - nextEmit)
+ }
+ if debugEncoder {
+ println("recent offsets:", blk.recentOffsets)
+ }
+
+encodeLoop:
+ for {
+ // t will contain the match offset when we find one.
+ // When exiting the search loop, we have already checked 4 bytes.
+ var t int32
+
+ // We will not use repeat offsets across blocks.
+ // By not using them for the first 3 matches
+ canRepeat := len(blk.sequences) > 2
+
+ for {
+ if debugAsserts && canRepeat && offset1 == 0 {
+ panic("offset0 was 0")
+ }
+
+ nextHash := hashLen(cv, hashLog, tableFastHashLen)
+ nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen)
+ candidate := e.table[nextHash]
+ candidate2 := e.table[nextHash2]
+ repIndex := s - offset1 + 2
+
+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
+
+ if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
+ // Consider history as well.
+ var seq seq
+ var length int32
+ // length = 4 + e.matchlen(s+6, repIndex+4, src)
+ {
+ a := src[s+6:]
+ b := src[repIndex+4:]
+ endI := len(a) & (math.MaxInt32 - 7)
+ length = int32(endI) + 4
+ for i := 0; i < endI; i += 8 {
+ if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+ length = int32(i+bits.TrailingZeros64(diff)>>3) + 4
+ break
+ }
+ }
+ }
+
+ seq.matchLen = uint32(length - zstdMinMatch)
+
+ // We might be able to match backwards.
+ // Extend as long as we can.
+ start := s + 2
+ // We end the search early, so we don't risk 0 literals
+ // and have to do special offset treatment.
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + //l := e.matchlen(s+4, t+4, src) + 4 + var l int32 + { + a := src[s+4:] + b := src[t+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + //l := 4 + e.matchlen(s+4, o2+4, src) + var l int32 + { + a := src[s+4:] + b := src[o2+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. 
+ seq.offset = 1
+ s += l
+ nextEmit = s
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+
+ // Swap offset 1 and 2.
+ offset1, offset2 = offset2, offset1
+ if s >= sLimit {
+ break encodeLoop
+ }
+ // Prepare next loop.
+ cv = load6432(src, s)
+ }
+ }
+
+ if int(nextEmit) < len(src) {
+ blk.literals = append(blk.literals, src[nextEmit:]...)
+ blk.extraLits = len(src) - int(nextEmit)
+ }
+ blk.recentOffsets[0] = uint32(offset1)
+ blk.recentOffsets[1] = uint32(offset2)
+ if debugEncoder {
+ println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+ }
+}
+
+// EncodeNoHist will encode a block with no history and no following blocks.
+// Most notable difference is that src will not be copied for history and
+// we do not need to check for max match length.
+func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
+ const (
+ inputMargin = 8
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+ if debugEncoder {
+ if len(src) > maxBlockSize {
+ panic("src too big")
+ }
+ }
+
+ // Protect against e.cur wraparound.
+ if e.cur >= bufferReset {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ e.cur = e.maxMatchOff
+ }
+
+ s := int32(0)
+ blk.size = len(src)
+ if len(src) < minNonLiteralBlockSize {
+ blk.extraLits = len(src)
+ blk.literals = blk.literals[:len(src)]
+ copy(blk.literals, src)
+ return
+ }
+
+ sLimit := int32(len(src)) - inputMargin
+ // stepSize is the number of bytes to skip on every main loop iteration.
+ // It should be >= 2.
+ const stepSize = 2
+
+ // TEMPLATE
+ const hashLog = tableBits
+ // seems global, but would be nice to tweak.
+ const kSearchStrength = 8
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := s
+ cv := load6432(src, s)
+
+ // Relative offsets
+ offset1 := int32(blk.recentOffsets[0])
+ offset2 := int32(blk.recentOffsets[1])
+
+ addLiterals := func(s *seq, until int32) {
+ if until == nextEmit {
+ return
+ }
+ blk.literals = append(blk.literals, src[nextEmit:until]...)
+ s.litLen = uint32(until - nextEmit)
+ }
+ if debugEncoder {
+ println("recent offsets:", blk.recentOffsets)
+ }
+
+encodeLoop:
+ for {
+ // t will contain the match offset when we find one.
+ // When exiting the search loop, we have already checked 4 bytes.
+ var t int32
+
+ // We will not use repeat offsets across blocks.
+ // By not using them for the first 3 matches
+
+ for {
+ nextHash := hashLen(cv, hashLog, tableFastHashLen)
+ nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen)
+ candidate := e.table[nextHash]
+ candidate2 := e.table[nextHash2]
+ repIndex := s - offset1 + 2
+
+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+ e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
+
+ if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) {
+ // Consider history as well.
+ var seq seq
+ // length := 4 + e.matchlen(s+6, repIndex+4, src)
+ // length := 4 + int32(matchLen(src[s+6:], src[repIndex+4:]))
+ var length int32
+ {
+ a := src[s+6:]
+ b := src[repIndex+4:]
+ endI := len(a) & (math.MaxInt32 - 7)
+ length = int32(endI) + 4
+ for i := 0; i < endI; i += 8 {
+ if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+ length = int32(i+bits.TrailingZeros64(diff)>>3) + 4
+ break
+ }
+ }
+ }
+
+ seq.matchLen = uint32(length - zstdMinMatch)
+
+ // We might be able to match backwards.
+ // Extend as long as we can.
+ start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff)) + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0 ", t)) + } + // Extend the 4-byte match as long as possible. + //l := e.matchlenNoHist(s+4, t+4, src) + 4 + // l := int32(matchLen(src[s+4:], src[t+4:])) + 4 + var l int32 + { + a := src[s+4:] + b := src[t+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. 
We come straight from a match
+ //l := 4 + e.matchlenNoHist(s+4, o2+4, src)
+ // l := 4 + int32(matchLen(src[s+4:], src[o2+4:]))
+ var l int32
+ {
+ a := src[s+4:]
+ b := src[o2+4:]
+ endI := len(a) & (math.MaxInt32 - 7)
+ l = int32(endI) + 4
+ for i := 0; i < endI; i += 8 {
+ if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+ l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
+ break
+ }
+ }
+ }
+
+ // Store this, since we have it.
+ nextHash := hashLen(cv, hashLog, tableFastHashLen)
+ e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
+ seq.matchLen = uint32(l) - zstdMinMatch
+ seq.litLen = 0
+ // Since litlen is always 0, this is offset 1.
+ seq.offset = 1
+ s += l
+ nextEmit = s
+ if debugSequences {
+ println("sequence", seq, "next s:", s)
+ }
+ blk.sequences = append(blk.sequences, seq)
+
+ // Swap offset 1 and 2.
+ offset1, offset2 = offset2, offset1
+ if s >= sLimit {
+ break encodeLoop
+ }
+ // Prepare next loop.
+ cv = load6432(src, s)
+ }
+ }
+
+ if int(nextEmit) < len(src) {
+ blk.literals = append(blk.literals, src[nextEmit:]...)
+ blk.extraLits = len(src) - int(nextEmit)
+ }
+ if debugEncoder {
+ println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+ }
+ // We do not store history, so we must offset e.cur to avoid false matches for next user.
+ if e.cur < bufferReset {
+ e.cur += int32(len(src))
+ }
+}
+
+// Encode will encode the content, with a dictionary if initialized for it.
+func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) {
+ const (
+ inputMargin = 8
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+ if e.allDirty || len(src) > 32<<10 {
+ e.fastEncoder.Encode(blk, src)
+ e.allDirty = true
+ return
+ }
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ e.cur = e.maxMatchOff
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v < minOff {
+ v = 0
+ } else {
+ v = v - e.cur + e.maxMatchOff
+ }
+ e.table[i].offset = v
+ }
+ e.cur = e.maxMatchOff
+ break
+ }
+
+ s := e.addBlock(src)
+ blk.size = len(src)
+ if len(src) < minNonLiteralBlockSize {
+ blk.extraLits = len(src)
+ blk.literals = blk.literals[:len(src)]
+ copy(blk.literals, src)
+ return
+ }
+
+ // Override src
+ src = e.hist
+ sLimit := int32(len(src)) - inputMargin
+ // stepSize is the number of bytes to skip on every main loop iteration.
+ // It should be >= 2.
+ const stepSize = 2
+
+ // TEMPLATE
+ const hashLog = tableBits
+ // seems global, but would be nice to tweak.
+ const kSearchStrength = 7
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := s
+ cv := load6432(src, s)
+
+ // Relative offsets
+ offset1 := int32(blk.recentOffsets[0])
+ offset2 := int32(blk.recentOffsets[1])
+
+ addLiterals := func(s *seq, until int32) {
+ if until == nextEmit {
+ return
+ }
+ blk.literals = append(blk.literals, src[nextEmit:until]...)
+ s.litLen = uint32(until - nextEmit)
+ }
+ if debugEncoder {
+ println("recent offsets:", blk.recentOffsets)
+ }
+
+encodeLoop:
+ for {
+ // t will contain the match offset when we find one.
+ // When exiting the search loop, we have already checked 4 bytes.
+ var t int32
+
+ // We will not use repeat offsets across blocks.
+ // By not using them for the first 3 matches + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + e.markShardDirty(nextHash2) + + if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + var length int32 + // length = 4 + e.matchlen(s+6, repIndex+4, src) + { + a := src[s+6:] + b := src[repIndex+4:] + endI := len(a) & (math.MaxInt32 - 7) + length = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + length = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. 
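+ // (The block below is a manually inlined matchlen: it compares 8 bytes
+ // at a time via XOR, and the trailing-zero count of the first non-zero
+ // difference, divided by 8, gives the number of extra matching bytes.)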
+ //l := e.matchlen(s+4, t+4, src) + 4 + var l int32 + { + a := src[s+4:] + b := src[t+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + //l := 4 + e.matchlen(s+4, o2+4, src) + var l int32 + { + a := src[s+4:] + b := src[o2+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit)
+ }
+ blk.recentOffsets[0] = uint32(offset1)
+ blk.recentOffsets[1] = uint32(offset2)
+ if debugEncoder {
+ println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
+ }
+}
+
+// Reset will reset and set a dictionary if not nil
+func (e *fastEncoder) Reset(d *dict, singleBlock bool) {
+ e.resetBase(d, singleBlock)
+ if d != nil {
+ panic("fastEncoder: Reset with dict")
+ }
+}
+
+// Reset will reset and set a dictionary if not nil
+func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
+ e.resetBase(d, singleBlock)
+ if d == nil {
+ return
+ }
+
+ // Init or copy dict table
+ if len(e.dictTable) != len(e.table) || d.id != e.lastDictID {
+ if len(e.dictTable) != len(e.table) {
+ e.dictTable = make([]tableEntry, len(e.table))
+ }
+ if true {
+ end := e.maxMatchOff + int32(len(d.content)) - 8
+ for i := e.maxMatchOff; i < end; i += 3 {
+ const hashLog = tableBits
+
+ cv := load6432(d.content, i-e.maxMatchOff)
+ nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 5
+ nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 6
+ nextHash2 := hashLen(cv>>16, hashLog, tableFastHashLen) // 2 -> 7
+ e.dictTable[nextHash] = tableEntry{
+ val: uint32(cv),
+ offset: i,
+ }
+ e.dictTable[nextHash1] = tableEntry{
+ val: uint32(cv >> 8),
+ offset: i + 1,
+ }
+ e.dictTable[nextHash2] = tableEntry{
+ val: uint32(cv >> 16),
+ offset: i + 2,
+ }
+ }
+ }
+ e.lastDictID = d.id
+ e.allDirty = true
+ }
+
+ e.cur = e.maxMatchOff
+ dirtyShardCnt := 0
+ if !e.allDirty {
+ for i := range e.tableShardDirty {
+ if e.tableShardDirty[i] {
+ dirtyShardCnt++
+ }
+ }
+ }
+
+ const shardCnt = tableShardCnt
+ const shardSize = tableShardSize
+ if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
+ copy(e.table[:], e.dictTable)
+ for i := range e.tableShardDirty {
+ e.tableShardDirty[i] = false
+ }
+ e.allDirty = false
+ return
+ }
+ for i := range e.tableShardDirty {
+ if !e.tableShardDirty[i] {
+ continue
+ }
+
+ copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+ e.tableShardDirty[i] = false
+ }
+ e.allDirty = false
+}
+
+func (e *fastEncoderDict) markAllShardsDirty() {
+ e.allDirty = true
+}
+
+func (e *fastEncoderDict) markShardDirty(entryNum uint32) {
+ e.tableShardDirty[entryNum/tableShardSize] = true
+}
diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/encoder.go b/hotelReservation/vendor/github.com/klauspost/compress/zstd/encoder.go
new file mode 100644
index 000000000..e6e315969
--- /dev/null
+++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/encoder.go
@@ -0,0 +1,599 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "crypto/rand"
+ "fmt"
+ "io"
+ rdebug "runtime/debug"
+ "sync"
+
+ "github.com/klauspost/compress/zstd/internal/xxhash"
+)
+
+// Encoder provides encoding to Zstandard.
+// An Encoder can be used for either compressing a stream via the
+// io.WriteCloser interface supported by the Encoder or as multiple independent
+// tasks via the EncodeAll function.
+// For smaller inputs, using the EncodeAll function is encouraged.
+// Use NewWriter to create a new instance.
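+//
+// A minimal sketch of both modes (error handling elided; w and data are
+// placeholder names for an io.Writer and a []byte):
+//
+//	enc, _ := NewWriter(w)
+//	_, _ = enc.Write(data)
+//	_ = enc.Close()
+//
+//	// Or, for independent buffers:
+//	compressed := enc.EncodeAll(data, nil)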
+type Encoder struct { + o encoderOptions + encoders chan encoder + state encoderState + init sync.Once +} + +type encoder interface { + Encode(blk *blockEnc, src []byte) + EncodeNoHist(blk *blockEnc, src []byte) + Block() *blockEnc + CRC() *xxhash.Digest + AppendCRC([]byte) []byte + WindowSize(size int64) int32 + UseBlock(*blockEnc) + Reset(d *dict, singleBlock bool) +} + +type encoderState struct { + w io.Writer + filling []byte + current []byte + previous []byte + encoder encoder + writing *blockEnc + err error + writeErr error + nWritten int64 + nInput int64 + frameContentSize int64 + headerWritten bool + eofWritten bool + fullFrameWritten bool + + // This waitgroup indicates an encode is running. + wg sync.WaitGroup + // This waitgroup indicates we have a block encoding/writing. + wWg sync.WaitGroup +} + +// NewWriter will create a new Zstandard encoder. +// If the encoder will be used for encoding blocks a nil writer can be used. +func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) { + initPredefined() + var e Encoder + e.o.setDefault() + for _, o := range opts { + err := o(&e.o) + if err != nil { + return nil, err + } + } + if w != nil { + e.Reset(w) + } + return &e, nil +} + +func (e *Encoder) initialize() { + if e.o.concurrent == 0 { + e.o.setDefault() + } + e.encoders = make(chan encoder, e.o.concurrent) + for i := 0; i < e.o.concurrent; i++ { + enc := e.o.encoder() + e.encoders <- enc + } +} + +// Reset will re-initialize the writer and new writes will encode to the supplied writer +// as a new, independent stream. +func (e *Encoder) Reset(w io.Writer) { + s := &e.state + s.wg.Wait() + s.wWg.Wait() + if cap(s.filling) == 0 { + s.filling = make([]byte, 0, e.o.blockSize) + } + if cap(s.current) == 0 { + s.current = make([]byte, 0, e.o.blockSize) + } + if cap(s.previous) == 0 { + s.previous = make([]byte, 0, e.o.blockSize) + } + if s.encoder == nil { + s.encoder = e.o.encoder() + } + if s.writing == nil { + s.writing = &blockEnc{lowMem: e.o.lowMem} + s.writing.init() + } + s.writing.initNewEncode() + s.filling = s.filling[:0] + s.current = s.current[:0] + s.previous = s.previous[:0] + s.encoder.Reset(e.o.dict, false) + s.headerWritten = false + s.eofWritten = false + s.fullFrameWritten = false + s.w = w + s.err = nil + s.nWritten = 0 + s.nInput = 0 + s.writeErr = nil + s.frameContentSize = 0 +} + +// ResetContentSize will reset and set a content size for the next stream. +// If the bytes written does not match the size given an error will be returned +// when calling Close(). +// This is removed when Reset is called. +// Sizes <= 0 results in no content size set. +func (e *Encoder) ResetContentSize(w io.Writer, size int64) { + e.Reset(w) + if size >= 0 { + e.state.frameContentSize = size + } +} + +// Write data to the encoder. +// Input data will be buffered and as the buffer fills up +// content will be compressed and written to the output. +// When done writing, use Close to flush the remaining output +// and write CRC if requested. +func (e *Encoder) Write(p []byte) (n int, err error) { + s := &e.state + for len(p) > 0 { + if len(p)+len(s.filling) < e.o.blockSize { + if e.o.crc { + _, _ = s.encoder.CRC().Write(p) + } + s.filling = append(s.filling, p...) + return n + len(p), nil + } + add := p + if len(p)+len(s.filling) > e.o.blockSize { + add = add[:e.o.blockSize-len(s.filling)] + } + if e.o.crc { + _, _ = s.encoder.CRC().Write(add) + } + s.filling = append(s.filling, add...) 
+ p = p[len(add):] + n += len(add) + if len(s.filling) < e.o.blockSize { + return n, nil + } + err := e.nextBlock(false) + if err != nil { + return n, err + } + if debugAsserts && len(s.filling) > 0 { + panic(len(s.filling)) + } + } + return n, nil +} + +// nextBlock will synchronize and start compressing input in e.state.filling. +// If an error has occurred during encoding it will be returned. +func (e *Encoder) nextBlock(final bool) error { + s := &e.state + // Wait for current block. + s.wg.Wait() + if s.err != nil { + return s.err + } + if len(s.filling) > e.o.blockSize { + return fmt.Errorf("block > maxStoreBlockSize") + } + if !s.headerWritten { + // If we have a single block encode, do a sync compression. + if final && len(s.filling) == 0 && !e.o.fullZero { + s.headerWritten = true + s.fullFrameWritten = true + s.eofWritten = true + return nil + } + if final && len(s.filling) > 0 { + s.current = e.EncodeAll(s.filling, s.current[:0]) + var n2 int + n2, s.err = s.w.Write(s.current) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + s.nInput += int64(len(s.filling)) + s.current = s.current[:0] + s.filling = s.filling[:0] + s.headerWritten = true + s.fullFrameWritten = true + s.eofWritten = true + return nil + } + + var tmp [maxHeaderSize]byte + fh := frameHeader{ + ContentSize: uint64(s.frameContentSize), + WindowSize: uint32(s.encoder.WindowSize(s.frameContentSize)), + SingleSegment: false, + Checksum: e.o.crc, + DictID: e.o.dict.ID(), + } + + dst, err := fh.appendTo(tmp[:0]) + if err != nil { + return err + } + s.headerWritten = true + s.wWg.Wait() + var n2 int + n2, s.err = s.w.Write(dst) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + } + if s.eofWritten { + // Ensure we only write it once. + final = false + } + + if len(s.filling) == 0 { + // Final block, but no data. + if final { + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + blk.last = true + blk.encodeRaw(nil) + s.wWg.Wait() + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.eofWritten = true + } + return s.err + } + + // Move blocks forward. + s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current + s.nInput += int64(len(s.current)) + s.wg.Add(1) + go func(src []byte) { + if debugEncoder { + println("Adding block,", len(src), "bytes, final:", final) + } + defer func() { + if r := recover(); r != nil { + s.err = fmt.Errorf("panic while encoding: %v", r) + rdebug.PrintStack() + } + s.wg.Done() + }() + enc := s.encoder + blk := enc.Block() + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + // Wait for pending writes. + s.wWg.Wait() + if s.writeErr != nil { + s.err = s.writeErr + return + } + // Transfer encoders from previous write block. + blk.swapEncoders(s.writing) + // Transfer recent offsets to next. + enc.UseBlock(s.writing) + s.writing = blk + s.wWg.Add(1) + go func() { + defer func() { + if r := recover(); r != nil { + s.writeErr = fmt.Errorf("panic while encoding/writing: %v", r) + rdebug.PrintStack() + } + s.wWg.Done() + }() + err := errIncompressible + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. 
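+ // (If so, err stays errIncompressible and the switch below stores
+ // the block as a raw, uncompressed block instead.)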
+ if len(src) != len(blk.literals) || len(src) != e.o.blockSize { + err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + } + switch err { + case errIncompressible: + if debugEncoder { + println("Storing incompressible block as raw") + } + blk.encodeRaw(src) + // In fast mode, we do not transfer offsets, so we don't have to deal with changing the. + case nil: + default: + s.writeErr = err + return + } + _, s.writeErr = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + }() + }(s.current) + return nil +} + +// ReadFrom reads data from r until EOF or error. +// The return value n is the number of bytes read. +// Any error except io.EOF encountered during the read is also returned. +// +// The Copy function uses ReaderFrom if available. +func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { + if debugEncoder { + println("Using ReadFrom") + } + + // Flush any current writes. + if len(e.state.filling) > 0 { + if err := e.nextBlock(false); err != nil { + return 0, err + } + } + e.state.filling = e.state.filling[:e.o.blockSize] + src := e.state.filling + for { + n2, err := r.Read(src) + if e.o.crc { + _, _ = e.state.encoder.CRC().Write(src[:n2]) + } + // src is now the unfilled part... + src = src[n2:] + n += int64(n2) + switch err { + case io.EOF: + e.state.filling = e.state.filling[:len(e.state.filling)-len(src)] + if debugEncoder { + println("ReadFrom: got EOF final block:", len(e.state.filling)) + } + return n, nil + case nil: + default: + if debugEncoder { + println("ReadFrom: got error:", err) + } + e.state.err = err + return n, err + } + if len(src) > 0 { + if debugEncoder { + println("ReadFrom: got space left in source:", len(src)) + } + continue + } + err = e.nextBlock(false) + if err != nil { + return n, err + } + e.state.filling = e.state.filling[:e.o.blockSize] + src = e.state.filling + } +} + +// Flush will send the currently written data to output +// and block until everything has been written. +// This should only be used on rare occasions where pushing the currently queued data is critical. +func (e *Encoder) Flush() error { + s := &e.state + if len(s.filling) > 0 { + err := e.nextBlock(false) + if err != nil { + return err + } + } + s.wg.Wait() + s.wWg.Wait() + if s.err != nil { + return s.err + } + return s.writeErr +} + +// Close will flush the final output and close the stream. +// The function will block until everything has been written. +// The Encoder can still be re-used after calling this. +func (e *Encoder) Close() error { + s := &e.state + if s.encoder == nil { + return nil + } + err := e.nextBlock(true) + if err != nil { + return err + } + if s.frameContentSize > 0 { + if s.nInput != s.frameContentSize { + return fmt.Errorf("frame content size %d given, but %d bytes was written", s.frameContentSize, s.nInput) + } + } + if e.state.fullFrameWritten { + return s.err + } + s.wg.Wait() + s.wWg.Wait() + + if s.err != nil { + return s.err + } + if s.writeErr != nil { + return s.writeErr + } + + // Write CRC + if e.o.crc && s.err == nil { + // heap alloc. + var tmp [4]byte + _, s.err = s.w.Write(s.encoder.AppendCRC(tmp[:0])) + s.nWritten += 4 + } + + // Add padding with content from crypto/rand.Reader + if s.err == nil && e.o.pad > 0 { + add := calcSkippableFrame(s.nWritten, int64(e.o.pad)) + frame, err := skippableFrame(s.filling[:0], add, rand.Reader) + if err != nil { + return err + } + _, s.err = s.w.Write(frame) + } + return s.err +} + +// EncodeAll will encode all input in src and append it to dst. 
+// This function can be called concurrently, but each call will only run on a single goroutine. +// If empty input is given, nothing is returned, unless WithZeroFrames is specified. +// Encoded blocks can be concatenated and the result will be the combined input stream. +// Data compressed with EncodeAll can be decoded with the Decoder, +// using either a stream or DecodeAll. +func (e *Encoder) EncodeAll(src, dst []byte) []byte { + if len(src) == 0 { + if e.o.fullZero { + // Add frame header. + fh := frameHeader{ + ContentSize: 0, + WindowSize: MinWindowSize, + SingleSegment: true, + // Adding a checksum would be a waste of space. + Checksum: false, + DictID: 0, + } + dst, _ = fh.appendTo(dst) + + // Write raw block as last one only. + var blk blockHeader + blk.setSize(0) + blk.setType(blockTypeRaw) + blk.setLast(true) + dst = blk.appendTo(dst) + } + return dst + } + e.init.Do(e.initialize) + enc := <-e.encoders + defer func() { + // Release encoder reference to last block. + // If a non-single block is needed the encoder will reset again. + e.encoders <- enc + }() + // Use single segments when above minimum window and below 1MB. + single := len(src) < 1<<20 && len(src) > MinWindowSize + if e.o.single != nil { + single = *e.o.single + } + fh := frameHeader{ + ContentSize: uint64(len(src)), + WindowSize: uint32(enc.WindowSize(int64(len(src)))), + SingleSegment: single, + Checksum: e.o.crc, + DictID: e.o.dict.ID(), + } + + // If less than 1MB, allocate a buffer up front. + if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem { + dst = make([]byte, 0, len(src)) + } + dst, err := fh.appendTo(dst) + if err != nil { + panic(err) + } + + // If we can do everything in one block, prefer that. + if len(src) <= maxCompressedBlockSize { + enc.Reset(e.o.dict, true) + // Slightly faster with no history and everything in one block. + if e.o.crc { + _, _ = enc.CRC().Write(src) + } + blk := enc.Block() + blk.last = true + if e.o.dict == nil { + enc.EncodeNoHist(blk, src) + } else { + enc.Encode(blk, src) + } + + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + err := errIncompressible + oldout := blk.output + if len(blk.literals) != len(src) || len(src) != e.o.blockSize { + // Output directly to dst + blk.output = dst + err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + } + + switch err { + case errIncompressible: + if debugEncoder { + println("Storing incompressible block as raw") + } + dst = blk.encodeRawTo(dst, src) + case nil: + dst = blk.output + default: + panic(err) + } + blk.output = oldout + } else { + enc.Reset(e.o.dict, false) + blk := enc.Block() + for len(src) > 0 { + todo := src + if len(todo) > e.o.blockSize { + todo = todo[:e.o.blockSize] + } + src = src[len(todo):] + if e.o.crc { + _, _ = enc.CRC().Write(todo) + } + blk.pushOffsets() + enc.Encode(blk, todo) + if len(src) == 0 { + blk.last = true + } + err := errIncompressible + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize { + err = blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy) + } + + switch err { + case errIncompressible: + if debugEncoder { + println("Storing incompressible block as raw") + } + dst = blk.encodeRawTo(dst, todo) + blk.popOffsets() + case nil: + dst = append(dst, blk.output...) 
+ default: + panic(err) + } + blk.reset(nil) + } + } + if e.o.crc { + dst = enc.AppendCRC(dst) + } + // Add padding with content from crypto/rand.Reader + if e.o.pad > 0 { + add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad)) + dst, err = skippableFrame(dst, add, rand.Reader) + if err != nil { + panic(err) + } + } + return dst +} diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/hotelReservation/vendor/github.com/klauspost/compress/zstd/encoder_options.go new file mode 100644 index 000000000..7d29e1d68 --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -0,0 +1,312 @@ +package zstd + +import ( + "errors" + "fmt" + "runtime" + "strings" +) + +// EOption is an option for creating an encoder. +type EOption func(*encoderOptions) error + +// encoderOptions retains the accumulated state of multiple options. +type encoderOptions struct { + concurrent int + level EncoderLevel + single *bool + pad int + blockSize int + windowSize int + crc bool + fullZero bool + noEntropy bool + allLitEntropy bool + customWindow bool + customALEntropy bool + lowMem bool + dict *dict +} + +func (o *encoderOptions) setDefault() { + *o = encoderOptions{ + concurrent: runtime.GOMAXPROCS(0), + crc: true, + single: nil, + blockSize: 1 << 16, + windowSize: 8 << 20, + level: SpeedDefault, + allLitEntropy: true, + lowMem: false, + } +} + +// encoder returns an encoder with the selected options. +func (o encoderOptions) encoder() encoder { + switch o.level { + case SpeedFastest: + if o.dict != nil { + return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} + } + return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} + + case SpeedDefault: + if o.dict != nil { + return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}} + } + return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} + case SpeedBetterCompression: + if o.dict != nil { + return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} + } + return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} + case SpeedBestCompression: + return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} + } + panic("unknown compression level") +} + +// WithEncoderCRC will add a CRC value to the output. +// Output will be 4 bytes larger. +func WithEncoderCRC(b bool) EOption { + return func(o *encoderOptions) error { o.crc = b; return nil } +} + +// WithEncoderConcurrency will set the concurrency, +// meaning the maximum number of encoders to run concurrently. +// The value supplied must be at least 1. +// By default this will be set to GOMAXPROCS. +func WithEncoderConcurrency(n int) EOption { + return func(o *encoderOptions) error { + if n <= 0 { + return fmt.Errorf("concurrency must be at least 1") + } + o.concurrent = n + return nil + } +} + +// WithWindowSize will set the maximum allowed back-reference distance. +// The value must be a power of two between MinWindowSize and MaxWindowSize. +// A larger value will enable better compression but allocate more memory and, +// for above-default values, take considerably longer.
+// The default value is determined by the compression level. +func WithWindowSize(n int) EOption { + return func(o *encoderOptions) error { + switch { + case n < MinWindowSize: + return fmt.Errorf("window size must be at least %d", MinWindowSize) + case n > MaxWindowSize: + return fmt.Errorf("window size must be at most %d", MaxWindowSize) + case (n & (n - 1)) != 0: + return errors.New("window size must be a power of 2") + } + + o.windowSize = n + o.customWindow = true + if o.blockSize > o.windowSize { + o.blockSize = o.windowSize + } + return nil + } +} + +// WithEncoderPadding will add padding to all output so the size will be a multiple of n. +// This can be used to obfuscate the exact output size or make blocks of a certain size. +// The contents will be a skippable frame, so it will be invisible to the decoder. +// n must be > 0 and <= 1GB, 1<<30 bytes. +// The padded area will be filled with data from crypto/rand.Reader. +// If `EncodeAll` is used with data already in the destination, the total size will be a multiple of this. +func WithEncoderPadding(n int) EOption { + return func(o *encoderOptions) error { + if n <= 0 { + return fmt.Errorf("padding must be at least 1") + } + // No need to waste our time. + if n == 1 { + o.pad = 0 + } + if n > 1<<30 { + return fmt.Errorf("padding must be less than 1GB (1<<30 bytes)") + } + o.pad = n + return nil + } +} + +// EncoderLevel predefines encoder compression levels. +// Only use the constants made available, since the actual mapping +// of these values is very likely to change and your compression could change +// unpredictably when upgrading the library. +type EncoderLevel int + +const ( + speedNotSet EncoderLevel = iota + + // SpeedFastest will choose the fastest reasonable compression. + // This is roughly equivalent to the fastest Zstandard mode. + SpeedFastest + + // SpeedDefault is the default "pretty fast" compression option. + // This is roughly equivalent to the default Zstandard mode (level 3). + SpeedDefault + + // SpeedBetterCompression will yield better compression than the default. + // Currently it is about zstd level 7-8 with ~ 2x-3x the default CPU usage. + // If you use this, be aware that CPU usage may go up in future versions. + SpeedBetterCompression + + // SpeedBestCompression will choose the best available compression option. + // This will offer the best compression no matter the CPU cost. + SpeedBestCompression + + // speedLast should be kept as the last actual compression option. + // It is not for external usage, but is used to keep track of the valid options. + speedLast +) + +// EncoderLevelFromString will convert a string representation of an encoding level back +// to a compression level. The comparison is not case sensitive. +// If the string wasn't recognized, (false, SpeedDefault) will be returned. +func EncoderLevelFromString(s string) (bool, EncoderLevel) { + for l := speedNotSet + 1; l < speedLast; l++ { + if strings.EqualFold(s, l.String()) { + return true, l + } + } + return false, SpeedDefault +} + +// EncoderLevelFromZstd will return an encoder level that closest matches the compression +// ratio of a specific zstd compression level. +// Many input values will provide the same compression level.
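Editor's note: the options above compose through zstd.NewWriter, the package's public constructor (defined in encoder.go, outside this hunk). A minimal sketch, not taken from this diff, of wiring level, padding and CRC together:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer
	enc, err := zstd.NewWriter(&buf,
		zstd.WithEncoderLevel(zstd.SpeedBetterCompression),
		zstd.WithEncoderPadding(4096), // pad total output to 4 KiB multiples
		zstd.WithEncoderCRC(true),     // the default, shown for clarity
	)
	if err != nil {
		panic(err)
	}
	if _, err := enc.Write([]byte("hello zstd")); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil { // flushes, writes CRC and padding
		panic(err)
	}
	fmt.Println(buf.Len()%4096 == 0) // true: padding rounds the frame up
}
```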
+func EncoderLevelFromZstd(level int) EncoderLevel { + switch { + case level < 3: + return SpeedFastest + case level >= 3 && level < 6: + return SpeedDefault + case level >= 6 && level < 10: + return SpeedBetterCompression + case level >= 10: + return SpeedBestCompression + } + return SpeedDefault +} + +// String provides a string representation of the compression level. +func (e EncoderLevel) String() string { + switch e { + case SpeedFastest: + return "fastest" + case SpeedDefault: + return "default" + case SpeedBetterCompression: + return "better" + case SpeedBestCompression: + return "best" + default: + return "invalid" + } +} + +// WithEncoderLevel specifies a predefined compression level. +func WithEncoderLevel(l EncoderLevel) EOption { + return func(o *encoderOptions) error { + switch { + case l <= speedNotSet || l >= speedLast: + return fmt.Errorf("unknown encoder level") + } + o.level = l + if !o.customWindow { + switch o.level { + case SpeedFastest: + o.windowSize = 4 << 20 + case SpeedDefault: + o.windowSize = 8 << 20 + case SpeedBetterCompression: + o.windowSize = 16 << 20 + case SpeedBestCompression: + o.windowSize = 32 << 20 + } + } + if !o.customALEntropy { + o.allLitEntropy = l > SpeedFastest + } + + return nil + } +} + +// WithZeroFrames will encode 0 length input as full frames. +// This can be needed for compatibility with zstandard usage, +// but is not needed for this package. +func WithZeroFrames(b bool) EOption { + return func(o *encoderOptions) error { + o.fullZero = b + return nil + } +} + +// WithAllLitEntropyCompression will apply entropy compression if no matches are found. +// Disabling this will skip incompressible data faster, but in cases with no matches but +// skewed character distribution compression is lost. +// Default value depends on the compression level selected. +func WithAllLitEntropyCompression(b bool) EOption { + return func(o *encoderOptions) error { + o.customALEntropy = true + o.allLitEntropy = b + return nil + } +} + +// WithNoEntropyCompression will always skip entropy compression of literals. +// This can be useful if content has matches, but unlikely to benefit from entropy +// compression. Usually the slight speed improvement is not worth enabling this. +func WithNoEntropyCompression(b bool) EOption { + return func(o *encoderOptions) error { + o.noEntropy = b + return nil + } +} + +// WithSingleSegment will set the "single segment" flag when EncodeAll is used. +// If this flag is set, data must be regenerated within a single continuous memory segment. +// In this case, Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present. +// As a consequence, the decoder must allocate a memory segment of size equal or larger than size of your content. +// In order to preserve the decoder from unreasonable memory requirements, +// a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range. +// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB. +// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations. +// If this is not specified, block encodes will automatically choose this based on the input size. +// This setting has no effect on streamed encodes. 
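Editor's note: a hedged usage sketch of the two level-lookup helpers above; the printed values follow directly from the mappings in this file.

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Map a zstandard integer level onto the four coarse levels.
	fmt.Println(zstd.EncoderLevelFromZstd(7)) // better

	// Round-trip a level name; the match is case insensitive.
	ok, lvl := zstd.EncoderLevelFromString("BEST")
	fmt.Println(ok, lvl) // true best
}
```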
+func WithSingleSegment(b bool) EOption { + return func(o *encoderOptions) error { + o.single = &b + return nil + } +} + +// WithLowerEncoderMem will, in some cases, trade less memory usage for +// slower encoding speed. +// This will not change the window size, which is the primary means of reducing +// memory usage. See WithWindowSize. +func WithLowerEncoderMem(b bool) EOption { + return func(o *encoderOptions) error { + o.lowMem = b + return nil + } +} + +// WithEncoderDict allows registering a dictionary that will be used for encoding. +// The encoder *may* choose to use no dictionary instead for certain payloads. +func WithEncoderDict(dict []byte) EOption { + return func(o *encoderOptions) error { + d, err := loadDict(dict) + if err != nil { + return err + } + o.dict = d + return nil + } +} diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/framedec.go b/hotelReservation/vendor/github.com/klauspost/compress/zstd/framedec.go new file mode 100644 index 000000000..989c79f8c --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -0,0 +1,521 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "bytes" + "encoding/hex" + "errors" + "hash" + "io" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +type frameDec struct { + o decoderOptions + crc hash.Hash64 + offset int64 + + WindowSize uint64 + + // In order queue of blocks being decoded. + decoding chan *blockDec + + // Frame history passed between blocks + history history + + rawInput byteBuffer + + // Byte buffer that can be reused for small input blocks. + bBuf byteBuf + + FrameContentSize uint64 + frameDone sync.WaitGroup + + DictionaryID *uint32 + HasCheckSum bool + SingleSegment bool + + // asyncRunning indicates whether the async routine processes input on 'decoding'. + asyncRunningMu sync.Mutex + asyncRunning bool +} + +const ( + // MinWindowSize is the minimum Window Size, which is 1 KB. + MinWindowSize = 1 << 10 + + // MaxWindowSize is the maximum encoder window size + // and the default decoder maximum window size. + MaxWindowSize = 1 << 29 +) + +var ( + frameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} + skippableFrameMagic = []byte{0x2a, 0x4d, 0x18} +) + +func newFrameDec(o decoderOptions) *frameDec { + if o.maxWindowSize > o.maxDecodedSize { + o.maxWindowSize = o.maxDecodedSize + } + d := frameDec{ + o: o, + } + return &d +} + +// reset will read the frame header and prepare for block decoding. +// If nothing can be read from the input, io.EOF will be returned. +// Any other error indicates that the stream contained data, but +// there was a problem. +func (d *frameDec) reset(br byteBuffer) error { + d.HasCheckSum = false + d.WindowSize = 0 + var signature [4]byte + for { + var err error + // Check if we can read more...
+ b, err := br.readSmall(1) + switch err { + case io.EOF, io.ErrUnexpectedEOF: + return io.EOF + default: + return err + case nil: + signature[0] = b[0] + } + // Read the rest, don't allow io.ErrUnexpectedEOF + b, err = br.readSmall(3) + switch err { + case io.EOF: + return io.EOF + default: + return err + case nil: + copy(signature[1:], b) + } + + if !bytes.Equal(signature[1:4], skippableFrameMagic) || signature[0]&0xf0 != 0x50 { + if debugDecoder { + println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString(skippableFrameMagic)) + } + // Break if not skippable frame. + break + } + // Read size to skip + b, err = br.readSmall(4) + if err != nil { + if debugDecoder { + println("Reading Frame Size", err) + } + return err + } + n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + println("Skipping frame with", n, "bytes.") + err = br.skipN(int(n)) + if err != nil { + if debugDecoder { + println("Reading discarded frame", err) + } + return err + } + } + if !bytes.Equal(signature[:], frameMagic) { + if debugDecoder { + println("Got magic numbers: ", signature, "want:", frameMagic) + } + return ErrMagicMismatch + } + + // Read Frame_Header_Descriptor + fhd, err := br.readByte() + if err != nil { + if debugDecoder { + println("Reading Frame_Header_Descriptor", err) + } + return err + } + d.SingleSegment = fhd&(1<<5) != 0 + + if fhd&(1<<3) != 0 { + return errors.New("reserved bit set on frame header") + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + d.WindowSize = 0 + if !d.SingleSegment { + wd, err := br.readByte() + if err != nil { + if debugDecoder { + println("Reading Window_Descriptor", err) + } + return err + } + printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + d.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + d.DictionaryID = nil + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + + b, err := br.readSmall(int(size)) + if err != nil { + println("Reading Dictionary_ID", err) + return err + } + var id uint32 + switch size { + case 1: + id = uint32(b[0]) + case 2: + id = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + if debugDecoder { + println("Dict size", size, "ID:", id) + } + if id > 0 { + // ID 0 means "sorry, no dictionary anyway". + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format + d.DictionaryID = &id + } + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if d.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + d.FrameContentSize = 0 + if fcsSize > 0 { + b, err := br.readSmall(fcsSize) + if err != nil { + println("Reading Frame content", err) + return err + } + switch fcsSize { + case 1: + d.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. 
+ d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + if debugDecoder { + println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]), "singleseg:", d.SingleSegment, "window:", d.WindowSize) + } + } + // Move this to shared. + d.HasCheckSum = fhd&(1<<2) != 0 + if d.HasCheckSum { + if d.crc == nil { + d.crc = xxhash.New() + } + d.crc.Reset() + } + + if d.WindowSize == 0 && d.SingleSegment { + // We may not need a window in this case. + d.WindowSize = d.FrameContentSize + if d.WindowSize < MinWindowSize { + d.WindowSize = MinWindowSize + } + } + + if d.WindowSize > uint64(d.o.maxWindowSize) { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrWindowSizeExceeded + } + // The minimum Window_Size is 1 KB. + if d.WindowSize < MinWindowSize { + if debugDecoder { + println("got window size: ", d.WindowSize) + } + return ErrWindowSizeTooSmall + } + d.history.windowSize = int(d.WindowSize) + if d.o.lowMem && d.history.windowSize < maxBlockSize { + d.history.maxSize = d.history.windowSize * 2 + } else { + d.history.maxSize = d.history.windowSize + maxBlockSize + } + // history contains input - maybe we do something + d.rawInput = br + return nil +} + +// next will start decoding the next block from the stream. +func (d *frameDec) next(block *blockDec) error { + if debugDecoder { + printf("decoding new block %p:%p", block, block.data) + } + err := block.reset(d.rawInput, d.WindowSize) + if err != nil { + println("block error:", err) + // Signal the frame decoder we have a problem. + d.sendErr(block, err) + return err + } + block.input <- struct{}{} + if debugDecoder { + println("next block:", block) + } + d.asyncRunningMu.Lock() + defer d.asyncRunningMu.Unlock() + if !d.asyncRunning { + return nil + } + if block.Last { + // We indicate the frame is done by sending io.EOF + d.decoding <- block + return io.EOF + } + d.decoding <- block + return nil +} + +// sendErr will queue an error block on the frame. +// This will cause the frame decoder to return when it encounters the block. +// Returns true if the error block was queued. +func (d *frameDec) sendErr(block *blockDec, err error) bool { + d.asyncRunningMu.Lock() + defer d.asyncRunningMu.Unlock() + if !d.asyncRunning { + return false + } + + println("sending error", err.Error()) + block.sendErr(err) + d.decoding <- block + return true +} + +// checkCRC will check the checksum if the frame has one. +// Will return ErrCRCMismatch if the CRC check fails, otherwise nil. +func (d *frameDec) checkCRC() error { + if !d.HasCheckSum { + return nil + } + var tmp [4]byte + got := d.crc.Sum64() + // Flip to match file order.
+ tmp[0] = byte(got >> 0) + tmp[1] = byte(got >> 8) + tmp[2] = byte(got >> 16) + tmp[3] = byte(got >> 24) + + // We can overwrite upper tmp now + want, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + return err + } + + if !bytes.Equal(tmp[:], want) { + if debugDecoder { + println("CRC Check Failed:", tmp[:], "!=", want) + } + return ErrCRCMismatch + } + if debugDecoder { + println("CRC ok", tmp[:]) + } + return nil +} + +func (d *frameDec) initAsync() { + if !d.o.lowMem && !d.SingleSegment { + // set max extra size history to 2MB. + d.history.maxSize = d.history.windowSize + maxBlockSize + } + // re-alloc if more than one extra block size. + if d.o.lowMem && cap(d.history.b) > d.history.maxSize+maxBlockSize { + d.history.b = make([]byte, 0, d.history.maxSize) + } + if cap(d.history.b) < d.history.maxSize { + d.history.b = make([]byte, 0, d.history.maxSize) + } + if cap(d.decoding) < d.o.concurrent { + d.decoding = make(chan *blockDec, d.o.concurrent) + } + if debugDecoder { + h := d.history + printf("history init. len: %d, cap: %d", len(h.b), cap(h.b)) + } + d.asyncRunningMu.Lock() + d.asyncRunning = true + d.asyncRunningMu.Unlock() +} + +// startDecoder will start decoding blocks and write them to the writer. +// The decoder will stop as soon as an error occurs or at end of frame. +// When the frame has finished decoding the *bufio.Reader +// containing the remaining input will be sent on frameDec.frameDone. +func (d *frameDec) startDecoder(output chan decodeOutput) { + written := int64(0) + + defer func() { + d.asyncRunningMu.Lock() + d.asyncRunning = false + d.asyncRunningMu.Unlock() + + // Drain the currently decoding. + d.history.error = true + flushdone: + for { + select { + case b := <-d.decoding: + b.history <- &d.history + output <- <-b.result + default: + break flushdone + } + } + println("frame decoder done, signalling done") + d.frameDone.Done() + }() + // Get decoder for first block. + block := <-d.decoding + block.history <- &d.history + for { + var next *blockDec + // Get result + r := <-block.result + if r.err != nil { + println("Result contained error", r.err) + output <- r + return + } + if debugDecoder { + println("got result, from ", d.offset, "to", d.offset+int64(len(r.b))) + d.offset += int64(len(r.b)) + } + if !block.Last { + // Send history to next block + select { + case next = <-d.decoding: + if debugDecoder { + println("Sending ", len(d.history.b), "bytes as history") + } + next.history <- &d.history + default: + // Wait until we have sent the block, so + // other decoders can potentially get the decoder. + next = nil + } + } + + // Add checksum, async to decoding. + if d.HasCheckSum { + n, err := d.crc.Write(r.b) + if err != nil { + r.err = err + if n != len(r.b) { + r.err = io.ErrShortWrite + } + output <- r + return + } + } + written += int64(len(r.b)) + if d.SingleSegment && uint64(written) > d.FrameContentSize { + println("runDecoder: single segment and", uint64(written), ">", d.FrameContentSize) + r.err = ErrFrameSizeExceeded + output <- r + return + } + if block.Last { + r.err = d.checkCRC() + output <- r + return + } + output <- r + if next == nil { + // There was no decoder available, we wait for one now that we have sent to the writer. + if debugDecoder { + println("Sending ", len(d.history.b), " bytes as history") + } + next = <-d.decoding + next.history <- &d.history + } + block = next + } +} + +// runDecoder will create a sync decoder that will decode a block of data. 
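Editor's note: the decode machinery above is normally driven through the package's public Decoder (NewReader/DecodeAll, defined outside this hunk). A minimal round-trip sketch, assuming only that public API:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, _ := zstd.NewWriter(nil) // nil writer is fine for EncodeAll-only use
	frame := enc.EncodeAll([]byte("round trip"), nil)

	// Cap decoder memory; frames requesting a larger window fail with
	// ErrWindowSizeExceeded / ErrDecoderSizeExceeded as in reset() above.
	dec, err := zstd.NewReader(nil, zstd.WithDecoderMaxMemory(64<<20))
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	out, err := dec.DecodeAll(frame, nil)
	fmt.Println(string(out), err) // round trip <nil>
}
```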
+func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { + saved := d.history.b + + // We use the history for output to avoid copying it. + d.history.b = dst + // Store input length, so we only check new data. + crcStart := len(dst) + var err error + for { + err = dec.reset(d.rawInput, d.WindowSize) + if err != nil { + break + } + if debugDecoder { + println("next block:", dec) + } + err = dec.decodeBuf(&d.history) + if err != nil || dec.Last { + break + } + if uint64(len(d.history.b)) > d.o.maxDecodedSize { + err = ErrDecoderSizeExceeded + break + } + if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize { + println("runDecoder: single segment and", uint64(len(d.history.b)), ">", d.o.maxDecodedSize) + err = ErrFrameSizeExceeded + break + } + } + dst = d.history.b + if err == nil { + if d.HasCheckSum { + var n int + n, err = d.crc.Write(dst[crcStart:]) + if err == nil { + if n != len(dst)-crcStart { + err = io.ErrShortWrite + } else { + err = d.checkCRC() + } + } + } + } + d.history.b = saved + return dst, err +} diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/frameenc.go b/hotelReservation/vendor/github.com/klauspost/compress/zstd/frameenc.go new file mode 100644 index 000000000..4ef7f5a3e --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/frameenc.go @@ -0,0 +1,137 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "fmt" + "io" + "math" + "math/bits" +) + +type frameHeader struct { + ContentSize uint64 + WindowSize uint32 + SingleSegment bool + Checksum bool + DictID uint32 +} + +const maxHeaderSize = 14 + +func (f frameHeader) appendTo(dst []byte) ([]byte, error) { + dst = append(dst, frameMagic...) + var fhd uint8 + if f.Checksum { + fhd |= 1 << 2 + } + if f.SingleSegment { + fhd |= 1 << 5 + } + + var dictIDContent []byte + if f.DictID > 0 { + var tmp [4]byte + if f.DictID < 256 { + fhd |= 1 + tmp[0] = uint8(f.DictID) + dictIDContent = tmp[:1] + } else if f.DictID < 1<<16 { + fhd |= 2 + binary.LittleEndian.PutUint16(tmp[:2], uint16(f.DictID)) + dictIDContent = tmp[:2] + } else { + fhd |= 3 + binary.LittleEndian.PutUint32(tmp[:4], f.DictID) + dictIDContent = tmp[:4] + } + } + var fcs uint8 + if f.ContentSize >= 256 { + fcs++ + } + if f.ContentSize >= 65536+256 { + fcs++ + } + if f.ContentSize >= 0xffffffff { + fcs++ + } + + fhd |= fcs << 6 + + dst = append(dst, fhd) + if !f.SingleSegment { + const winLogMin = 10 + windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3 + dst = append(dst, uint8(windowLog)) + } + if f.DictID > 0 { + dst = append(dst, dictIDContent...) + } + switch fcs { + case 0: + if f.SingleSegment { + dst = append(dst, uint8(f.ContentSize)) + } + // Unless SingleSegment is set, frame sizes < 256 are not stored.
+ case 1: + f.ContentSize -= 256 + dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8)) + case 2: + dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24)) + case 3: + dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24), + uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56)) + default: + panic("invalid fcs") + } + return dst, nil +} + +const skippableFrameHeader = 4 + 4 + +// calcSkippableFrame will return a total size to be added for written +// to be divisible by multiple. +// The value will always be > skippableFrameHeader. +// The function will panic if written < 0 or wantMultiple <= 0. +func calcSkippableFrame(written, wantMultiple int64) int { + if wantMultiple <= 0 { + panic("wantMultiple <= 0") + } + if written < 0 { + panic("written < 0") + } + leftOver := written % wantMultiple + if leftOver == 0 { + return 0 + } + toAdd := wantMultiple - leftOver + for toAdd < skippableFrameHeader { + toAdd += wantMultiple + } + return int(toAdd) +} + +// skippableFrame will add a skippable frame with a total size of bytes. +// total should be >= skippableFrameHeader and < math.MaxUint32. +func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) { + if total == 0 { + return dst, nil + } + if total < skippableFrameHeader { + return dst, fmt.Errorf("requested skippable frame (%d) < 8", total) + } + if int64(total) > math.MaxUint32 { + return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total) + } + dst = append(dst, 0x50, 0x2a, 0x4d, 0x18) + f := uint32(total - skippableFrameHeader) + dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)) + start := len(dst) + dst = append(dst, make([]byte, f)...) + _, err := io.ReadFull(r, dst[start:]) + return dst, err +} diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/hotelReservation/vendor/github.com/klauspost/compress/zstd/fse_decoder.go new file mode 100644 index 000000000..e6d3d49b3 --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/fse_decoder.go @@ -0,0 +1,385 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" +) + +const ( + tablelogAbsoluteMax = 9 +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = tablelogAbsoluteMax + 2 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + maxTableMask = (1 << maxTableLog) - 1 + minTablelog = 5 + maxSymbolValue = 255 +) + +// fseDecoder provides temporary storage for compression and decompression. +type fseDecoder struct { + dt [maxTablesize]decSymbol // Decompression table. + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + maxBits uint8 // Maximum number of additional bits + + // used for table creation to avoid allocations. + stateTable [256]uint16 + norm [maxSymbolValue + 1]int16 + preDefined bool +} + +// tableStep returns the next table index. 
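Editor's note: the padding arithmetic in calcSkippableFrame above can be restated standalone. The sketch below mirrors its rounding rule, including the 8-byte skippable-frame-header floor, but is an illustration rather than the vendored function (padTo is a hypothetical name):

```go
package main

import "fmt"

// padTo returns the smallest amount that lands written+add on a multiple
// of m, never smaller than the 4-byte magic + 4-byte length header.
func padTo(written, m int64) int64 {
	left := written % m
	if left == 0 {
		return 0
	}
	add := m - left
	for add < 8 { // a skippable frame cannot be shorter than its header
		add += m
	}
	return add
}

func main() {
	fmt.Println(padTo(1000, 512)) // 24: rounds 1000 up to 1024
	fmt.Println(padTo(1022, 512)) // 514: 2 is below the header, skip to 1536
}
```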
+func tableStep(tableSize uint32) uint32 { + return (tableSize >> 1) + (tableSize >> 3) + 3 +} + +// readNCount will read the symbol distribution so decoding tables can be constructed. +func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error { + var ( + charnum uint16 + previous0 bool + ) + if b.remain() < 4 { + return errors.New("input too small") + } + bitStream := b.Uint32NC() + nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog + if nbBits > tablelogAbsoluteMax { + println("Invalid tablelog:", nbBits) + return errors.New("tableLog too large") + } + bitStream >>= 4 + bitCount := uint(4) + + s.actualTableLog = uint8(nbBits) + remaining := int32((1 << nbBits) + 1) + threshold := int32(1 << nbBits) + gotTotal := int32(0) + nbBits++ + + for remaining > 1 && charnum <= maxSymbol { + if previous0 { + //println("prev0") + n0 := charnum + for (bitStream & 0xFFFF) == 0xFFFF { + //println("24 x 0") + n0 += 24 + if r := b.remain(); r > 5 { + b.advance(2) + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> bitCount + } else { + // end of bit stream + bitStream >>= 16 + bitCount += 16 + } + } + //printf("bitstream: %d, 0b%b", bitStream&3, bitStream) + for (bitStream & 3) == 3 { + n0 += 3 + bitStream >>= 2 + bitCount += 2 + } + n0 += uint16(bitStream & 3) + bitCount += 2 + + if n0 > maxSymbolValue { + return errors.New("maxSymbolValue too small") + } + //println("inserting ", n0-charnum, "zeroes from idx", charnum, "ending before", n0) + for charnum < n0 { + s.norm[uint8(charnum)] = 0 + charnum++ + } + + if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { + b.advance(bitCount >> 3) + bitCount &= 7 + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> bitCount + } else { + bitStream >>= 2 + } + } + + max := (2*threshold - 1) - remaining + var count int32 + + if int32(bitStream)&(threshold-1) < max { + count = int32(bitStream) & (threshold - 1) + if debugAsserts && nbBits < 1 { + panic("nbBits underflow") + } + bitCount += nbBits - 1 + } else { + count = int32(bitStream) & (2*threshold - 1) + if count >= threshold { + count -= max + } + bitCount += nbBits + } + + // extra accuracy + count-- + if count < 0 { + // -1 means +1 + remaining += count + gotTotal -= count + } else { + remaining -= count + gotTotal += count + } + s.norm[charnum&0xff] = int16(count) + charnum++ + previous0 = count == 0 + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { + b.advance(bitCount >> 3) + bitCount &= 7 + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> (bitCount & 31) + } else { + bitCount -= (uint)(8 * (len(b.b) - 4 - b.off)) + b.off = len(b.b) - 4 + bitStream = b.Uint32() >> (bitCount & 31) + } + } + s.symbolLen = charnum + if s.symbolLen <= 1 { + return fmt.Errorf("symbolLen (%d) too small", s.symbolLen) + } + if s.symbolLen > maxSymbolValue+1 { + return fmt.Errorf("symbolLen (%d) too big", s.symbolLen) + } + if remaining != 1 { + return fmt.Errorf("corruption detected (remaining %d != 1)", remaining) + } + if bitCount > 32 { + return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount) + } + if gotTotal != 1<<s.actualTableLog { + return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog) + } + b.advance((bitCount + 7) >> 3) + // println(s.norm[:s.symbolLen], s.symbolLen) + return s.buildDtable() +} + +// decSymbol contains information about a state entry, +// including the state offset base, the output symbol and +// the number of bits to read for the low part of the destination state.
+// Using a composite uint64 is faster than a struct with separate members. +type decSymbol uint64 + +func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol { + return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32) +} + +func (d decSymbol) nbBits() uint8 { + return uint8(d) +} + +func (d decSymbol) addBits() uint8 { + return uint8(d >> 8) +} + +func (d decSymbol) newState() uint16 { + return uint16(d >> 16) +} + +func (d decSymbol) baseline() uint32 { + return uint32(d >> 32) +} + +func (d decSymbol) baselineInt() int { + return int(d >> 32) +} + +func (d *decSymbol) set(nbits, addBits uint8, newState uint16, baseline uint32) { + *d = decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32) +} + +func (d *decSymbol) setNBits(nBits uint8) { + const mask = 0xffffffffffffff00 + *d = (*d & mask) | decSymbol(nBits) +} + +func (d *decSymbol) setAddBits(addBits uint8) { + const mask = 0xffffffffffff00ff + *d = (*d & mask) | (decSymbol(addBits) << 8) +} + +func (d *decSymbol) setNewState(state uint16) { + const mask = 0xffffffff0000ffff + *d = (*d & mask) | decSymbol(state)<<16 +} + +func (d *decSymbol) setBaseline(baseline uint32) { + const mask = 0xffffffff + *d = (*d & mask) | decSymbol(baseline)<<32 +} + +func (d *decSymbol) setExt(addBits uint8, baseline uint32) { + const mask = 0xffff00ff + *d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32) +} + +// decSymbolValue returns the transformed decSymbol for the given symbol. +func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) { + if int(symb) >= len(t) { + return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t)) + } + lu := t[symb] + return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil +} + +// setRLE will set the decoder til RLE mode. +func (s *fseDecoder) setRLE(symbol decSymbol) { + s.actualTableLog = 0 + s.maxBits = symbol.addBits() + s.dt[0] = symbol +} + +// buildDtable will build the decoding table. 
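Editor's note: the decSymbol helpers above pack four fields into one uint64. This illustrative snippet (the pack helper is hypothetical, not part of the package) shows the layout round-tripping:

```go
package main

import "fmt"

// pack mirrors the decSymbol layout: 8 bits nbBits, 8 bits addBits,
// 16 bits newState, 32 bits baseline, all in one uint64.
func pack(nbits, addBits uint8, newState uint16, baseline uint32) uint64 {
	return uint64(nbits) | uint64(addBits)<<8 | uint64(newState)<<16 | uint64(baseline)<<32
}

func main() {
	v := pack(5, 3, 1024, 7)
	// Unpack with the same shifts the accessor methods use.
	fmt.Println(uint8(v), uint8(v>>8), uint16(v>>16), uint32(v>>32)) // 5 3 1024 7
}
```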
+func (s *fseDecoder) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + symbolNext := s.stateTable[:256] + + // Init, lay down lowprob symbols + { + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.dt[highThreshold].setAddBits(uint8(i)) + highThreshold-- + symbolNext[i] = 1 + } else { + symbolNext[i] = uint16(v) + } + } + } + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.dt[position].setAddBits(uint8(ss)) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.dt[:tableSize] { + symbol := v.addBits() + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.dt[u&maxTableMask].setNBits(nBits) + newState := (nextState << nBits) - tableSize + if newState > tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.dt[u&maxTableMask].setNewState(newState) + } + } + return nil +} + +// transform will transform the decoder table into a table usable for +// decoding without having to apply the transformation while decoding. +// The state will contain the base value and the number of bits to read. +func (s *fseDecoder) transform(t []baseOffset) error { + tableSize := uint16(1 << s.actualTableLog) + s.maxBits = 0 + for i, v := range s.dt[:tableSize] { + add := v.addBits() + if int(add) >= len(t) { + return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t)) + } + lu := t[add] + if lu.addBits > s.maxBits { + s.maxBits = lu.addBits + } + v.setExt(lu.addBits, lu.baseLine) + s.dt[i] = v + } + return nil +} + +type fseState struct { + dt []decSymbol + state decSymbol +} + +// Initialize and decodeAsync first state and symbol. +func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { + s.dt = dt + br.fill() + s.state = dt[br.getBits(tableLog)] +} + +// next returns the current symbol and sets the next state. +// At least tablelog bits must be available in the bit reader. +func (s *fseState) next(br *bitReader) { + lowBits := uint16(br.getBits(s.state.nbBits())) + s.state = s.dt[s.state.newState()+lowBits] +} + +// finished returns true if all bits have been read from the bitstream +// and the next state would require reading bits from the input. +func (s *fseState) finished(br *bitReader) bool { + return br.finished() && s.state.nbBits() > 0 +} + +// final returns the current state symbol without decoding the next. +func (s *fseState) final() (int, uint8) { + return s.state.baselineInt(), s.state.addBits() +} + +// final returns the current state symbol without decoding the next. +func (s decSymbol) final() (int, uint8) { + return s.baselineInt(), s.addBits() +} + +// nextFast returns the next symbol and sets the next state. +// This can only be used if no symbols are 0 bits. 
+// At least tablelog bits must be available in the bit reader. +func (s *fseState) nextFast(br *bitReader) (uint32, uint8) { + lowBits := uint16(br.getBitsFast(s.state.nbBits())) + s.state = s.dt[s.state.newState()+lowBits] + return s.state.baseline(), s.state.addBits() +} diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/hotelReservation/vendor/github.com/klauspost/compress/zstd/fse_encoder.go new file mode 100644 index 000000000..b4757ee3f --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/fse_encoder.go @@ -0,0 +1,725 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math" +) + +const ( + // For encoding we only support up to + maxEncTableLog = 8 + maxEncTablesize = 1 << maxTableLog + maxEncTableMask = (1 << maxTableLog) - 1 + minEncTablelog = 5 + maxEncSymbolValue = maxMatchLengthSymbol +) + +// Scratch provides temporary storage for compression and decompression. +type fseEncoder struct { + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + ct cTable // Compression tables. + maxCount int // count of the most probable symbol + zeroBits bool // no bits has prob > 50%. + clearCount bool // clear count + useRLE bool // This encoder is for RLE + preDefined bool // This encoder is predefined. + reUsed bool // Set to know when the encoder has been reused. + rleVal uint8 // RLE Symbol + maxBits uint8 // Maximum output bits after transform. + + // TODO: Technically zstd should be fine with 64 bytes. + count [256]uint32 + norm [256]int16 +} + +// cTable contains tables used for compression. +type cTable struct { + tableSymbol []byte + stateTable []uint16 + symbolTT []symbolTransform +} + +// symbolTransform contains the state transform for a symbol. +type symbolTransform struct { + deltaNbBits uint32 + deltaFindState int16 + outBits uint8 +} + +// String prints values as a human readable string. +func (s symbolTransform) String() string { + return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits) +} + +// Histogram allows to populate the histogram and skip that step in the compression, +// It otherwise allows to inspect the histogram when compression is done. +// To indicate that you have populated the histogram call HistogramFinished +// with the value of the highest populated symbol, as well as the number of entries +// in the most populated entry. These are accepted at face value. +// The returned slice will always be length 256. +func (s *fseEncoder) Histogram() []uint32 { + return s.count[:] +} + +// HistogramFinished can be called to indicate that the histogram has been populated. +// maxSymbol is the index of the highest set symbol of the next data segment. +// maxCount is the number of entries in the most populated entry. +// These are accepted at face value. +func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) { + s.maxCount = maxCount + s.symbolLen = uint16(maxSymbol) + 1 + s.clearCount = maxCount != 0 +} + +// prepare will prepare and allocate scratch tables used for both compression and decompression. 
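Editor's note: Histogram/HistogramFinished above define a contract: the caller may fill the 256-entry count table itself, then report the highest symbol present and the count of the most frequent one. A self-contained sketch of that contract (the histogram helper is hypothetical):

```go
package main

import "fmt"

// histogram fills a 256-entry count table and derives the two values
// HistogramFinished expects, taken at face value by the encoder.
func histogram(data []byte) (counts [256]uint32, maxSymbol uint8, maxCount int) {
	for _, b := range data {
		counts[b]++
	}
	for i, c := range counts {
		if c > 0 {
			maxSymbol = uint8(i)
		}
		if int(c) > maxCount {
			maxCount = int(c)
		}
	}
	return
}

func main() {
	_, sym, cnt := histogram([]byte("abracadabra"))
	fmt.Println(sym, cnt) // 114 5: 'r' is the highest symbol, 'a' occurs 5 times
}
```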
+func (s *fseEncoder) prepare() (*fseEncoder, error) { + if s == nil { + s = &fseEncoder{} + } + s.useRLE = false + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + return s, nil +} + +// allocCtable will allocate tables needed for compression. +// If existing tables a re big enough, they are simply re-used. +func (s *fseEncoder) allocCtable() { + tableSize := 1 << s.actualTableLog + // get tableSymbol that is big enough. + if cap(s.ct.tableSymbol) < tableSize { + s.ct.tableSymbol = make([]byte, tableSize) + } + s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] + + ctSize := tableSize + if cap(s.ct.stateTable) < ctSize { + s.ct.stateTable = make([]uint16, ctSize) + } + s.ct.stateTable = s.ct.stateTable[:ctSize] + + if cap(s.ct.symbolTT) < 256 { + s.ct.symbolTT = make([]symbolTransform, 256) + } + s.ct.symbolTT = s.ct.symbolTT[:256] +} + +// buildCTable will populate the compression table so it is ready to be used. +func (s *fseEncoder) buildCTable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + var cumul [256]int16 + + s.allocCtable() + tableSymbol := s.ct.tableSymbol[:tableSize] + // symbol start positions + { + cumul[0] = 0 + for ui, v := range s.norm[:s.symbolLen-1] { + u := byte(ui) // one less than reference + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = u + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + } + // Encode last symbol separately to avoid overflowing u + u := int(s.symbolLen - 1) + v := s.norm[s.symbolLen-1] + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = byte(u) + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + if uint32(cumul[s.symbolLen]) != tableSize { + return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) + } + cumul[s.symbolLen] = int16(tableSize) + 1 + } + // Spread symbols + s.zeroBits = false + { + step := tableStep(tableSize) + tableMask := tableSize - 1 + var position uint32 + // if any symbol > largeLimit, we may have 0 bits output. 
+ largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = total - 1 + total++ + default: + maxBitsOut := uint32(tableLog) - highBit(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = total - v + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +func (s *fseEncoder) setRLE(val byte) { + s.allocCtable() + s.actualTableLog = 0 + s.ct.stateTable = s.ct.stateTable[:1] + s.ct.symbolTT[val] = symbolTransform{ + deltaFindState: 0, + deltaNbBits: 0, + } + if debugEncoder { + println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) + } + s.rleVal = val + s.useRLE = true +} + +// setBits will set output bits for the transform. +// if nil is provided, the number of bits is equal to the index. +func (s *fseEncoder) setBits(transform []byte) { + if s.reUsed || s.preDefined { + return + } + if s.useRLE { + if transform == nil { + s.ct.symbolTT[s.rleVal].outBits = s.rleVal + s.maxBits = s.rleVal + return + } + s.maxBits = transform[s.rleVal] + s.ct.symbolTT[s.rleVal].outBits = s.maxBits + return + } + if transform == nil { + for i := range s.ct.symbolTT[:s.symbolLen] { + s.ct.symbolTT[i].outBits = uint8(i) + } + s.maxBits = uint8(s.symbolLen - 1) + return + } + s.maxBits = 0 + for i, v := range transform[:s.symbolLen] { + s.ct.symbolTT[i].outBits = v + if v > s.maxBits { + // We could assume bits always going up, but we play safe. + s.maxBits = v + } + } +} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +// If successful, compression tables will also be made ready. 
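Editor's note: normalizeCount (below) scales raw counts so they sum to a power-of-two table size. The sketch that follows is a deliberately simplified restatement of that idea — no -1 low-probability slots and no normalizeCount2 fallback — only to make the rounding flow visible; it is not the vendored algorithm:

```go
package main

import "fmt"

// normalize scales counts to sum to 1<<tableLog, keeps every present
// symbol >= 1, and dumps the rounding error on the most probable symbol.
func normalize(counts []uint32, total int, tableLog uint8) []int16 {
	norm := make([]int16, len(counts))
	remaining := int16(1 << tableLog)
	largest := 0
	for i, c := range counts {
		if c == 0 {
			continue
		}
		n := int16(uint64(c) * (uint64(1) << tableLog) / uint64(total))
		if n == 0 {
			n = 1 // every present symbol must keep a slot
		}
		if c > counts[largest] {
			largest = i
		}
		norm[i] = n
		remaining -= n
	}
	norm[largest] += remaining // absorb rounding error
	return norm
}

func main() {
	fmt.Println(normalize([]uint32{90, 9, 1}, 100, 6)) // [58 5 1], sums to 64
}
```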
+func (s *fseEncoder) normalizeCount(length int) error { + if s.reUsed { + return nil + } + s.optimalTableLog(length) + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(length) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(length >> tableLog) + ) + if s.maxCount == length { + s.useRLE = true + return nil + } + s.useRLE = false + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + err := s.normalizeCount2(length) + if err != nil { + return err + } + if debugAsserts { + err = s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() + } + s.norm[largest] += stillToDistribute + if debugAsserts { + err := s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() +} + +// Secondary normalization method. +// To be used when primary method fails. +func (s *fseEncoder) normalizeCount2(length int) error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(length) + tableLog = s.actualTableLog + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = (total * 3) / (toDistribute * 2) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - 
sStart + ) + if weight < 1 { + return errors.New("weight < 1") + } + s.norm[i] = int16(weight) + tmpTotal = end + } + } + return nil +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *fseEncoder) optimalTableLog(length int) { + tableLog := uint8(maxEncTableLog) + minBitsSrc := highBit(uint32(length)) + 1 + minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2 + minBits := uint8(minBitsSymbols) + if minBitsSrc < minBitsSymbols { + minBits = uint8(minBitsSrc) + } + + maxBitsSrc := uint8(highBit(uint32(length-1))) - 2 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minEncTablelog { + tableLog = minEncTablelog + } + if tableLog > maxEncTableLog { + tableLog = maxEncTableLog + } + s.actualTableLog = tableLog +} + +// validateNorm validates the normalized histogram table. +func (s *fseEncoder) validateNorm() (err error) { + var total int + for _, v := range s.norm[:s.symbolLen] { + if v >= 0 { + total += int(v) + } else { + total -= int(v) + } + } + defer func() { + if err == nil { + return + } + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) + for i, v := range s.norm[:s.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) + } + }() + if total != (1 << s.actualTableLog) { + return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog) + } + for i, v := range s.norm[s.symbolLen:] { + if v != 0 { + return fmt.Errorf("warning: Found symbol out of range, %d after cut", i) + } + } + return nil +} + +// writeCount will write the normalized histogram count to header. +// This is read back by readNCount. +func (s *fseEncoder) writeCount(out []byte) ([]byte, error) { + if s.useRLE { + return append(out, s.rleVal), nil + } + if s.preDefined { + // Never needed. + return out, nil + } + + var ( + tableLog = s.actualTableLog + tableSize = 1 << tableLog + previous0 bool + charnum uint16 + + maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + 2 + + // Write Table Size + bitStream = uint32(tableLog - minEncTablelog) + bitCount = uint(4) + remaining = int16(tableSize + 1) /* +1 for extra accuracy */ + threshold = int16(tableSize) + nbBits = uint(tableLog + 1) + outP = len(out) + ) + if cap(out) < outP+maxHeaderSize { + out = append(out, make([]byte, maxHeaderSize*3)...) + out = out[:len(out)-maxHeaderSize*3] + } + out = out[:outP+maxHeaderSize] + + // stops at 1 + for remaining > 1 { + if previous0 { + start := charnum + for s.norm[charnum] == 0 { + charnum++ + } + for charnum >= start+24 { + start += 24 + bitStream += uint32(0xFFFF) << bitCount + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + } + for charnum >= start+3 { + start += 3 + bitStream += 3 << bitCount + bitCount += 2 + } + bitStream += uint32(charnum-start) << bitCount + bitCount += 2 + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + count := s.norm[charnum] + charnum++ + max := (2*threshold - 1) - remaining + if count < 0 { + remaining += count + } else { + remaining -= count + } + count++ // +1 for extra accuracy + if count >= threshold { + count += max // [0..max[ [max..threshold[ (...)
[threshold+max 2*threshold[ + } + bitStream += uint32(count) << bitCount + bitCount += nbBits + if count < max { + bitCount-- + } + + previous0 = count == 1 + if remaining < 1 { + return nil, errors.New("internal error: remaining < 1") + } + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + if outP+2 > len(out) { + return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen]) + } + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += int((bitCount + 7) / 8) + + if charnum > s.symbolLen { + return nil, errors.New("internal error: charnum > s.symbolLen") + } + return out[:outP], nil +} + +// Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits) +// note 1 : assume symbolValue is valid (<= maxSymbolValue) +// note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits * +func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 { + minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16 + threshold := (minNbBits + 1) << 16 + if debugAsserts { + if !(s.actualTableLog < 16) { + panic("!s.actualTableLog < 16") + } + // ensure enough room for renormalization double shift + if !(uint8(accuracyLog) < 31-s.actualTableLog) { + panic("!uint8(accuracyLog) < 31-s.actualTableLog") + } + } + tableSize := uint32(1) << s.actualTableLog + deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize) + // linear interpolation (very approximate) + normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog + bitMultiplier := uint32(1) << accuracyLog + if debugAsserts { + if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold { + panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold") + } + if normalizedDeltaFromThreshold > bitMultiplier { + panic("normalizedDeltaFromThreshold > bitMultiplier") + } + } + return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold +} + +// Returns the cost in bits of encoding the distribution in count using ctable. +// Histogram should only be up to the last non-zero symbol. +// Returns an -1 if ctable cannot represent all the symbols in count. +func (s *fseEncoder) approxSize(hist []uint32) uint32 { + if int(s.symbolLen) < len(hist) { + // More symbols than we have. + return math.MaxUint32 + } + if s.useRLE { + // We will never reuse RLE encoders. + return math.MaxUint32 + } + const kAccuracyLog = 8 + badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog + var cost uint32 + for i, v := range hist { + if v == 0 { + continue + } + if s.norm[i] == 0 { + return math.MaxUint32 + } + bitCost := s.bitCost(uint8(i), kAccuracyLog) + if bitCost > badCost { + return math.MaxUint32 + } + cost += v * bitCost + } + return cost >> kAccuracyLog +} + +// maxHeaderSize returns the maximum header size in bits. +// This is not exact size, but we want a penalty for new tables anyway. +func (s *fseEncoder) maxHeaderSize() uint32 { + if s.preDefined { + return 0 + } + if s.useRLE { + return 8 + } + return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8 +} + +// cState contains the compression state of a stream. 
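Editor's note: bitCost above returns symbol costs in fixed point, with accuracyLog fractional bits. The same convention, computed via math.Log2 for clarity instead of the table-based linear interpolation (fracBits is a hypothetical helper):

```go
package main

import (
	"fmt"
	"math"
)

// fracBits expresses -log2(prob) bits in fixed point with accuracyLog
// fractional bits, matching the units bitCost works in.
func fracBits(prob float64, accuracyLog uint) uint32 {
	return uint32(-math.Log2(prob) * float64(uint32(1)<<accuracyLog))
}

func main() {
	fmt.Println(fracBits(0.25, 8)) // 512: exactly 2.0 bits in 8-bit fixed point
	fmt.Println(fracBits(0.30, 8)) // 444: about 1.74 bits
}
```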
+type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. +func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + if len(c.stateTable) == 1 { + // RLE + c.stateTable[0] = uint16(0) + c.state = 0 + return + } + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + int32(first.deltaFindState) + c.state = c.stateTable[lu] +} + +// encode the output symbol provided and write it to the bitstream. +func (c *cState) encode(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + int32(symbolTT.deltaFindState) + c.bw.addBits16NC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) +} diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/hotelReservation/vendor/github.com/klauspost/compress/zstd/fse_predefined.go new file mode 100644 index 000000000..474cb77d2 --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/fse_predefined.go @@ -0,0 +1,158 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "math" + "sync" +) + +var ( + // fsePredef are the predefined fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. + fsePredef [3]fseDecoder + + // fsePredefEnc are the predefined encoder based on fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. + fsePredefEnc [3]fseEncoder + + // symbolTableX contain the transformations needed for each type as defined in + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + symbolTableX [3][]baseOffset + + // maxTableSymbol is the biggest supported symbol for each table type + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol} + + // bitTables is the bits table for each table. + bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]} +) + +type tableIndex uint8 + +const ( + // indexes for fsePredef and symbolTableX + tableLiteralLengths tableIndex = 0 + tableOffsets tableIndex = 1 + tableMatchLengths tableIndex = 2 + + maxLiteralLengthSymbol = 35 + maxOffsetLengthSymbol = 30 + maxMatchLengthSymbol = 52 +) + +// baseOffset is used for calculating transformations. +type baseOffset struct { + baseLine uint32 + addBits uint8 +} + +// fillBase will precalculate base offsets with the given bit distributions. 
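Editor's note: to see what fillBase (below) produces, here is the cumulative table for the first few literal-length extra-bits entries (base 16, bits 1,1,1,1,2, as used by initPredefined further down); each entry covers 1<<addBits values starting at its baseline:

```go
package main

import "fmt"

func main() {
	base := uint32(16)
	for _, bits := range []uint8{1, 1, 1, 1, 2} {
		fmt.Printf("baseline=%d addBits=%d (covers %d..%d)\n",
			base, bits, base, base+(1<<bits)-1)
		base += 1 << bits // the cumulative step fillBase applies
	}
	// baseline=16..17, 18..19, 20..21, 22..23, then 24..27 with 2 bits
}
```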
+func fillBase(dst []baseOffset, base uint32, bits ...uint8) { + if len(bits) != len(dst) { + panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits))) + } + for i, bit := range bits { + if base > math.MaxInt32 { + panic("invalid decoding table, base overflows int32") + } + + dst[i] = baseOffset{ + baseLine: base, + addBits: bit, + } + base += 1 << bit + } +} + +var predef sync.Once + +func initPredefined() { + predef.Do(func() { + // Literals length codes + tmp := make([]baseOffset, 36) + for i := range tmp[:16] { + tmp[i] = baseOffset{ + baseLine: uint32(i), + addBits: 0, + } + } + fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableLiteralLengths] = tmp + + // Match length codes + tmp = make([]baseOffset, 53) + for i := range tmp[:32] { + tmp[i] = baseOffset{ + // The transformation adds the minimum match length of 3. + baseLine: uint32(i) + 3, + addBits: 0, + } + } + fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableMatchLengths] = tmp + + // Offset codes + tmp = make([]baseOffset, maxOffsetBits+1) + tmp[1] = baseOffset{ + baseLine: 1, + addBits: 1, + } + fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30) + symbolTableX[tableOffsets] = tmp + + // Fill predefined tables and transform them. + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + for i := range fsePredef[:] { + f := &fsePredef[i] + switch tableIndex(i) { + case tableLiteralLengths: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243 + f.actualTableLog = 6 + copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, + -1, -1, -1, -1}) + f.symbolLen = 36 + case tableOffsets: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281 + f.actualTableLog = 5 + copy(f.norm[:], []int16{ + 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1}) + f.symbolLen = 29 + case tableMatchLengths: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304 + f.actualTableLog = 6 + copy(f.norm[:], []int16{ + 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, + -1, -1, -1, -1, -1}) + f.symbolLen = 53 + } + if err := f.buildDtable(); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + if err := f.transform(symbolTableX[i]); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + f.preDefined = true + + // Create encoder as well + enc := &fsePredefEnc[i] + copy(enc.norm[:], f.norm[:]) + enc.symbolLen = f.symbolLen + enc.actualTableLog = f.actualTableLog + if err := enc.buildCTable(); err != nil { + panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err)) + } + enc.setBits(bitTables[i]) + enc.preDefined = true + } + }) +} diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/hash.go b/hotelReservation/vendor/github.com/klauspost/compress/zstd/hash.go new file mode 100644 index 000000000..cf33f29a1 --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/hash.go @@ -0,0 +1,41 @@ +//
Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +const ( + prime3bytes = 506832829 + prime4bytes = 2654435761 + prime5bytes = 889523592379 + prime6bytes = 227718039650203 + prime7bytes = 58295818150454627 + prime8bytes = 0xcf1bbcdcb7a56463 +) + +// hashLen returns a hash of the lowest mls bytes of u with length output bits. +// mls must be >=3 and <=8. Any other value will return hash for 4 bytes. +// length should always be < 32. +// Preferably length and mls should be constants for inlining. +func hashLen(u uint64, length, mls uint8) uint32 { + switch mls { + case 3: + return (uint32(u<<8) * prime3bytes) >> (32 - length) + case 5: + return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length)) + case 6: + return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length)) + case 7: + return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length)) + case 8: + return uint32((u * prime8bytes) >> (64 - length)) + default: + return (uint32(u) * prime4bytes) >> (32 - length) + } +} + +// hash3 returns the hash of the lower 3 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <32. +func hash3(u uint32, h uint8) uint32 { + return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31) +} diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/history.go b/hotelReservation/vendor/github.com/klauspost/compress/zstd/history.go new file mode 100644 index 000000000..f783e32d2 --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/history.go @@ -0,0 +1,89 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "github.com/klauspost/compress/huff0" +) + +// history contains the information transferred between blocks. +type history struct { + b []byte + huffTree *huff0.Scratch + recentOffsets [3]int + decoders sequenceDecs + windowSize int + maxSize int + error bool + dict *dict +} + +// reset will reset the history to the initial state of a frame. +// The history must already have been initialized to the desired size. +func (h *history) reset() { + h.b = h.b[:0] + h.error = false + h.recentOffsets = [3]int{1, 4, 8} + if f := h.decoders.litLengths.fse; f != nil && !f.preDefined { + fseDecoderPool.Put(f) + } + if f := h.decoders.offsets.fse; f != nil && !f.preDefined { + fseDecoderPool.Put(f) + } + if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined { + fseDecoderPool.Put(f) + } + h.decoders = sequenceDecs{} + if h.huffTree != nil { + if h.dict == nil || h.dict.litEnc != h.huffTree { + huffDecoderPool.Put(h.huffTree) + } + } + h.huffTree = nil + h.dict = nil + //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b)) +} + +func (h *history) setDict(dict *dict) { + if dict == nil { + return + } + h.dict = dict + h.decoders.litLengths = dict.llDec + h.decoders.offsets = dict.ofDec + h.decoders.matchLengths = dict.mlDec + h.recentOffsets = dict.offsets + h.huffTree = dict.litEnc +} + +// append bytes to history. +// This function will make sure there is space for it, +// if the buffer has been allocated with enough extra space.
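+// If it does not fit, the oldest history is discarded so that at most windowSize bytes are kept.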
+func (h *history) append(b []byte) { + if len(b) >= h.windowSize { + // Discard all history by simply overwriting + h.b = h.b[:h.windowSize] + copy(h.b, b[len(b)-h.windowSize:]) + return + } + + // If there is space, append it. + if len(b) < cap(h.b)-len(h.b) { + h.b = append(h.b, b...) + return + } + + // Move data down so we only have window size left. + // We know we have less than window size in b at this point. + discard := len(b) + len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] + copy(h.b[h.windowSize-len(b):], b) +} + +// append bytes to history without ever discarding anything. +func (h *history) appendKeep(b []byte) { + h.b = append(h.b, b...) +} diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt b/hotelReservation/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt new file mode 100644 index 000000000..24b53065f --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/hotelReservation/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md new file mode 100644 index 000000000..69aa3bb58 --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md @@ -0,0 +1,58 @@ +# xxhash + +VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. + + +[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) +[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) + +xxhash is a Go implementation of the 64-bit +[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. + +This package provides a straightforward API: + +``` +func Sum64(b []byte) uint64 +func Sum64String(s string) uint64 +type Digest struct{ ... } + func New() *Digest +``` + +The `Digest` type implements hash.Hash64. Its key methods are: + +``` +func (*Digest) Write([]byte) (int, error) +func (*Digest) WriteString(string) (int, error) +func (*Digest) Sum64() uint64 +``` + +This implementation provides a fast pure-Go implementation and an even faster +assembly implementation for amd64. 
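+To make the API above concrete, here is a minimal usage sketch. It is written against the original github.com/cespare/xxhash module, since this vendored copy lives under internal/ and cannot be imported directly: + +``` +package main + +import ( + "fmt" + + "github.com/cespare/xxhash" +) + +func main() { + // One-shot hashing of a byte slice. + fmt.Printf("%016x\n", xxhash.Sum64([]byte("hello world"))) + + // Streaming via the Digest type, which implements hash.Hash64. + d := xxhash.New() + d.Write([]byte("hello ")) + d.WriteString("world") + fmt.Printf("%016x\n", d.Sum64()) // matches the one-shot result +} +```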
+ +## Benchmarks + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64. + +| input size | purego | asm | +| --- | --- | --- | +| 5 B | 979.66 MB/s | 1291.17 MB/s | +| 100 B | 7475.26 MB/s | 7973.40 MB/s | +| 4 KB | 17573.46 MB/s | 17602.65 MB/s | +| 10 MB | 17131.46 MB/s | 17142.16 MB/s | + +These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using +the following commands under Go 1.11.2: + +``` +$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' +$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +``` + +## Projects using this package + +- [InfluxDB](https://github.com/influxdata/influxdb) +- [Prometheus](https://github.com/prometheus/prometheus) +- [FreeCache](https://github.com/coocood/freecache) diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/hotelReservation/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go new file mode 100644 index 000000000..2c112a0ab --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go @@ -0,0 +1,237 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. +// THIS IS VENDORED: Go to github.com/cespare/xxhash for original package. + +package xxhash + +import ( + "encoding/binary" + "errors" + "math/bits" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where +// possible in the Go code is worth a small (but measurable) performance boost +// by avoiding some MOVQs. Vars are needed for the asm and also are useful for +// convenience in the Go code in a few places where we need to intentionally +// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the +// result overflows a uint64). +var ( + prime1v = prime1 + prime2v = prime2 + prime3v = prime3 + prime4v = prime4 + prime5v = prime5 +) + +// Digest implements hash.Hash64. +type Digest struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total uint64 + mem [32]byte + n int // how much of mem is used +} + +// New creates a new Digest that computes the 64-bit xxHash algorithm. +func New() *Digest { + var d Digest + d.Reset() + return &d +} + +// Reset clears the Digest's state so that it can be reused. +func (d *Digest) Reset() { + d.v1 = prime1v + prime2 + d.v2 = prime2 + d.v3 = 0 + d.v4 = -prime1v + d.total = 0 + d.n = 0 +} + +// Size always returns 8 bytes. +func (d *Digest) Size() int { return 8 } + +// BlockSize always returns 32 bytes. +func (d *Digest) BlockSize() int { return 32 } + +// Write adds more data to d. It always returns len(b), nil. +func (d *Digest) Write(b []byte) (n int, err error) { + n = len(b) + d.total += uint64(n) + + if d.n+n < 32 { + // This new data doesn't even fill the current block. + copy(d.mem[d.n:], b) + d.n += n + return + } + + if d.n > 0 { + // Finish off the partial block. + copy(d.mem[d.n:], b) + d.v1 = round(d.v1, u64(d.mem[0:8])) + d.v2 = round(d.v2, u64(d.mem[8:16])) + d.v3 = round(d.v3, u64(d.mem[16:24])) + d.v4 = round(d.v4, u64(d.mem[24:32])) + b = b[32-d.n:] + d.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. + nw := writeBlocks(d, b) + b = b[nw:] + } + + // Store any remaining partial block. 
+ copy(d.mem[:], b) + d.n = len(b) + + return +} + +// Sum appends the current hash to b and returns the resulting slice. +func (d *Digest) Sum(b []byte) []byte { + s := d.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +// Sum64 returns the current hash. +func (d *Digest) Sum64() uint64 { + var h uint64 + + if d.total >= 32 { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = d.v3 + prime5 + } + + h += d.total + + i, end := 0, d.n + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(d.mem[i:i+8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(d.mem[i:i+4])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for i < end { + h ^= uint64(d.mem[i]) * prime5 + h = rol11(h) * prime1 + i++ + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +const ( + magic = "xxh\x06" + marshaledSize = len(magic) + 8*5 + 32 +) + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d *Digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + b = appendUint64(b, d.v1) + b = appendUint64(b, d.v2) + b = appendUint64(b, d.v3) + b = appendUint64(b, d.v4) + b = appendUint64(b, d.total) + b = append(b, d.mem[:d.n]...) + b = b[:len(b)+len(d.mem)-d.n] + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +func (d *Digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("xxhash: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("xxhash: invalid hash state size") + } + b = b[len(magic):] + b, d.v1 = consumeUint64(b) + b, d.v2 = consumeUint64(b) + b, d.v3 = consumeUint64(b) + b, d.v4 = consumeUint64(b) + b, d.total = consumeUint64(b) + copy(d.mem[:], b) + d.n = int(d.total % uint64(len(d.mem))) + return nil +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.LittleEndian.PutUint64(a[:], x) + return append(b, a[:]...) 
+} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := u64(b) + return b[8:], x +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go b/hotelReservation/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go new file mode 100644 index 000000000..0ae847f75 --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go @@ -0,0 +1,12 @@ +//go:build !appengine && gc && !purego +// +build !appengine,gc,!purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(d *Digest, b []byte) int diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/hotelReservation/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s new file mode 100644 index 000000000..be8db5bf7 --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s @@ -0,0 +1,215 @@ +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Register allocation: +// AX h +// SI pointer to advance through b +// DX n +// BX loop end +// R8 v1, k1 +// R9 v2 +// R10 v3 +// R11 v4 +// R12 tmp +// R13 prime1v +// R14 prime2v +// DI prime4v + +// round reads from and advances the buffer pointer in SI. +// It assumes that R13 has prime1v and R14 has prime2v. +#define round(r) \ + MOVQ (SI), R12 \ + ADDQ $8, SI \ + IMULQ R14, R12 \ + ADDQ R12, r \ + ROLQ $31, r \ + IMULQ R13, r + +// mergeRound applies a merge round on the two registers acc and val. +// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v. +#define mergeRound(acc, val) \ + IMULQ R14, val \ + ROLQ $31, val \ + IMULQ R13, val \ + XORQ val, acc \ + IMULQ R13, acc \ + ADDQ DI, acc + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT, $0-32 + // Load fixed primes. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + MOVQ ·prime4v(SB), DI + + // Load slice. + MOVQ b_base+0(FP), SI + MOVQ b_len+8(FP), DX + LEAQ (SI)(DX*1), BX + + // The first loop limit will be len(b)-32. + SUBQ $32, BX + + // Check whether we have at least one block. + CMPQ DX, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). + MOVQ R13, R8 + ADDQ R14, R8 + MOVQ R14, R9 + XORQ R10, R10 + XORQ R11, R11 + SUBQ R13, R11 + + // Loop until SI > BX. 
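+ // Each iteration consumes one 32-byte block: one 8-byte round per accumulator (R8-R11).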
+blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ SI, BX + JLE blockLoop + + MOVQ R8, AX + ROLQ $1, AX + MOVQ R9, R12 + ROLQ $7, R12 + ADDQ R12, AX + MOVQ R10, R12 + ROLQ $12, R12 + ADDQ R12, AX + MOVQ R11, R12 + ROLQ $18, R12 + ADDQ R12, AX + + mergeRound(AX, R8) + mergeRound(AX, R9) + mergeRound(AX, R10) + mergeRound(AX, R11) + + JMP afterBlocks + +noBlocks: + MOVQ ·prime5v(SB), AX + +afterBlocks: + ADDQ DX, AX + + // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8. + ADDQ $24, BX + + CMPQ SI, BX + JG fourByte + +wordLoop: + // Calculate k1. + MOVQ (SI), R8 + ADDQ $8, SI + IMULQ R14, R8 + ROLQ $31, R8 + IMULQ R13, R8 + + XORQ R8, AX + ROLQ $27, AX + IMULQ R13, AX + ADDQ DI, AX + + CMPQ SI, BX + JLE wordLoop + +fourByte: + ADDQ $4, BX + CMPQ SI, BX + JG singles + + MOVL (SI), R8 + ADDQ $4, SI + IMULQ R13, R8 + XORQ R8, AX + + ROLQ $23, AX + IMULQ R14, AX + ADDQ ·prime3v(SB), AX + +singles: + ADDQ $4, BX + CMPQ SI, BX + JGE finalize + +singlesLoop: + MOVBQZX (SI), R12 + ADDQ $1, SI + IMULQ ·prime5v(SB), R12 + XORQ R12, AX + + ROLQ $11, AX + IMULQ R13, AX + + CMPQ SI, BX + JL singlesLoop + +finalize: + MOVQ AX, R12 + SHRQ $33, R12 + XORQ R12, AX + IMULQ R14, AX + MOVQ AX, R12 + SHRQ $29, R12 + XORQ R12, AX + IMULQ ·prime3v(SB), AX + MOVQ AX, R12 + SHRQ $32, R12 + XORQ R12, AX + + MOVQ AX, ret+24(FP) + RET + +// writeBlocks uses the same registers as above except that it uses AX to store +// the d pointer. + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT, $0-40 + // Load fixed primes needed for round. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + + // Load slice. + MOVQ b_base+8(FP), SI + MOVQ b_len+16(FP), DX + LEAQ (SI)(DX*1), BX + SUBQ $32, BX + + // Load vN from d. + MOVQ d+0(FP), AX + MOVQ 0(AX), R8 // v1 + MOVQ 8(AX), R9 // v2 + MOVQ 16(AX), R10 // v3 + MOVQ 24(AX), R11 // v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. +blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ SI, BX + JLE blockLoop + + // Copy vN back to d. + MOVQ R8, 0(AX) + MOVQ R9, 8(AX) + MOVQ R10, 16(AX) + MOVQ R11, 24(AX) + + // The number of bytes written is SI minus the old base pointer. + SUBQ b_base+8(FP), SI + MOVQ SI, ret+32(FP) + + RET diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/hotelReservation/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go new file mode 100644 index 000000000..1f52f296e --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go @@ -0,0 +1,77 @@ +//go:build !amd64 || appengine || !gc || purego +// +build !amd64 appengine !gc purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +func Sum64(b []byte) uint64 { + // A simpler version would be + // d := New() + // d.Write(b) + // return d.Sum64() + // but this is faster, particularly for small inputs. 
+ + n := len(b) + var h uint64 + + if n >= 32 { + v1 := prime1v + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -prime1v + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + i, end := 0, len(b) + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(b[i:i+8:len(b)])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for ; i < end; i++ { + h ^= uint64(b[i]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(d *Digest, b []byte) int { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + n := len(b) + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 + return n - len(b) +} diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go b/hotelReservation/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go new file mode 100644 index 000000000..6f3b0cb10 --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go @@ -0,0 +1,11 @@ +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s. +func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} + +// WriteString adds more data to d. It always returns len(s), nil. +func (d *Digest) WriteString(s string) (n int, err error) { + return d.Write([]byte(s)) +} diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/seqdec.go b/hotelReservation/vendor/github.com/klauspost/compress/zstd/seqdec.go new file mode 100644 index 000000000..1dd39e63b --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -0,0 +1,492 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "io" +) + +type seq struct { + litLen uint32 + matchLen uint32 + offset uint32 + + // Codes are stored here for the encoder + // so they only have to be looked up once. + llCode, mlCode, ofCode uint8 +} + +func (s seq) String() string { + if s.offset <= 3 { + if s.offset == 0 { + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset: INVALID (0)") + } + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset, " (repeat)") + } + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset-3, " (new)") +} + +type seqCompMode uint8 + +const ( + compModePredefined seqCompMode = iota + compModeRLE + compModeFSE + compModeRepeat +) + +type sequenceDec struct { + // decoder keeps track of the current state and updates it from the bitstream. + fse *fseDecoder + state fseState + repeat bool +} + +// init the state of the decoder with input from stream. 
+func (s *sequenceDec) init(br *bitReader) error { + if s.fse == nil { + return errors.New("sequence decoder not defined") + } + s.state.init(br, s.fse.actualTableLog, s.fse.dt[:1<<s.fse.actualTableLog]) + return nil +} + +// sequenceDecs contains all 3 decoders and their state. +// All 3 share the same read stream. +type sequenceDecs struct { + litLengths sequenceDec + offsets sequenceDec + matchLengths sequenceDec + prevOffset [3]int + hist []byte + dict []byte + literals []byte + out []byte + windowSize int + maxBits uint8 +} + +// initialize all 3 decoders from the stream input. +func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []byte) error { + if err := s.litLengths.init(br); err != nil { + return errors.New("litLengths:" + err.Error()) + } + if err := s.offsets.init(br); err != nil { + return errors.New("offsets:" + err.Error()) + } + if err := s.matchLengths.init(br); err != nil { + return errors.New("matchLengths:" + err.Error()) + } + s.literals = literals + s.hist = hist.b + s.prevOffset = hist.recentOffsets + s.maxBits = s.litLengths.fse.maxBits + s.offsets.fse.maxBits + s.matchLengths.fse.maxBits + s.windowSize = hist.windowSize + s.out = out + s.dict = nil + if hist.dict != nil { + s.dict = hist.dict.content + } + return nil +} + +// decode sequences from the stream with the provided history. +func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { + startSize := len(s.out) + // Grab full sizes tables, to avoid bounds checks. + llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + + for i := seqs - 1; i >= 0; i-- { + if br.overread() { + printf("reading sequence %d, exceeded available data\n", seqs-i) + return io.ErrUnexpectedEOF + } + var ll, mo, ml int + if br.off > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + + if ll > len(s.literals) { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals)) + } + size := ll + ml + len(s.out) + if size-startSize > maxBlockSize { + return fmt.Errorf("output (%d) bigger than max block size", size) + } + if size > cap(s.out) { + // Not enough size, which can happen under high volume block streaming conditions, + // but could also happen if the destination slice is too small for sync operations. + // Over-allocating here can create a large amount of GC pressure, so we try to keep + // it as contained as possible. + used := len(s.out) - startSize + addBytes := 256 + ll + ml + used>>2 + // Clamp to max block size. + if used+addBytes > maxBlockSize { + addBytes = maxBlockSize - used + } + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + + // Add literals + s.out = append(s.out, s.literals[:ll]...) + s.literals = s.literals[ll:] + out := s.out + + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + + if mo > len(s.out)+len(hist) || mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) + } + + // we may be in dictionary.
+ dictO := len(s.dict) - (mo - (len(s.out) + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) + } + end := dictO + ml + if end > len(s.dict) { + out = append(out, s.dict[dictO:]...) + mo -= len(s.dict) - dictO + ml -= len(s.dict) - dictO + } else { + out = append(out, s.dict[dictO:end]...) + mo = 0 + ml = 0 + } + } + + // Copy from history. + // TODO: Blocks without history could be made to ignore this completely. + if v := mo - len(s.out); v > 0 { + // v is the start position in history from end. + start := len(s.hist) - v + if ml > v { + // Some of it goes into the current block. + // Copy remainder of history + out = append(out, s.hist[start:]...) + mo -= v + ml -= v + } else { + out = append(out, s.hist[start:start+ml]...) + ml = 0 + } + } + // We must be in the current buffer now + if ml > 0 { + start := len(s.out) - mo + if ml <= len(s.out)-start { + // No overlap + out = append(out, s.out[start:start+ml]...) + } else { + // Overlapping copy + // Extend destination slice and copy one byte at a time. + out = out[:len(out)+ml] + src := out[start : start+ml] + // Destination is the space we just added. + dst := out[len(out)-ml:] + dst = dst[:len(src)] + for i := range src { + dst[i] = src[i] + } + } + } + s.out = out + if i == 0 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.getBitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + + // Add final literals + s.out = append(s.out, s.literals...) + return nil +} + +// update states, at least 27 bits must be available. +func (s *sequenceDecs) update(br *bitReader) { + // Max 8 bits + s.litLengths.state.next(br) + // Max 9 bits + s.matchLengths.state.next(br) + // Max 8 bits + s.offsets.state.next(br) +} + +var bitMask [16]uint16 + +func init() { + for i := range bitMask[:] { + bitMask[i] = uint16((1 << uint(i)) - 1) + } +} + +// update states, at least 27 bits must be available. +func (s *sequenceDecs) updateAlt(br *bitReader) { + // Update all 3 states at once. Approx 20% faster.
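+ // When any extra bits are needed they are fetched with a single getBitsFast call; + // the offset bits occupy the lowest positions, then match length, then literal length bits.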
+ a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + + nBits := a.nbBits() + b.nbBits() + c.nbBits() + if nBits == 0 { + s.litLengths.state.state = s.litLengths.state.dt[a.newState()] + s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()] + s.offsets.state.state = s.offsets.state.dt[c.newState()] + return + } + bits := br.getBitsFast(nBits) + lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31)) + s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits] + + lowBits = uint16(bits >> (c.nbBits() & 31)) + lowBits &= bitMask[b.nbBits()&15] + s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()+lowBits] + + lowBits = uint16(bits) & bitMask[c.nbBits()&15] + s.offsets.state.state = s.offsets.state.dt[c.newState()+lowBits] +} + +// nextFast will return new states when there are at least 4 unused bytes left on the stream when done. +func (s *sequenceDecs) nextFast(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { + // Final will not read from stream. + ll, llB := llState.final() + ml, mlB := mlState.final() + mo, moB := ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + return + } + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + return + } + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + return +} + +func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { + // Final will not read from stream. + ll, llB := llState.final() + ml, mlB := mlState.final() + mo, moB := ofState.final() + + // extra bits are stored in reverse order. + br.fill() + if s.maxBits <= 32 { + mo += br.getBits(moB) + ml += br.getBits(mlB) + ll += br.getBits(llB) + } else { + mo += br.getBits(moB) + br.fill() + // matchlength+literal length, max 32 bits + ml += br.getBits(mlB) + ll += br.getBits(llB) + + } + mo = s.adjustOffset(mo, ll, moB) + return +} + +func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { + if offsetB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = offset + return offset + } + + if litLen == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. 
+ offset++ + } + + if offset == 0 { + return s.prevOffset[0] + } + var temp int + if offset == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[offset] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("temp was 0") + temp = 1 + } + + if offset != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + return temp +} + +// mergeHistory will merge history. +func (s *sequenceDecs) mergeHistory(hist *sequenceDecs) (*sequenceDecs, error) { + for i := uint(0); i < 3; i++ { + var sNew, sHist *sequenceDec + switch i { + default: + // same as "case 0": + sNew = &s.litLengths + sHist = &hist.litLengths + case 1: + sNew = &s.offsets + sHist = &hist.offsets + case 2: + sNew = &s.matchLengths + sHist = &hist.matchLengths + } + if sNew.repeat { + if sHist.fse == nil { + return nil, fmt.Errorf("sequence stream %d, repeat requested, but no history", i) + } + continue + } + if sNew.fse == nil { + return nil, fmt.Errorf("sequence stream %d, no fse found", i) + } + if sHist.fse != nil && !sHist.fse.preDefined { + fseDecoderPool.Put(sHist.fse) + } + sHist.fse = sNew.fse + } + return hist, nil +} diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/seqenc.go b/hotelReservation/vendor/github.com/klauspost/compress/zstd/seqenc.go new file mode 100644 index 000000000..8014174a7 --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/seqenc.go @@ -0,0 +1,114 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "math/bits" + +type seqCoders struct { + llEnc, ofEnc, mlEnc *fseEncoder + llPrev, ofPrev, mlPrev *fseEncoder +} + +// swap coders with another (block). +func (s *seqCoders) swap(other *seqCoders) { + *s, *other = *other, *s +} + +// setPrev will update the previous encoders to the actually used ones +// and make sure a fresh one is in the main slot. +func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) { + compareSwap := func(used *fseEncoder, current, prev **fseEncoder) { + // We used the new one, move current to history and reuse the previous history + if *current == used { + *prev, *current = *current, *prev + c := *current + p := *prev + c.reUsed = false + p.reUsed = true + return + } + if used == *prev { + return + } + // Ensure we cannot reuse by accident + prevEnc := *prev + prevEnc.symbolLen = 0 + } + compareSwap(ll, &s.llEnc, &s.llPrev) + compareSwap(ml, &s.mlEnc, &s.mlPrev) + compareSwap(of, &s.ofEnc, &s.ofPrev) +} + +func highBit(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} + +var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 16, 17, 17, 18, 18, 19, 19, + 20, 20, 20, 20, 21, 21, 21, 21, + 22, 22, 22, 22, 22, 22, 22, 22, + 23, 23, 23, 23, 23, 23, 23, 23, + 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24} + +// Up to 6 bits +const maxLLCode = 35 + +// llBitsTable translates from ll code to number of bits. +var llBitsTable = [maxLLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16} + +// llCode returns the code that represents the literal length requested.
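+// Lengths 0-15 map directly to codes 0-15; longer lengths share a code, with the +// remainder carried in the extra bits given by llBitsTable.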
+func llCode(litLength uint32) uint8 { + const llDeltaCode = 19 + if litLength <= 63 { + // Compiler insists on bounds check (Go 1.12) + return llCodeTable[litLength&63] + } + return uint8(highBit(litLength)) + llDeltaCode +} + +var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, + 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, + 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, + 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42} + +// Up to 6 bits +const maxMLCode = 52 + +// mlBitsTable translates from ml code to number of bits. +var mlBitsTable = [maxMLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 4, 5, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16} + +// note : mlBase = matchLength - MINMATCH; +// because it's the format it's stored in seqStore->sequences +func mlCode(mlBase uint32) uint8 { + const mlDeltaCode = 36 + if mlBase <= 127 { + // Compiler insists on bounds check (Go 1.12) + return mlCodeTable[mlBase&127] + } + return uint8(highBit(mlBase)) + mlDeltaCode +} + +func ofCode(offset uint32) uint8 { + // A valid offset will always be > 0. + return uint8(bits.Len32(offset) - 1) +} diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/snappy.go b/hotelReservation/vendor/github.com/klauspost/compress/zstd/snappy.go new file mode 100644 index 000000000..9e1baad73 --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -0,0 +1,435 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "errors" + "hash/crc32" + "io" + + "github.com/klauspost/compress/huff0" + snappy "github.com/klauspost/compress/internal/snapref" +) + +const ( + snappyTagLiteral = 0x00 + snappyTagCopy1 = 0x01 + snappyTagCopy2 = 0x02 + snappyTagCopy4 = 0x03 +) + +const ( + snappyChecksumSize = 4 + snappyMagicBody = "sNaPpY" + + // snappyMaxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + snappyMaxBlockSize = 65536 + + // snappyMaxEncodedLenOfMaxBlockSize equals MaxEncodedLen(snappyMaxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + snappyMaxEncodedLenOfMaxBlockSize = 76490 +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var ( + // ErrSnappyCorrupt reports that the input is invalid. + ErrSnappyCorrupt = errors.New("snappy: corrupt input") + // ErrSnappyTooLarge reports that the uncompressed length is too large. 
+ ErrSnappyTooLarge = errors.New("snappy: decoded block is too large") + // ErrSnappyUnsupported reports that the input isn't supported. + ErrSnappyUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// SnappyConverter can read Snappy-compressed streams and convert them to zstd. +// Conversion is done by converting the stream directly from Snappy without intermediate +// full decoding. +// Therefore the compression ratio is much less than what can be done by a full decompression +// and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without +// any errors being generated. +// No CRC value is generated, and not all CRC values of the Snappy stream are checked. +// However, it provides really fast recompression of Snappy streams. +// The converter can be reused to avoid allocations, even after errors. +type SnappyConverter struct { + r io.Reader + err error + buf []byte + block *blockEnc +} + +// Convert the Snappy stream supplied in 'in' and write the Zstandard stream to 'w'. +// If any error is detected on the Snappy stream it is returned. +// The number of bytes written is returned. +func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { + initPredefined() + r.err = nil + r.r = in + if r.block == nil { + r.block = &blockEnc{} + r.block.init() + } + r.block.initNewEncode() + if len(r.buf) != snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize { + r.buf = make([]byte, snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize) + } + r.block.litEnc.Reuse = huff0.ReusePolicyNone + var written int64 + var readHeader bool + { + var header []byte + var n int + header, r.err = frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0]) + + n, r.err = w.Write(header) + if r.err != nil { + return written, r.err + } + written += int64(n) + } + + for { + if !r.readFull(r.buf[:4], true) { + // Add empty last block + r.block.reset(nil) + r.block.last = true + err := r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + n, err := w.Write(r.block.output) + if err != nil { + return written, err + } + written += int64(n) + + return written, r.err + } + chunkType := r.buf[0] + if !readHeader { + if chunkType != chunkTypeStreamIdentifier { + println("chunkType != chunkTypeStreamIdentifier", chunkType) + r.err = ErrSnappyCorrupt + return written, r.err + } + readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + println("chunkLen > len(r.buf)", chunkType) + r.err = ErrSnappyUnsupported + return written, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00).
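+ // The compressed chunk is translated to zstd sequences without full decompression; + // if that turns out incompressible, the chunk is decoded and re-emitted as literals.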
+ if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return written, r.err + } + //checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[snappyChecksumSize:] + + n, hdr, err := snappyDecodedLen(buf) + if err != nil { + r.err = err + return written, r.err + } + buf = buf[hdr:] + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + r.block.pushOffsets() + if err := decodeSnappy(r.block, buf); err != nil { + r.err = err + return written, r.err + } + if r.block.size+r.block.extraLits != n { + printf("invalid size, want %d, got %d\n", n, r.block.size+r.block.extraLits) + r.err = ErrSnappyCorrupt + return written, r.err + } + err = r.block.encode(nil, false, false) + switch err { + case errIncompressible: + r.block.popOffsets() + r.block.reset(nil) + r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen]) + if err != nil { + return written, err + } + err = r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + case nil: + default: + return written, err + } + + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + case chunkTypeUncompressedData: + if debugEncoder { + println("Uncompressed, chunklen", chunkLen) + } + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + buf := r.buf[:snappyChecksumSize] + if !r.readFull(buf, false) { + return written, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - snappyChecksumSize + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.literals = r.block.literals[:n] + if !r.readFull(r.block.literals, false) { + return written, r.err + } + if snappyCRC(r.block.literals) != checksum { + println("literals crc mismatch") + r.err = ErrSnappyCorrupt + return written, r.err + } + err := r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + + case chunkTypeStreamIdentifier: + if debugEncoder { + println("stream id", chunkLen, len(snappyMagicBody)) + } + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(snappyMagicBody) { + println("chunkLen != len(snappyMagicBody)", chunkLen, len(snappyMagicBody)) + r.err = ErrSnappyCorrupt + return written, r.err + } + if !r.readFull(r.buf[:len(snappyMagicBody)], false) { + return written, r.err + } + for i := 0; i < len(snappyMagicBody); i++ { + if r.buf[i] != snappyMagicBody[i] { + println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i) + r.err = ErrSnappyCorrupt + return written, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). 
+ println("chunkType <= 0x7f") + r.err = ErrSnappyUnsupported + return written, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return written, r.err + } + } +} + +// decodeSnappy writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read. +func decodeSnappy(blk *blockEnc, src []byte) error { + //decodeRef(make([]byte, snappyMaxBlockSize), src) + var s, length int + lits := blk.extraLits + var offset uint32 + for s < len(src) { + switch src[s] & 0x03 { + case snappyTagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + if x > snappyMaxBlockSize { + println("x > snappyMaxBlockSize", x, snappyMaxBlockSize) + return ErrSnappyCorrupt + } + length = int(x) + 1 + if length <= 0 { + println("length <= 0 ", length) + + return errUnsupportedLiteralLength + } + //if length > snappyMaxBlockSize-d || uint32(length) > len(src)-s { + // return ErrSnappyCorrupt + //} + + blk.literals = append(blk.literals, src[s:s+length]...) + //println(length, "litLen") + lits += length + s += length + continue + + case snappyTagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]) + + case snappyTagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = uint32(src[s-2]) | uint32(src[s-1])<<8 + + case snappyTagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + + if offset <= 0 || blk.size+lits < int(offset) /*|| length > len(blk)-d */ { + println("offset <= 0 || blk.size+lits < int(offset)", offset, blk.size+lits, int(offset), blk.size, lits) + + return ErrSnappyCorrupt + } + + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. 
+ if false { + offset = blk.matchOffset(offset, uint32(lits)) + } else { + offset += 3 + } + + blk.sequences = append(blk.sequences, seq{ + litLen: uint32(lits), + offset: offset, + matchLen: uint32(length) - zstdMinMatch, + }) + blk.size += length + lits + lits = 0 + } + blk.extraLits = lits + return nil +} + +func (r *SnappyConverter) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrSnappyCorrupt + } + return false + } + return true +} + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func snappyCRC(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return c>>15 | c<<17 + 0xa282ead8 +} + +// snappyDecodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func snappyDecodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrSnappyCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrSnappyTooLarge + } + return int(v), n, nil +} diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/zip.go b/hotelReservation/vendor/github.com/klauspost/compress/zstd/zip.go new file mode 100644 index 000000000..967f29b31 --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/zip.go @@ -0,0 +1,122 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "errors" + "io" + "sync" +) + +// ZipMethodWinZip is the method for Zstandard compressed data inside Zip files for WinZip. +// See https://www.winzip.com/win/en/comp_info.html +const ZipMethodWinZip = 93 + +// ZipMethodPKWare is the original method number used by PKWARE to indicate Zstandard compression. +// Deprecated: This has been deprecated by PKWARE, use ZipMethodWinZip instead for compression. +// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT +const ZipMethodPKWare = 20 + +var zipReaderPool sync.Pool + +// newZipReader cannot be used since we would leak goroutines... 
+func newZipReader(r io.Reader) io.ReadCloser { + dec, ok := zipReaderPool.Get().(*Decoder) + if ok { + dec.Reset(r) + } else { + d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true)) + if err != nil { + panic(err) + } + dec = d + } + return &pooledZipReader{dec: dec} +} + +type pooledZipReader struct { + mu sync.Mutex // guards Close and Read + dec *Decoder +} + +func (r *pooledZipReader) Read(p []byte) (n int, err error) { + r.mu.Lock() + defer r.mu.Unlock() + if r.dec == nil { + return 0, errors.New("Read after Close") + } + dec, err := r.dec.Read(p) + + return dec, err +} + +func (r *pooledZipReader) Close() error { + r.mu.Lock() + defer r.mu.Unlock() + var err error + if r.dec != nil { + err = r.dec.Reset(nil) + zipReaderPool.Put(r.dec) + r.dec = nil + } + return err +} + +type pooledZipWriter struct { + mu sync.Mutex // guards Close and Read + enc *Encoder + pool *sync.Pool +} + +func (w *pooledZipWriter) Write(p []byte) (n int, err error) { + w.mu.Lock() + defer w.mu.Unlock() + if w.enc == nil { + return 0, errors.New("Write after Close") + } + return w.enc.Write(p) +} + +func (w *pooledZipWriter) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + var err error + if w.enc != nil { + err = w.enc.Close() + w.pool.Put(w.enc) + w.enc = nil + } + return err +} + +// ZipCompressor returns a compressor that can be registered with zip libraries. +// The provided encoder options will be used on all encodes. +func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) { + var pool sync.Pool + return func(w io.Writer) (io.WriteCloser, error) { + enc, ok := pool.Get().(*Encoder) + if ok { + enc.Reset(w) + } else { + var err error + enc, err = NewWriter(w, opts...) + if err != nil { + return nil, err + } + } + return &pooledZipWriter{enc: enc, pool: &pool}, nil + } +} + +// ZipDecompressor returns a decompressor that can be registered with zip libraries. +// See ZipCompressor for example. +func ZipDecompressor() func(r io.Reader) io.ReadCloser { + return func(r io.Reader) io.ReadCloser { + d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true)) + if err != nil { + panic(err) + } + return d.IOReadCloser() + } +} diff --git a/hotelReservation/vendor/github.com/klauspost/compress/zstd/zstd.go b/hotelReservation/vendor/github.com/klauspost/compress/zstd/zstd.go new file mode 100644 index 000000000..ef1d49a00 --- /dev/null +++ b/hotelReservation/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -0,0 +1,152 @@ +// Package zstd provides decompression of zstandard files. +// +// For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "log" + "math" + "math/bits" +) + +// enable debug printing +const debug = false + +// enable encoding debug printing +const debugEncoder = debug + +// enable decoding debug printing +const debugDecoder = debug + +// Enable extra assertions. +const debugAsserts = debug || false + +// print sequence details +const debugSequences = false + +// print detailed matching information +const debugMatches = false + +// force encoder to use predefined tables. +const forcePreDef = false + +// zstdMinMatch is the minimum zstd match length. +const zstdMinMatch = 3 + +// Reset the buffer offset when reaching this. +const bufferReset = math.MaxInt32 - MaxWindowSize + +var ( + // ErrReservedBlockType is returned when a reserved block type is found. + // Typically this indicates wrong or corrupted input. 
+ ErrReservedBlockType = errors.New("invalid input: reserved block type encountered") + + // ErrCompressedSizeTooBig is returned when a block is bigger than allowed. + // Typically this indicates wrong or corrupted input. + ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big") + + // ErrBlockTooSmall is returned when a block is too small to be decoded. + // Typically returned on invalid input. + ErrBlockTooSmall = errors.New("block too small") + + // ErrMagicMismatch is returned when a "magic" number isn't what is expected. + // Typically this indicates wrong or corrupted input. + ErrMagicMismatch = errors.New("invalid input: magic number mismatch") + + // ErrWindowSizeExceeded is returned when a reference exceeds the valid window size. + // Typically this indicates wrong or corrupted input. + ErrWindowSizeExceeded = errors.New("window size exceeded") + + // ErrWindowSizeTooSmall is returned when no window size is specified. + // Typically this indicates wrong or corrupted input. + ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small") + + // ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit. + ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") + + // ErrUnknownDictionary is returned if the dictionary ID is unknown. + // For the time being dictionaries are not supported. + ErrUnknownDictionary = errors.New("unknown dictionary") + + // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeExceeded = errors.New("frame size exceeded") + + // ErrCRCMismatch is returned if CRC mismatches. + ErrCRCMismatch = errors.New("CRC check failed") + + // ErrDecoderClosed will be returned if the Decoder was used after + // Close has been called. + ErrDecoderClosed = errors.New("decoder used after Close") + + // ErrDecoderNilInput is returned when a nil Reader was provided + // and an operation other than Reset/DecodeAll/Close was attempted. + ErrDecoderNilInput = errors.New("nil input provided as reader") +) + +func println(a ...interface{}) { + if debug || debugDecoder || debugEncoder { + log.Println(a...) + } +} + +func printf(format string, a ...interface{}) { + if debug || debugDecoder || debugEncoder { + log.Printf(format, a...) + } +} + +// matchLenFast does matching, but will not match the last up to 7 bytes. +func matchLenFast(a, b []byte) int { + endI := len(a) & (math.MaxInt32 - 7) + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + return i + bits.TrailingZeros64(diff)>>3 + } + } + return endI +} + +// matchLen returns the maximum match length. +// a must be the shortest of the two.
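+// It compares eight bytes per step: load64(a, i)^load64(b, i) is zero only +// when the two words are equal, and on the first mismatch the index of the +// differing byte is bits.TrailingZeros64(diff)>>3 (little-endian byte order); +// a byte-wise loop then finishes the sub-8-byte tail.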
+func matchLen(a, b []byte) int { + b = b[:len(a)] + for i := 0; i < len(a)-7; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + return i + (bits.TrailingZeros64(diff) >> 3) + } + } + + checked := (len(a) >> 3) << 3 + a = a[checked:] + b = b[checked:] + for i := range a { + if a[i] != b[i] { + return i + checked + } + } + return len(a) + checked +} + +func load3232(b []byte, i int32) uint32 { + return binary.LittleEndian.Uint32(b[i:]) +} + +func load6432(b []byte, i int32) uint64 { + return binary.LittleEndian.Uint64(b[i:]) +} + +func load64(b []byte, i int) uint64 { + return binary.LittleEndian.Uint64(b[i:]) +} + +type byter interface { + Bytes() []byte + Len() int +} + +var _ byter = &bytes.Buffer{} diff --git a/hotelReservation/vendor/github.com/mattn/go-colorable/LICENSE b/hotelReservation/vendor/github.com/mattn/go-colorable/LICENSE new file mode 100644 index 000000000..91b5cef30 --- /dev/null +++ b/hotelReservation/vendor/github.com/mattn/go-colorable/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/hotelReservation/vendor/github.com/mattn/go-colorable/README.md b/hotelReservation/vendor/github.com/mattn/go-colorable/README.md new file mode 100644 index 000000000..ca0483711 --- /dev/null +++ b/hotelReservation/vendor/github.com/mattn/go-colorable/README.md @@ -0,0 +1,48 @@ +# go-colorable + +[![Build Status](https://github.com/mattn/go-colorable/workflows/test/badge.svg)](https://github.com/mattn/go-colorable/actions?query=workflow%3Atest) +[![Codecov](https://codecov.io/gh/mattn/go-colorable/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-colorable) +[![GoDoc](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable) +[![Go Report Card](https://goreportcard.com/badge/mattn/go-colorable)](https://goreportcard.com/report/mattn/go-colorable) + +Colorable writer for Windows. + +For example, most logger packages don't show colors on Windows. (I know we can do it with ansicon, but I don't want to require it.) +This package makes it possible to handle ANSI color escape sequences on Windows. + +## Too Bad! + +![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png) + + +## So Good!
+ +![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png) + +## Usage + +```go +logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true}) +logrus.SetOutput(colorable.NewColorableStdout()) + +logrus.Info("succeeded") +logrus.Warn("not correct") +logrus.Error("something error") +logrus.Fatal("panic") +``` + +The above code also compiles on non-Windows OSs. + +## Installation + +``` +$ go get github.com/mattn/go-colorable +``` + +# License + +MIT + +# Author + +Yasuhiro Matsumoto (a.k.a mattn) diff --git a/hotelReservation/vendor/github.com/mattn/go-colorable/colorable_appengine.go b/hotelReservation/vendor/github.com/mattn/go-colorable/colorable_appengine.go new file mode 100644 index 000000000..416d1bbbf --- /dev/null +++ b/hotelReservation/vendor/github.com/mattn/go-colorable/colorable_appengine.go @@ -0,0 +1,38 @@ +//go:build appengine +// +build appengine + +package colorable + +import ( + "io" + "os" + + _ "github.com/mattn/go-isatty" +) + +// NewColorable returns new instance of Writer which handles escape sequence. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + return file +} + +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. +func NewColorableStdout() io.Writer { + return os.Stdout +} + +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. +func NewColorableStderr() io.Writer { + return os.Stderr +} + +// EnableColorsStdout enables colors if possible. +func EnableColorsStdout(enabled *bool) func() { + if enabled != nil { + *enabled = true + } + return func() {} +} diff --git a/hotelReservation/vendor/github.com/mattn/go-colorable/colorable_others.go b/hotelReservation/vendor/github.com/mattn/go-colorable/colorable_others.go new file mode 100644 index 000000000..766d94603 --- /dev/null +++ b/hotelReservation/vendor/github.com/mattn/go-colorable/colorable_others.go @@ -0,0 +1,38 @@ +//go:build !windows && !appengine +// +build !windows,!appengine + +package colorable + +import ( + "io" + "os" + + _ "github.com/mattn/go-isatty" +) + +// NewColorable returns new instance of Writer which handles escape sequence. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + return file +} + +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. +func NewColorableStdout() io.Writer { + return os.Stdout +} + +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. +func NewColorableStderr() io.Writer { + return os.Stderr +} + +// EnableColorsStdout enables colors if possible.
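+// On non-Windows builds ANSI escapes are assumed to work, so this only sets +// *enabled (when non-nil) and returns a no-op restore function, e.g.: +// +// restore := EnableColorsStdout(nil) +// defer restore()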
+func EnableColorsStdout(enabled *bool) func() { + if enabled != nil { + *enabled = true + } + return func() {} +} diff --git a/hotelReservation/vendor/github.com/mattn/go-colorable/colorable_windows.go b/hotelReservation/vendor/github.com/mattn/go-colorable/colorable_windows.go new file mode 100644 index 000000000..1846ad5ab --- /dev/null +++ b/hotelReservation/vendor/github.com/mattn/go-colorable/colorable_windows.go @@ -0,0 +1,1047 @@ +//go:build windows && !appengine +// +build windows,!appengine + +package colorable + +import ( + "bytes" + "io" + "math" + "os" + "strconv" + "strings" + "sync" + "syscall" + "unsafe" + + "github.com/mattn/go-isatty" +) + +const ( + foregroundBlue = 0x1 + foregroundGreen = 0x2 + foregroundRed = 0x4 + foregroundIntensity = 0x8 + foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) + backgroundBlue = 0x10 + backgroundGreen = 0x20 + backgroundRed = 0x40 + backgroundIntensity = 0x80 + backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) + commonLvbUnderscore = 0x8000 + + cENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4 +) + +const ( + genericRead = 0x80000000 + genericWrite = 0x40000000 +) + +const ( + consoleTextmodeBuffer = 0x1 +) + +type wchar uint16 +type short int16 +type dword uint32 +type word uint16 + +type coord struct { + x short + y short +} + +type smallRect struct { + left short + top short + right short + bottom short +} + +type consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord +} + +type consoleCursorInfo struct { + size dword + visible int32 +} + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") + procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") + procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") + procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") + procGetConsoleCursorInfo = kernel32.NewProc("GetConsoleCursorInfo") + procSetConsoleCursorInfo = kernel32.NewProc("SetConsoleCursorInfo") + procSetConsoleTitle = kernel32.NewProc("SetConsoleTitleW") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procSetConsoleMode = kernel32.NewProc("SetConsoleMode") + procCreateConsoleScreenBuffer = kernel32.NewProc("CreateConsoleScreenBuffer") +) + +// Writer provides colorable Writer to the console +type Writer struct { + out io.Writer + handle syscall.Handle + althandle syscall.Handle + oldattr word + oldpos coord + rest bytes.Buffer + mutex sync.Mutex +} + +// NewColorable returns new instance of Writer which handles escape sequence from File. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + if isatty.IsTerminal(file.Fd()) { + var mode uint32 + if r, _, _ := procGetConsoleMode.Call(file.Fd(), uintptr(unsafe.Pointer(&mode))); r != 0 && mode&cENABLE_VIRTUAL_TERMINAL_PROCESSING != 0 { + return file + } + var csbi consoleScreenBufferInfo + handle := syscall.Handle(file.Fd()) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + return &Writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}} + } + return file +} + +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. 
+func NewColorableStdout() io.Writer { + return NewColorable(os.Stdout) +} + +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. +func NewColorableStderr() io.Writer { + return NewColorable(os.Stderr) +} + +var color256 = map[int]int{ + 0: 0x000000, + 1: 0x800000, + 2: 0x008000, + 3: 0x808000, + 4: 0x000080, + 5: 0x800080, + 6: 0x008080, + 7: 0xc0c0c0, + 8: 0x808080, + 9: 0xff0000, + 10: 0x00ff00, + 11: 0xffff00, + 12: 0x0000ff, + 13: 0xff00ff, + 14: 0x00ffff, + 15: 0xffffff, + 16: 0x000000, + 17: 0x00005f, + 18: 0x000087, + 19: 0x0000af, + 20: 0x0000d7, + 21: 0x0000ff, + 22: 0x005f00, + 23: 0x005f5f, + 24: 0x005f87, + 25: 0x005faf, + 26: 0x005fd7, + 27: 0x005fff, + 28: 0x008700, + 29: 0x00875f, + 30: 0x008787, + 31: 0x0087af, + 32: 0x0087d7, + 33: 0x0087ff, + 34: 0x00af00, + 35: 0x00af5f, + 36: 0x00af87, + 37: 0x00afaf, + 38: 0x00afd7, + 39: 0x00afff, + 40: 0x00d700, + 41: 0x00d75f, + 42: 0x00d787, + 43: 0x00d7af, + 44: 0x00d7d7, + 45: 0x00d7ff, + 46: 0x00ff00, + 47: 0x00ff5f, + 48: 0x00ff87, + 49: 0x00ffaf, + 50: 0x00ffd7, + 51: 0x00ffff, + 52: 0x5f0000, + 53: 0x5f005f, + 54: 0x5f0087, + 55: 0x5f00af, + 56: 0x5f00d7, + 57: 0x5f00ff, + 58: 0x5f5f00, + 59: 0x5f5f5f, + 60: 0x5f5f87, + 61: 0x5f5faf, + 62: 0x5f5fd7, + 63: 0x5f5fff, + 64: 0x5f8700, + 65: 0x5f875f, + 66: 0x5f8787, + 67: 0x5f87af, + 68: 0x5f87d7, + 69: 0x5f87ff, + 70: 0x5faf00, + 71: 0x5faf5f, + 72: 0x5faf87, + 73: 0x5fafaf, + 74: 0x5fafd7, + 75: 0x5fafff, + 76: 0x5fd700, + 77: 0x5fd75f, + 78: 0x5fd787, + 79: 0x5fd7af, + 80: 0x5fd7d7, + 81: 0x5fd7ff, + 82: 0x5fff00, + 83: 0x5fff5f, + 84: 0x5fff87, + 85: 0x5fffaf, + 86: 0x5fffd7, + 87: 0x5fffff, + 88: 0x870000, + 89: 0x87005f, + 90: 0x870087, + 91: 0x8700af, + 92: 0x8700d7, + 93: 0x8700ff, + 94: 0x875f00, + 95: 0x875f5f, + 96: 0x875f87, + 97: 0x875faf, + 98: 0x875fd7, + 99: 0x875fff, + 100: 0x878700, + 101: 0x87875f, + 102: 0x878787, + 103: 0x8787af, + 104: 0x8787d7, + 105: 0x8787ff, + 106: 0x87af00, + 107: 0x87af5f, + 108: 0x87af87, + 109: 0x87afaf, + 110: 0x87afd7, + 111: 0x87afff, + 112: 0x87d700, + 113: 0x87d75f, + 114: 0x87d787, + 115: 0x87d7af, + 116: 0x87d7d7, + 117: 0x87d7ff, + 118: 0x87ff00, + 119: 0x87ff5f, + 120: 0x87ff87, + 121: 0x87ffaf, + 122: 0x87ffd7, + 123: 0x87ffff, + 124: 0xaf0000, + 125: 0xaf005f, + 126: 0xaf0087, + 127: 0xaf00af, + 128: 0xaf00d7, + 129: 0xaf00ff, + 130: 0xaf5f00, + 131: 0xaf5f5f, + 132: 0xaf5f87, + 133: 0xaf5faf, + 134: 0xaf5fd7, + 135: 0xaf5fff, + 136: 0xaf8700, + 137: 0xaf875f, + 138: 0xaf8787, + 139: 0xaf87af, + 140: 0xaf87d7, + 141: 0xaf87ff, + 142: 0xafaf00, + 143: 0xafaf5f, + 144: 0xafaf87, + 145: 0xafafaf, + 146: 0xafafd7, + 147: 0xafafff, + 148: 0xafd700, + 149: 0xafd75f, + 150: 0xafd787, + 151: 0xafd7af, + 152: 0xafd7d7, + 153: 0xafd7ff, + 154: 0xafff00, + 155: 0xafff5f, + 156: 0xafff87, + 157: 0xafffaf, + 158: 0xafffd7, + 159: 0xafffff, + 160: 0xd70000, + 161: 0xd7005f, + 162: 0xd70087, + 163: 0xd700af, + 164: 0xd700d7, + 165: 0xd700ff, + 166: 0xd75f00, + 167: 0xd75f5f, + 168: 0xd75f87, + 169: 0xd75faf, + 170: 0xd75fd7, + 171: 0xd75fff, + 172: 0xd78700, + 173: 0xd7875f, + 174: 0xd78787, + 175: 0xd787af, + 176: 0xd787d7, + 177: 0xd787ff, + 178: 0xd7af00, + 179: 0xd7af5f, + 180: 0xd7af87, + 181: 0xd7afaf, + 182: 0xd7afd7, + 183: 0xd7afff, + 184: 0xd7d700, + 185: 0xd7d75f, + 186: 0xd7d787, + 187: 0xd7d7af, + 188: 0xd7d7d7, + 189: 0xd7d7ff, + 190: 0xd7ff00, + 191: 0xd7ff5f, + 192: 0xd7ff87, + 193: 0xd7ffaf, + 194: 0xd7ffd7, + 195: 0xd7ffff, + 196: 0xff0000, + 197: 0xff005f, + 198: 0xff0087, + 
199: 0xff00af, + 200: 0xff00d7, + 201: 0xff00ff, + 202: 0xff5f00, + 203: 0xff5f5f, + 204: 0xff5f87, + 205: 0xff5faf, + 206: 0xff5fd7, + 207: 0xff5fff, + 208: 0xff8700, + 209: 0xff875f, + 210: 0xff8787, + 211: 0xff87af, + 212: 0xff87d7, + 213: 0xff87ff, + 214: 0xffaf00, + 215: 0xffaf5f, + 216: 0xffaf87, + 217: 0xffafaf, + 218: 0xffafd7, + 219: 0xffafff, + 220: 0xffd700, + 221: 0xffd75f, + 222: 0xffd787, + 223: 0xffd7af, + 224: 0xffd7d7, + 225: 0xffd7ff, + 226: 0xffff00, + 227: 0xffff5f, + 228: 0xffff87, + 229: 0xffffaf, + 230: 0xffffd7, + 231: 0xffffff, + 232: 0x080808, + 233: 0x121212, + 234: 0x1c1c1c, + 235: 0x262626, + 236: 0x303030, + 237: 0x3a3a3a, + 238: 0x444444, + 239: 0x4e4e4e, + 240: 0x585858, + 241: 0x626262, + 242: 0x6c6c6c, + 243: 0x767676, + 244: 0x808080, + 245: 0x8a8a8a, + 246: 0x949494, + 247: 0x9e9e9e, + 248: 0xa8a8a8, + 249: 0xb2b2b2, + 250: 0xbcbcbc, + 251: 0xc6c6c6, + 252: 0xd0d0d0, + 253: 0xdadada, + 254: 0xe4e4e4, + 255: 0xeeeeee, +} + +// `\033]0;TITLESTR\007` +func doTitleSequence(er *bytes.Reader) error { + var c byte + var err error + + c, err = er.ReadByte() + if err != nil { + return err + } + if c != '0' && c != '2' { + return nil + } + c, err = er.ReadByte() + if err != nil { + return err + } + if c != ';' { + return nil + } + title := make([]byte, 0, 80) + for { + c, err = er.ReadByte() + if err != nil { + return err + } + if c == 0x07 || c == '\n' { + break + } + title = append(title, c) + } + if len(title) > 0 { + title8, err := syscall.UTF16PtrFromString(string(title)) + if err == nil { + procSetConsoleTitle.Call(uintptr(unsafe.Pointer(title8))) + } + } + return nil +} + +// returns Atoi(s) unless s == "" in which case it returns def +func atoiWithDefault(s string, def int) (int, error) { + if s == "" { + return def, nil + } + return strconv.Atoi(s) +} + +// Write writes data on console +func (w *Writer) Write(data []byte) (n int, err error) { + w.mutex.Lock() + defer w.mutex.Unlock() + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + + handle := w.handle + + var er *bytes.Reader + if w.rest.Len() > 0 { + var rest bytes.Buffer + w.rest.WriteTo(&rest) + w.rest.Reset() + rest.Write(data) + er = bytes.NewReader(rest.Bytes()) + } else { + er = bytes.NewReader(data) + } + var plaintext bytes.Buffer +loop: + for { + c1, err := er.ReadByte() + if err != nil { + plaintext.WriteTo(w.out) + break loop + } + if c1 != 0x1b { + plaintext.WriteByte(c1) + continue + } + _, err = plaintext.WriteTo(w.out) + if err != nil { + break loop + } + c2, err := er.ReadByte() + if err != nil { + break loop + } + + switch c2 { + case '>': + continue + case ']': + w.rest.WriteByte(c1) + w.rest.WriteByte(c2) + er.WriteTo(&w.rest) + if bytes.IndexByte(w.rest.Bytes(), 0x07) == -1 { + break loop + } + er = bytes.NewReader(w.rest.Bytes()[2:]) + err := doTitleSequence(er) + if err != nil { + break loop + } + w.rest.Reset() + continue + // https://github.com/mattn/go-colorable/issues/27 + case '7': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + w.oldpos = csbi.cursorPosition + continue + case '8': + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + continue + case 0x5b: + // execute part after switch + default: + continue + } + + w.rest.WriteByte(c1) + w.rest.WriteByte(c2) + er.WriteTo(&w.rest) + + var buf bytes.Buffer + var m byte + for i, c := range w.rest.Bytes()[2:] { + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == 
'@' { + m = c + er = bytes.NewReader(w.rest.Bytes()[2+i+1:]) + w.rest.Reset() + break + } + buf.Write([]byte(string(c))) + } + if m == 0 { + break loop + } + + switch m { + case 'A': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'B': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'C': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'D': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x -= short(n) + if csbi.cursorPosition.x < 0 { + csbi.cursorPosition.x = 0 + } + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'E': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'F': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'G': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + if n < 1 { + n = 1 + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = short(n - 1) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'H', 'f': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + if buf.Len() > 0 { + token := strings.Split(buf.String(), ";") + switch len(token) { + case 1: + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + csbi.cursorPosition.y = short(n1 - 1) + case 2: + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + n2, err := strconv.Atoi(token[1]) + if err != nil { + continue + } + csbi.cursorPosition.x = short(n2 - 1) + csbi.cursorPosition.y = short(n1 - 1) + } + } else { + csbi.cursorPosition.y = 0 + } + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'J': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + var count, written dword + var cursor coord + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + count = 
dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.window.top-csbi.cursorPosition.y)*dword(csbi.size.x) + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) + } + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'K': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + var cursor coord + var count, written dword + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x) + case 1: + cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x) + case 2: + cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} + count = dword(csbi.size.x) + } + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'X': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + var cursor coord + var written dword + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(n), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(n), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'm': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + attr := csbi.attributes + cs := buf.String() + if cs == "" { + procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(w.oldattr)) + continue + } + token := strings.Split(cs, ";") + for i := 0; i < len(token); i++ { + ns := token[i] + if n, err = strconv.Atoi(ns); err == nil { + switch { + case n == 0 || n == 100: + attr = w.oldattr + case n == 4: + attr |= commonLvbUnderscore + case (1 <= n && n <= 3) || n == 5: + attr |= foregroundIntensity + case n == 7 || n == 27: + attr = + (attr &^ (foregroundMask | backgroundMask)) | + ((attr & foregroundMask) << 4) | + ((attr & backgroundMask) >> 4) + case n == 22: + attr &^= foregroundIntensity + case n == 24: + attr &^= commonLvbUnderscore + case 30 <= n && n <= 37: + attr &= backgroundMask + if (n-30)&1 != 0 { + attr |= foregroundRed + } + if (n-30)&2 != 0 { + attr |= foregroundGreen + } + if (n-30)&4 != 0 { + attr |= foregroundBlue + } + case n == 38: // set foreground color. 
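+ // A 38;5;N sequence selects from the xterm 256-color palette (mapped + // through the n256foreAttr table), while 38;2;R;G;B is truecolor, which + // is approximated below by thresholding each channel at 127.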
+ if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256foreAttr == nil { + n256setup() + } + attr &= backgroundMask + attr |= n256foreAttr[n256%len(n256foreAttr)] + i += 2 + } + } else if len(token) == 5 && token[i+1] == "2" { + var r, g, b int + r, _ = strconv.Atoi(token[i+2]) + g, _ = strconv.Atoi(token[i+3]) + b, _ = strconv.Atoi(token[i+4]) + i += 4 + if r > 127 { + attr |= foregroundRed + } + if g > 127 { + attr |= foregroundGreen + } + if b > 127 { + attr |= foregroundBlue + } + } else { + attr = attr & (w.oldattr & backgroundMask) + } + case n == 39: // reset foreground color. + attr &= backgroundMask + attr |= w.oldattr & foregroundMask + case 40 <= n && n <= 47: + attr &= foregroundMask + if (n-40)&1 != 0 { + attr |= backgroundRed + } + if (n-40)&2 != 0 { + attr |= backgroundGreen + } + if (n-40)&4 != 0 { + attr |= backgroundBlue + } + case n == 48: // set background color. + if i < len(token)-2 && token[i+1] == "5" { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256backAttr == nil { + n256setup() + } + attr &= foregroundMask + attr |= n256backAttr[n256%len(n256backAttr)] + i += 2 + } + } else if len(token) == 5 && token[i+1] == "2" { + var r, g, b int + r, _ = strconv.Atoi(token[i+2]) + g, _ = strconv.Atoi(token[i+3]) + b, _ = strconv.Atoi(token[i+4]) + i += 4 + if r > 127 { + attr |= backgroundRed + } + if g > 127 { + attr |= backgroundGreen + } + if b > 127 { + attr |= backgroundBlue + } + } else { + attr = attr & (w.oldattr & foregroundMask) + } + case n == 49: // reset background color. + attr &= foregroundMask + attr |= w.oldattr & backgroundMask + case 90 <= n && n <= 97: + attr = (attr & backgroundMask) + attr |= foregroundIntensity + if (n-90)&1 != 0 { + attr |= foregroundRed + } + if (n-90)&2 != 0 { + attr |= foregroundGreen + } + if (n-90)&4 != 0 { + attr |= foregroundBlue + } + case 100 <= n && n <= 107: + attr = (attr & foregroundMask) + attr |= backgroundIntensity + if (n-100)&1 != 0 { + attr |= backgroundRed + } + if (n-100)&2 != 0 { + attr |= backgroundGreen + } + if (n-100)&4 != 0 { + attr |= backgroundBlue + } + } + procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(attr)) + } + } + case 'h': + var ci consoleCursorInfo + cs := buf.String() + if cs == "5>" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 0 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?25" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 1 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?1049" { + if w.althandle == 0 { + h, _, _ := procCreateConsoleScreenBuffer.Call(uintptr(genericRead|genericWrite), 0, 0, uintptr(consoleTextmodeBuffer), 0, 0) + w.althandle = syscall.Handle(h) + if w.althandle != 0 { + handle = w.althandle + } + } + } + case 'l': + var ci consoleCursorInfo + cs := buf.String() + if cs == "5>" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 1 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?25" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 0 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?1049" { + if w.althandle != 0 { + syscall.CloseHandle(w.althandle) + w.althandle = 0 + handle = w.handle
+ } + } + case 's': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + w.oldpos = csbi.cursorPosition + case 'u': + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + } + } + + return len(data), nil +} + +type consoleColor struct { + rgb int + red bool + green bool + blue bool + intensity bool +} + +func (c consoleColor) foregroundAttr() (attr word) { + if c.red { + attr |= foregroundRed + } + if c.green { + attr |= foregroundGreen + } + if c.blue { + attr |= foregroundBlue + } + if c.intensity { + attr |= foregroundIntensity + } + return +} + +func (c consoleColor) backgroundAttr() (attr word) { + if c.red { + attr |= backgroundRed + } + if c.green { + attr |= backgroundGreen + } + if c.blue { + attr |= backgroundBlue + } + if c.intensity { + attr |= backgroundIntensity + } + return +} + +var color16 = []consoleColor{ + {0x000000, false, false, false, false}, + {0x000080, false, false, true, false}, + {0x008000, false, true, false, false}, + {0x008080, false, true, true, false}, + {0x800000, true, false, false, false}, + {0x800080, true, false, true, false}, + {0x808000, true, true, false, false}, + {0xc0c0c0, true, true, true, false}, + {0x808080, false, false, false, true}, + {0x0000ff, false, false, true, true}, + {0x00ff00, false, true, false, true}, + {0x00ffff, false, true, true, true}, + {0xff0000, true, false, false, true}, + {0xff00ff, true, false, true, true}, + {0xffff00, true, true, false, true}, + {0xffffff, true, true, true, true}, +} + +type hsv struct { + h, s, v float32 +} + +func (a hsv) dist(b hsv) float32 { + dh := a.h - b.h + switch { + case dh > 0.5: + dh = 1 - dh + case dh < -0.5: + dh = -1 - dh + } + ds := a.s - b.s + dv := a.v - b.v + return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv))) +} + +func toHSV(rgb int) hsv { + r, g, b := float32((rgb&0xFF0000)>>16)/256.0, + float32((rgb&0x00FF00)>>8)/256.0, + float32(rgb&0x0000FF)/256.0 + min, max := minmax3f(r, g, b) + h := max - min + if h > 0 { + if max == r { + h = (g - b) / h + if h < 0 { + h += 6 + } + } else if max == g { + h = 2 + (b-r)/h + } else { + h = 4 + (r-g)/h + } + } + h /= 6.0 + s := max - min + if max != 0 { + s /= max + } + v := max + return hsv{h: h, s: s, v: v} +} + +type hsvTable []hsv + +func toHSVTable(rgbTable []consoleColor) hsvTable { + t := make(hsvTable, len(rgbTable)) + for i, c := range rgbTable { + t[i] = toHSV(c.rgb) + } + return t +} + +func (t hsvTable) find(rgb int) consoleColor { + hsv := toHSV(rgb) + n := 7 + l := float32(5.0) + for i, p := range t { + d := hsv.dist(p) + if d < l { + l, n = d, i + } + } + return color16[n] +} + +func minmax3f(a, b, c float32) (min, max float32) { + if a < b { + if b < c { + return a, c + } else if a < c { + return a, b + } else { + return c, b + } + } else { + if a < c { + return b, c + } else if b < c { + return b, a + } else { + return c, a + } + } +} + +var n256foreAttr []word +var n256backAttr []word + +func n256setup() { + n256foreAttr = make([]word, 256) + n256backAttr = make([]word, 256) + t := toHSVTable(color16) + for i, rgb := range color256 { + c := t.find(rgb) + n256foreAttr[i] = c.foregroundAttr() + n256backAttr[i] = c.backgroundAttr() + } +} + +// EnableColorsStdout enable colors if possible. 
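+// On Windows it first tries to enable ENABLE_VIRTUAL_TERMINAL_PROCESSING so +// the console interprets ANSI sequences natively; the returned function +// restores the previous console mode.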
+func EnableColorsStdout(enabled *bool) func() { + var mode uint32 + h := os.Stdout.Fd() + if r, _, _ := procGetConsoleMode.Call(h, uintptr(unsafe.Pointer(&mode))); r != 0 { + if r, _, _ = procSetConsoleMode.Call(h, uintptr(mode|cENABLE_VIRTUAL_TERMINAL_PROCESSING)); r != 0 { + if enabled != nil { + *enabled = true + } + return func() { + procSetConsoleMode.Call(h, uintptr(mode)) + } + } + } + if enabled != nil { + *enabled = true + } + return func() {} +} diff --git a/hotelReservation/vendor/github.com/mattn/go-colorable/go.test.sh b/hotelReservation/vendor/github.com/mattn/go-colorable/go.test.sh new file mode 100644 index 000000000..012162b07 --- /dev/null +++ b/hotelReservation/vendor/github.com/mattn/go-colorable/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... | grep -v vendor); do + go test -race -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/hotelReservation/vendor/github.com/mattn/go-colorable/noncolorable.go b/hotelReservation/vendor/github.com/mattn/go-colorable/noncolorable.go new file mode 100644 index 000000000..05d6f74bf --- /dev/null +++ b/hotelReservation/vendor/github.com/mattn/go-colorable/noncolorable.go @@ -0,0 +1,57 @@ +package colorable + +import ( + "bytes" + "io" +) + +// NonColorable holds writer but removes escape sequence. +type NonColorable struct { + out io.Writer +} + +// NewNonColorable returns new instance of Writer which removes escape sequence from Writer. +func NewNonColorable(w io.Writer) io.Writer { + return &NonColorable{out: w} +} + +// Write writes data on console +func (w *NonColorable) Write(data []byte) (n int, err error) { + er := bytes.NewReader(data) + var plaintext bytes.Buffer +loop: + for { + c1, err := er.ReadByte() + if err != nil { + plaintext.WriteTo(w.out) + break loop + } + if c1 != 0x1b { + plaintext.WriteByte(c1) + continue + } + _, err = plaintext.WriteTo(w.out) + if err != nil { + break loop + } + c2, err := er.ReadByte() + if err != nil { + break loop + } + if c2 != 0x5b { + continue + } + + for { + c, err := er.ReadByte() + if err != nil { + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + break + } + } + } + + return len(data), nil +} diff --git a/hotelReservation/vendor/github.com/mattn/go-isatty/LICENSE b/hotelReservation/vendor/github.com/mattn/go-isatty/LICENSE new file mode 100644 index 000000000..65dc692b6 --- /dev/null +++ b/hotelReservation/vendor/github.com/mattn/go-isatty/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) Yasuhiro MATSUMOTO + +MIT License (Expat) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/hotelReservation/vendor/github.com/mattn/go-isatty/README.md b/hotelReservation/vendor/github.com/mattn/go-isatty/README.md new file mode 100644 index 000000000..38418353e --- /dev/null +++ b/hotelReservation/vendor/github.com/mattn/go-isatty/README.md @@ -0,0 +1,50 @@ +# go-isatty + +[![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty) +[![Codecov](https://codecov.io/gh/mattn/go-isatty/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-isatty) +[![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master) +[![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty) + +isatty for golang + +## Usage + +```go +package main + +import ( + "fmt" + "github.com/mattn/go-isatty" + "os" +) + +func main() { + if isatty.IsTerminal(os.Stdout.Fd()) { + fmt.Println("Is Terminal") + } else if isatty.IsCygwinTerminal(os.Stdout.Fd()) { + fmt.Println("Is Cygwin/MSYS2 Terminal") + } else { + fmt.Println("Is Not Terminal") + } +} +``` + +## Installation + +``` +$ go get github.com/mattn/go-isatty +``` + +## License + +MIT + +## Author + +Yasuhiro Matsumoto (a.k.a mattn) + +## Thanks + +* k-takata: base idea for IsCygwinTerminal + + https://github.com/k-takata/go-iscygpty diff --git a/hotelReservation/vendor/github.com/mattn/go-isatty/doc.go b/hotelReservation/vendor/github.com/mattn/go-isatty/doc.go new file mode 100644 index 000000000..17d4f90eb --- /dev/null +++ b/hotelReservation/vendor/github.com/mattn/go-isatty/doc.go @@ -0,0 +1,2 @@ +// Package isatty implements interface to isatty +package isatty diff --git a/hotelReservation/vendor/github.com/mattn/go-isatty/go.test.sh b/hotelReservation/vendor/github.com/mattn/go-isatty/go.test.sh new file mode 100644 index 000000000..012162b07 --- /dev/null +++ b/hotelReservation/vendor/github.com/mattn/go-isatty/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... | grep -v vendor); do + go test -race -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/hotelReservation/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/hotelReservation/vendor/github.com/mattn/go-isatty/isatty_bsd.go new file mode 100644 index 000000000..d569c0c94 --- /dev/null +++ b/hotelReservation/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -0,0 +1,19 @@ +//go:build (darwin || freebsd || openbsd || netbsd || dragonfly || hurd) && !appengine +// +build darwin freebsd openbsd netbsd dragonfly hurd +// +build !appengine + +package isatty + +import "golang.org/x/sys/unix" + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + _, err := unix.IoctlGetTermios(int(fd), unix.TIOCGETA) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. 
+func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/hotelReservation/vendor/github.com/mattn/go-isatty/isatty_others.go b/hotelReservation/vendor/github.com/mattn/go-isatty/isatty_others.go new file mode 100644 index 000000000..31503226f --- /dev/null +++ b/hotelReservation/vendor/github.com/mattn/go-isatty/isatty_others.go @@ -0,0 +1,16 @@ +//go:build appengine || js || nacl || wasm +// +build appengine js nacl wasm + +package isatty + +// IsTerminal returns true if the file descriptor is terminal which +// is always false on js and appengine classic which is a sandboxed PaaS. +func IsTerminal(fd uintptr) bool { + return false +} + +// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/hotelReservation/vendor/github.com/mattn/go-isatty/isatty_plan9.go b/hotelReservation/vendor/github.com/mattn/go-isatty/isatty_plan9.go new file mode 100644 index 000000000..bae7f9bb3 --- /dev/null +++ b/hotelReservation/vendor/github.com/mattn/go-isatty/isatty_plan9.go @@ -0,0 +1,23 @@ +//go:build plan9 +// +build plan9 + +package isatty + +import ( + "syscall" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + path, err := syscall.Fd2path(int(fd)) + if err != nil { + return false + } + return path == "/dev/cons" || path == "/mnt/term/dev/cons" +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/hotelReservation/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/hotelReservation/vendor/github.com/mattn/go-isatty/isatty_solaris.go new file mode 100644 index 000000000..0c3acf2dc --- /dev/null +++ b/hotelReservation/vendor/github.com/mattn/go-isatty/isatty_solaris.go @@ -0,0 +1,21 @@ +//go:build solaris && !appengine +// +build solaris,!appengine + +package isatty + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +// see: https://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libc/port/gen/isatty.c +func IsTerminal(fd uintptr) bool { + _, err := unix.IoctlGetTermio(int(fd), unix.TCGETA) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/hotelReservation/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/hotelReservation/vendor/github.com/mattn/go-isatty/isatty_tcgets.go new file mode 100644 index 000000000..67787657f --- /dev/null +++ b/hotelReservation/vendor/github.com/mattn/go-isatty/isatty_tcgets.go @@ -0,0 +1,19 @@ +//go:build (linux || aix || zos) && !appengine +// +build linux aix zos +// +build !appengine + +package isatty + +import "golang.org/x/sys/unix" + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + _, err := unix.IoctlGetTermios(int(fd), unix.TCGETS) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. 
+func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/hotelReservation/vendor/github.com/mattn/go-isatty/isatty_windows.go b/hotelReservation/vendor/github.com/mattn/go-isatty/isatty_windows.go new file mode 100644 index 000000000..8e3c99171 --- /dev/null +++ b/hotelReservation/vendor/github.com/mattn/go-isatty/isatty_windows.go @@ -0,0 +1,125 @@ +//go:build windows && !appengine +// +build windows,!appengine + +package isatty + +import ( + "errors" + "strings" + "syscall" + "unicode/utf16" + "unsafe" +) + +const ( + objectNameInfo uintptr = 1 + fileNameInfo = 2 + fileTypePipe = 3 +) + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + ntdll = syscall.NewLazyDLL("ntdll.dll") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx") + procGetFileType = kernel32.NewProc("GetFileType") + procNtQueryObject = ntdll.NewProc("NtQueryObject") +) + +func init() { + // Check if GetFileInformationByHandleEx is available. + if procGetFileInformationByHandleEx.Find() != nil { + procGetFileInformationByHandleEx = nil + } +} + +// IsTerminal returns true if the file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +// isCygwinPipeName checks whether a pipe name is used by a cygwin/msys2 pty. +// A Cygwin/MSYS2 PTY has a name like: +// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master +func isCygwinPipeName(name string) bool { + token := strings.Split(name, "-") + if len(token) < 5 { + return false + } + + if token[0] != `\msys` && + token[0] != `\cygwin` && + token[0] != `\Device\NamedPipe\msys` && + token[0] != `\Device\NamedPipe\cygwin` { + return false + } + + if token[1] == "" { + return false + } + + if !strings.HasPrefix(token[2], "pty") { + return false + } + + if token[3] != `from` && token[3] != `to` { + return false + } + + if token[4] != "master" { + return false + } + + return true +} + +// getFileNameByHandle uses the undocumented ntdll NtQueryObject call to get the full file name for a file handle. +// GetFileInformationByHandleEx is not available before Windows Vista, so this serves as a fallback for older +// systems such as Windows XP; it also works on Windows Vista through 10. +// See https://stackoverflow.com/a/18792477 for details. +func getFileNameByHandle(fd uintptr) (string, error) { + if procNtQueryObject == nil { + return "", errors.New("ntdll.dll: NtQueryObject not supported") + } + + var buf [4 + syscall.MAX_PATH]uint16 + var result int + r, _, e := syscall.Syscall6(procNtQueryObject.Addr(), 5, + fd, objectNameInfo, uintptr(unsafe.Pointer(&buf)), uintptr(2*len(buf)), uintptr(unsafe.Pointer(&result)), 0) + if r != 0 { + return "", e + } + return string(utf16.Decode(buf[4 : 4+buf[0]/2])), nil +} + +// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2 +// terminal. +func IsCygwinTerminal(fd uintptr) bool { + if procGetFileInformationByHandleEx == nil { + name, err := getFileNameByHandle(fd) + if err != nil { + return false + } + return isCygwinPipeName(name) + } + + // Cygwin/msys's pty is a pipe.
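+ // It cannot be detected with GetConsoleMode, so instead verify that the + // handle is a pipe and that the pipe's name matches the cygwin/msys pty + // scheme checked by isCygwinPipeName above.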
+ ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0) + if ft != fileTypePipe || e != 0 { + return false + } + + var buf [2 + syscall.MAX_PATH]uint16 + r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), + 4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)), + uintptr(len(buf)*2), 0, 0) + if r == 0 || e != 0 { + return false + } + + l := *(*uint32)(unsafe.Pointer(&buf)) + return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2]))) +} diff --git a/hotelReservation/vendor/github.com/montanaflynn/stats/.gitignore b/hotelReservation/vendor/github.com/montanaflynn/stats/.gitignore new file mode 100644 index 000000000..96b11286e --- /dev/null +++ b/hotelReservation/vendor/github.com/montanaflynn/stats/.gitignore @@ -0,0 +1,2 @@ +coverage.out +.directory \ No newline at end of file diff --git a/hotelReservation/vendor/github.com/montanaflynn/stats/.travis.yml b/hotelReservation/vendor/github.com/montanaflynn/stats/.travis.yml new file mode 100644 index 000000000..697dcb759 --- /dev/null +++ b/hotelReservation/vendor/github.com/montanaflynn/stats/.travis.yml @@ -0,0 +1,20 @@ +language: go +go: + - 1.1 + - 1.2 + - 1.3 + - 1.4 + - 1.5 + - tip +before_install: + - sudo pip install codecov +script: + - go test +after_success: + - codecov +notifications: + email: + recipients: + - montana@montanaflynn.me + on_success: change + on_failure: always diff --git a/hotelReservation/vendor/github.com/montanaflynn/stats/CHANGELOG.md b/hotelReservation/vendor/github.com/montanaflynn/stats/CHANGELOG.md new file mode 100644 index 000000000..532f6ed3f --- /dev/null +++ b/hotelReservation/vendor/github.com/montanaflynn/stats/CHANGELOG.md @@ -0,0 +1,64 @@ +# Change Log + +## [0.2.0](https://github.com/montanaflynn/stats/tree/0.2.0) + +### Merged pull requests: + +- Fixed typographical error, changed accomdate to accommodate in README. [\#5](https://github.com/montanaflynn/stats/pull/5) ([orthographic-pedant](https://github.com/orthographic-pedant)) + +### Package changes: + +- Add `Correlation` function +- Add `Covariance` function +- Add `StandardDeviation` function to be the same as `StandardDeviationPopulation` +- Change `Variance` function to be the same as `PopulationVariation` +- Add helper methods to `Float64Data` +- Add `Float64Data` type to use instead of `[]float64` +- Add `Series` type which references to `[]Coordinate` + +## [0.1.0](https://github.com/montanaflynn/stats/tree/0.1.0) + +Several functions were renamed in this release. They will still function but may be deprecated in the future.
+ +### Package changes: + +- Rename `VarP` to `PopulationVariance` +- Rename `VarS` to `SampleVariance` +- Rename `LinReg` to `LinearRegression` +- Rename `ExpReg` to `ExponentialRegression` +- Rename `LogReg` to `LogarithmicRegression` +- Rename `StdDevP` to `StandardDeviationPopulation` +- Rename `StdDevS` to `StandardDeviationSample` + +## [0.0.9](https://github.com/montanaflynn/stats/tree/0.0.9) + +### Closed issues: + +- Functions have unexpected side effects [\#3](https://github.com/montanaflynn/stats/issues/3) +- Percentile is not calculated correctly [\#2](https://github.com/montanaflynn/stats/issues/2) + +### Merged pull requests: + +- Sample [\#4](https://github.com/montanaflynn/stats/pull/4) ([saromanov](https://github.com/saromanov)) + +### Package changes: + +- Add HarmonicMean func +- Add GeometricMean func +- Add Outliers struct and QuantileOutliers func +- Add Interquartile Range, Midhinge and Trimean examples +- Add Trimean +- Add Midhinge +- Add Inter Quartile Range +- Add Quantiles struct and Quantile func +- Add Nearest Rank method of calculating percentiles +- Add errors for all functions +- Add sample +- Add Linear, Exponential and Logarithmic Regression +- Add sample and population variance and deviation +- Add Percentile and Float64ToInt +- Add Round +- Add Standard deviation +- Add Sum +- Add Min and Max +- Add Mean, Median and Mode diff --git a/hotelReservation/vendor/github.com/montanaflynn/stats/LICENSE b/hotelReservation/vendor/github.com/montanaflynn/stats/LICENSE new file mode 100644 index 000000000..664818176 --- /dev/null +++ b/hotelReservation/vendor/github.com/montanaflynn/stats/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014-2015 Montana Flynn (https://anonfunction.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/hotelReservation/vendor/github.com/montanaflynn/stats/Makefile b/hotelReservation/vendor/github.com/montanaflynn/stats/Makefile new file mode 100644 index 000000000..87844f485 --- /dev/null +++ b/hotelReservation/vendor/github.com/montanaflynn/stats/Makefile @@ -0,0 +1,29 @@ +.PHONY: all + +doc: + godoc `pwd` + +webdoc: + godoc -http=:44444 + +format: + go fmt + +test: + go test -race + +check: format test + +benchmark: + go test -bench=.
-benchmem + +coverage: + go test -coverprofile=coverage.out + go tool cover -html="coverage.out" + +lint: format + go get github.com/alecthomas/gometalinter + gometalinter --install + gometalinter + +default: lint test diff --git a/hotelReservation/vendor/github.com/montanaflynn/stats/README.md b/hotelReservation/vendor/github.com/montanaflynn/stats/README.md new file mode 100644 index 000000000..5f8a9291b --- /dev/null +++ b/hotelReservation/vendor/github.com/montanaflynn/stats/README.md @@ -0,0 +1,103 @@ +# Stats [![][travis-svg]][travis-url] [![][coveralls-svg]][coveralls-url] [![][godoc-svg]][godoc-url] [![][license-svg]][license-url] + +A statistics package with many functions missing from the Golang standard library. See the [CHANGELOG.md](https://github.com/montanaflynn/stats/blob/master/CHANGELOG.md) for API changes and tagged releases you can vendor into your projects. + +> Statistics are used much like a drunk uses a lamppost: for support, not illumination. **- Vin Scully** + +## Installation + +``` +go get github.com/montanaflynn/stats +``` + +**Protip:** `go get -u github.com/montanaflynn/stats` updates stats to the latest version. + +## Usage + +The [entire API documentation](http://godoc.org/github.com/montanaflynn/stats) is available on GoDoc.org + +You can view docs offline with the following commands: + +``` +godoc ./ +godoc ./ Median +godoc ./ Float64Data +``` + +**Protip:** Generate HTML docs with `godoc -http=:4444` + +## Example + +All the functions can be seen in [examples/main.go](https://github.com/montanaflynn/stats/blob/master/examples/main.go) but here's a little taste: + +```go +// start with some source data to use +var data = []float64{1, 2, 3, 4, 4, 5} + +median, _ := stats.Median(data) +fmt.Println(median) // 3.5 + +roundedMedian, _ := stats.Round(median, 0) +fmt.Println(roundedMedian) // 4 +``` + +**Protip:** You can [call methods](https://github.com/montanaflynn/stats/blob/master/examples/methods.go) on the data if using the Float64Data type: + +``` +var d stats.Float64Data = data + +max, _ := d.Max() +fmt.Println(max) // 5 +``` + +## Contributing + +If you have any suggestions, criticism or bug reports please [create an issue](https://github.com/montanaflynn/stats/issues) and I'll do my best to accommodate you. In addition simply starring the repo would show your support for the project and be very much appreciated! + +### Pull Requests + +Pull requests are always welcome no matter how big or small. Here's an easy way to do it: + +1. Fork it and clone your fork +2. Create new branch (`git checkout -b some-thing`) +3. Make the desired changes +4. Ensure tests pass (`go test -cover` or `make test`) +5. Commit changes (`git commit -am 'Did something'`) +6. Push branch (`git push origin some-thing`) +7. Submit pull request + +To make things as seamless as possible please also consider the following steps: + +- Update `README.md` to include new public types or functions in the documentation section. +- Update `examples/main.go` with a simple example of the new feature. +- Keep 100% code coverage (you can check with `make coverage`). +- Run [`gometalinter`](https://github.com/alecthomas/gometalinter) and make your code pass. +- Squash needless commits into single units of work with `git rebase -i new-feature`. +
+#### Makefile + +I've included a [Makefile](https://github.com/montanaflynn/stats/blob/master/Makefile) that has a lot of helper targets for common actions such as linting, testing, code coverage reporting and more.
+ +**Protip:** `watch -n 1 make check` will continuously format and test your code. + +## MIT License + +Copyright (c) 2014-2015 Montana Flynn + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +[travis-url]: https://travis-ci.org/montanaflynn/stats +[travis-svg]: https://img.shields.io/travis/montanaflynn/stats.svg + +[coveralls-url]: https://coveralls.io/r/montanaflynn/stats?branch=master +[coveralls-svg]: https://img.shields.io/coveralls/montanaflynn/stats.svg + +[godoc-url]: https://godoc.org/github.com/montanaflynn/stats +[godoc-svg]: https://godoc.org/github.com/montanaflynn/stats?status.svg + +[license-url]: https://github.com/montanaflynn/stats/blob/master/LICENSE +[license-svg]: https://img.shields.io/badge/license-MIT-blue.svg diff --git a/hotelReservation/vendor/github.com/montanaflynn/stats/correlation.go b/hotelReservation/vendor/github.com/montanaflynn/stats/correlation.go new file mode 100644 index 000000000..d759bf8c4 --- /dev/null +++ b/hotelReservation/vendor/github.com/montanaflynn/stats/correlation.go @@ -0,0 +1,33 @@ +package stats + +import "math" + +// Correlation describes the degree of relationship between two sets of data +func Correlation(data1, data2 Float64Data) (float64, error) { + + l1 := data1.Len() + l2 := data2.Len() + + if l1 == 0 || l2 == 0 { + return math.NaN(), EmptyInput + } + + if l1 != l2 { + return math.NaN(), SizeErr + } + + sdev1, _ := StandardDeviationPopulation(data1) + sdev2, _ := StandardDeviationPopulation(data2) + + if sdev1 == 0 || sdev2 == 0 { + return 0, nil + } + + covp, _ := CovariancePopulation(data1, data2) + return covp / (sdev1 * sdev2), nil +} + +// Pearson calculates the Pearson product-moment correlation coefficient between two variables.
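+// For population data this is CovariancePopulation(X, Y) divided by the +// product of the population standard deviations, which is exactly what +// Correlation above computes.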
diff --git a/hotelReservation/vendor/github.com/montanaflynn/stats/data.go b/hotelReservation/vendor/github.com/montanaflynn/stats/data.go new file mode 100644 index 000000000..a087f457a --- /dev/null +++ b/hotelReservation/vendor/github.com/montanaflynn/stats/data.go @@ -0,0 +1,140 @@ +package stats + +// Float64Data is a named type for []float64 with helper methods +type Float64Data []float64 + +// Get item in slice +func (f Float64Data) Get(i int) float64 { return f[i] } + +// Len returns length of slice +func (f Float64Data) Len() int { return len(f) } + +// Less reports whether one number is less than another +func (f Float64Data) Less(i, j int) bool { return f[i] < f[j] } + +// Swap switches out two numbers in slice +func (f Float64Data) Swap(i, j int) { f[i], f[j] = f[j], f[i] } + +// Min returns the minimum number in the data +func (f Float64Data) Min() (float64, error) { return Min(f) } + +// Max returns the maximum number in the data +func (f Float64Data) Max() (float64, error) { return Max(f) } + +// Sum returns the total of all the numbers in the data +func (f Float64Data) Sum() (float64, error) { return Sum(f) } + +// Mean returns the mean of the data +func (f Float64Data) Mean() (float64, error) { return Mean(f) } + +// Median returns the median of the data +func (f Float64Data) Median() (float64, error) { return Median(f) } + +// Mode returns the mode of the data +func (f Float64Data) Mode() ([]float64, error) { return Mode(f) } + +// GeometricMean returns the geometric mean of the data +func (f Float64Data) GeometricMean() (float64, error) { return GeometricMean(f) } + +// HarmonicMean returns the harmonic mean of the data +func (f Float64Data) HarmonicMean() (float64, error) { return HarmonicMean(f) } + +// MedianAbsoluteDeviation finds the median of the absolute deviations from the dataset median +func (f Float64Data) MedianAbsoluteDeviation() (float64, error) { + return MedianAbsoluteDeviation(f) +} + +// MedianAbsoluteDeviationPopulation finds the median of the absolute deviations from the population median +func (f Float64Data) MedianAbsoluteDeviationPopulation() (float64, error) { + return MedianAbsoluteDeviationPopulation(f) +} + +// StandardDeviation finds the amount of variation in the dataset +func (f Float64Data) StandardDeviation() (float64, error) { + return StandardDeviation(f) +} + +// StandardDeviationPopulation finds the amount of variation from the population +func (f Float64Data) StandardDeviationPopulation() (float64, error) { + return StandardDeviationPopulation(f) +} + +// StandardDeviationSample finds the amount of variation from a sample +func (f Float64Data) StandardDeviationSample() (float64, error) { + return StandardDeviationSample(f) +} + +// QuartileOutliers finds the mild and extreme outliers +func (f Float64Data) QuartileOutliers() (Outliers, error) { + return QuartileOutliers(f) +} + +// Percentile finds the relative standing in a slice of floats +func (f Float64Data) Percentile(p float64) (float64, error) { + return Percentile(f, p) +} + +// PercentileNearestRank finds the relative standing using the Nearest Rank method +func (f Float64Data) PercentileNearestRank(p float64) (float64, error) { + return PercentileNearestRank(f, p) +} + +// Correlation describes the degree of relationship between two sets of data +func (f Float64Data) Correlation(d Float64Data) (float64, error) { + return Correlation(f, d) +} + +// Pearson calculates the Pearson product-moment correlation
coefficient between two variables. +func (f Float64Data) Pearson(d Float64Data) (float64, error) { + return Pearson(f, d) +} + +// Quartile returns the three quartile points from a slice of data +func (f Float64Data) Quartile(d Float64Data) (Quartiles, error) { + return Quartile(d) +} + +// InterQuartileRange finds the range between Q1 and Q3 +func (f Float64Data) InterQuartileRange() (float64, error) { + return InterQuartileRange(f) +} + +// Midhinge finds the average of the first and third quartiles +func (f Float64Data) Midhinge(d Float64Data) (float64, error) { + return Midhinge(d) +} + +// Trimean finds the average of the median and the midhinge +func (f Float64Data) Trimean(d Float64Data) (float64, error) { + return Trimean(d) +} + +// Sample returns a sample of the input, with or without replacement +func (f Float64Data) Sample(n int, r bool) ([]float64, error) { + return Sample(f, n, r) +} + +// Variance finds the amount of variation in the dataset +func (f Float64Data) Variance() (float64, error) { + return Variance(f) +} + +// PopulationVariance finds the amount of variance within a population +func (f Float64Data) PopulationVariance() (float64, error) { + return PopulationVariance(f) +} + +// SampleVariance finds the amount of variance within a sample +func (f Float64Data) SampleVariance() (float64, error) { + return SampleVariance(f) +} + +// Covariance is a measure of how much two sets of data change +func (f Float64Data) Covariance(d Float64Data) (float64, error) { + return Covariance(f, d) +} + +// CovariancePopulation computes covariance for entire population between two variables. +func (f Float64Data) CovariancePopulation(d Float64Data) (float64, error) { + return CovariancePopulation(f, d) +} diff --git a/hotelReservation/vendor/github.com/montanaflynn/stats/data_set_distances.go b/hotelReservation/vendor/github.com/montanaflynn/stats/data_set_distances.go new file mode 100644 index 000000000..2e549c8d4 --- /dev/null +++ b/hotelReservation/vendor/github.com/montanaflynn/stats/data_set_distances.go @@ -0,0 +1,94 @@ +package stats + +import ( + "math" +) + +// Validate data for distance calculation +func validateData(dataPointX, dataPointY []float64) error { + if len(dataPointX) == 0 || len(dataPointY) == 0 { + return EmptyInput + } + + if len(dataPointX) != len(dataPointY) { + return SizeErr + } + return nil +} + +// Computes Chebyshev distance between two data sets +func ChebyshevDistance(dataPointX, dataPointY []float64) (distance float64, err error) { + err = validateData(dataPointX, dataPointY) + if err != nil { + return math.NaN(), err + } + var tempDistance float64 + for i := 0; i < len(dataPointY); i++ { + tempDistance = math.Abs(dataPointX[i] - dataPointY[i]) + if distance < tempDistance { + distance = tempDistance + } + } + return distance, nil +} + +// +// Computes Euclidean distance between two data sets +// +func EuclideanDistance(dataPointX, dataPointY []float64) (distance float64, err error) { + + err = validateData(dataPointX, dataPointY) + if err != nil { + return math.NaN(), err + } + distance = 0 + for i := 0; i < len(dataPointX); i++ { + distance = distance + ((dataPointX[i] - dataPointY[i]) * (dataPointX[i] - dataPointY[i])) + } + return math.Sqrt(distance), nil +} + +// +// Computes Manhattan distance between two data sets +// +func ManhattanDistance(dataPointX, dataPointY []float64) (distance float64, err error) { + err = validateData(dataPointX, dataPointY) + if err != nil { + return math.NaN(), err + } + distance = 0 + for i := 0; i < len(dataPointX); i++ { +
distance = distance + math.Abs(dataPointX[i]-dataPointY[i]) + } + return distance, nil +} + +// +// Computes Minkowski distance between two data sets. +// +// Input: +// dataPointX: First set of data points +// dataPointY: Second set of data points. Length of both data +// sets must be equal. +// lambda: aka p or city blocks; with lambda = 1 the +// returned distance is the Manhattan distance, with +// lambda = 2 it is the Euclidean distance, and as +// lambda approaches infinity it tends to the +// Chebyshev distance. +// Output: +// Distance or error +// +func MinkowskiDistance(dataPointX, dataPointY []float64, lambda float64) (distance float64, err error) { + err = validateData(dataPointX, dataPointY) + if err != nil { + return math.NaN(), err + } + for i := 0; i < len(dataPointY); i++ { + distance = distance + math.Pow(math.Abs(dataPointX[i]-dataPointY[i]), lambda) + } + distance = math.Pow(distance, float64(1/lambda)) + if math.IsInf(distance, 1) { + return math.NaN(), InfValue + } + return distance, nil +} diff --git a/hotelReservation/vendor/github.com/montanaflynn/stats/deviation.go b/hotelReservation/vendor/github.com/montanaflynn/stats/deviation.go new file mode 100644 index 000000000..539c02bcf --- /dev/null +++ b/hotelReservation/vendor/github.com/montanaflynn/stats/deviation.go @@ -0,0 +1,57 @@ +package stats + +import "math" + +// MedianAbsoluteDeviation finds the median of the absolute deviations from the dataset median +func MedianAbsoluteDeviation(input Float64Data) (mad float64, err error) { + return MedianAbsoluteDeviationPopulation(input) +} + +// MedianAbsoluteDeviationPopulation finds the median of the absolute deviations from the population median +func MedianAbsoluteDeviationPopulation(input Float64Data) (mad float64, err error) { + if input.Len() == 0 { + return math.NaN(), EmptyInput + } + + i := copyslice(input) + m, _ := Median(i) + + for key, value := range i { + i[key] = math.Abs(value - m) + } + + return Median(i) +} + +// StandardDeviation finds the amount of variation in the dataset +func StandardDeviation(input Float64Data) (sdev float64, err error) { + return StandardDeviationPopulation(input) +} + +// StandardDeviationPopulation finds the amount of variation from the population +func StandardDeviationPopulation(input Float64Data) (sdev float64, err error) { + + if input.Len() == 0 { + return math.NaN(), EmptyInput + } + + // Get the population variance + vp, _ := PopulationVariance(input) + + // Return the population standard deviation + return math.Pow(vp, 0.5), nil +} + +// StandardDeviationSample finds the amount of variation from a sample +func StandardDeviationSample(input Float64Data) (sdev float64, err error) { + + if input.Len() == 0 { + return math.NaN(), EmptyInput + } + + // Get the sample variance + vs, _ := SampleVariance(input) + + // Return the sample standard deviation + return math.Pow(vs, 0.5), nil +} diff --git a/hotelReservation/vendor/github.com/montanaflynn/stats/errors.go b/hotelReservation/vendor/github.com/montanaflynn/stats/errors.go new file mode 100644 index 000000000..0bb32f0dd --- /dev/null +++ b/hotelReservation/vendor/github.com/montanaflynn/stats/errors.go @@ -0,0 +1,22 @@ +package stats + +type statsErr struct { + err string +} + +func (s statsErr) Error() string { + return s.err +} + +// These are the package-wide error values. +// All error identification should use these values.
+var ( + EmptyInput = statsErr{"Input must not be empty."} + SampleSize = statsErr{"Samples number must be less than input length."} + NaNErr = statsErr{"Not a number"} + NegativeErr = statsErr{"Slice must not contain negative values."} + ZeroErr = statsErr{"Slice must not contain zero values."} + BoundsErr = statsErr{"Input is outside of range."} + SizeErr = statsErr{"Slices must be the same length."} + InfValue = statsErr{"Value is infinite."} +) diff --git a/hotelReservation/vendor/github.com/montanaflynn/stats/legacy.go b/hotelReservation/vendor/github.com/montanaflynn/stats/legacy.go new file mode 100644 index 000000000..17557abd9 --- /dev/null +++ b/hotelReservation/vendor/github.com/montanaflynn/stats/legacy.go @@ -0,0 +1,36 @@ +package stats + +// VarP is a shortcut to PopulationVariance +func VarP(input Float64Data) (sdev float64, err error) { + return PopulationVariance(input) +} + +// VarS is a shortcut to SampleVariance +func VarS(input Float64Data) (sdev float64, err error) { + return SampleVariance(input) +} + +// StdDevP is a shortcut to StandardDeviationPopulation +func StdDevP(input Float64Data) (sdev float64, err error) { + return StandardDeviationPopulation(input) +} + +// StdDevS is a shortcut to StandardDeviationSample +func StdDevS(input Float64Data) (sdev float64, err error) { + return StandardDeviationSample(input) +} + +// LinReg is a shortcut to LinearRegression +func LinReg(s []Coordinate) (regressions []Coordinate, err error) { + return LinearRegression(s) +} + +// ExpReg is a shortcut to ExponentialRegression +func ExpReg(s []Coordinate) (regressions []Coordinate, err error) { + return ExponentialRegression(s) +} + +// LogReg is a shortcut to LogarithmicRegression +func LogReg(s []Coordinate) (regressions []Coordinate, err error) { + return LogarithmicRegression(s) +} diff --git a/hotelReservation/vendor/github.com/montanaflynn/stats/load.go b/hotelReservation/vendor/github.com/montanaflynn/stats/load.go new file mode 100644 index 000000000..1012d0bb5 --- /dev/null +++ b/hotelReservation/vendor/github.com/montanaflynn/stats/load.go @@ -0,0 +1,184 @@ +package stats + +import ( + "strconv" + "time" +) + +// LoadRawData parses and converts a slice of mixed data types to floats +func LoadRawData(raw interface{}) (f Float64Data) { + var r []interface{} + var s Float64Data + + switch t := raw.(type) { + case []interface{}: + r = t + case []uint: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []uint8: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []uint16: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []uint32: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []uint64: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []bool: + for _, v := range t { + if v == true { + s = append(s, 1.0) + } else { + s = append(s, 0.0) + } + } + return s + case []float64: + return Float64Data(t) + case []int: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []int8: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []int16: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []int32: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []int64: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []string: + for _, v := range t { + r = append(r, v) + } + case []time.Duration: + for _, v := range t { + r = append(r, v) + } + case map[int]int: + 
for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]int8: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]int16: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]int32: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]int64: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]string: + for i := 0; i < len(t); i++ { + r = append(r, t[i]) + } + case map[int]uint: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]uint8: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]uint16: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]uint32: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]uint64: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]bool: + for i := 0; i < len(t); i++ { + if t[i] == true { + s = append(s, 1.0) + } else { + s = append(s, 0.0) + } + } + return s + case map[int]float64: + for i := 0; i < len(t); i++ { + s = append(s, t[i]) + } + return s + case map[int]time.Duration: + for i := 0; i < len(t); i++ { + r = append(r, t[i]) + } + } + + for _, v := range r { + switch t := v.(type) { + case int: + a := float64(t) + f = append(f, a) + case uint: + f = append(f, float64(t)) + case float64: + f = append(f, t) + case string: + fl, err := strconv.ParseFloat(t, 64) + if err == nil { + f = append(f, fl) + } + case bool: + if t == true { + f = append(f, 1.0) + } else { + f = append(f, 0.0) + } + case time.Duration: + f = append(f, float64(t)) + } + } + return f +} diff --git a/hotelReservation/vendor/github.com/montanaflynn/stats/max.go b/hotelReservation/vendor/github.com/montanaflynn/stats/max.go new file mode 100644 index 000000000..d0fdd42b4 --- /dev/null +++ b/hotelReservation/vendor/github.com/montanaflynn/stats/max.go @@ -0,0 +1,24 @@ +package stats + +import "math" + +// Max finds the highest number in a slice +func Max(input Float64Data) (max float64, err error) { + + // Return an error if there are no numbers + if input.Len() == 0 { + return math.NaN(), EmptyInput + } + + // Get the first value as the starting point + max = input.Get(0) + + // Loop and replace higher values + for i := 1; i < input.Len(); i++ { + if input.Get(i) > max { + max = input.Get(i) + } + } + + return max, nil +} diff --git a/hotelReservation/vendor/github.com/montanaflynn/stats/mean.go b/hotelReservation/vendor/github.com/montanaflynn/stats/mean.go new file mode 100644 index 000000000..944bb6572 --- /dev/null +++ b/hotelReservation/vendor/github.com/montanaflynn/stats/mean.go @@ -0,0 +1,60 @@ +package stats + +import "math" + +// Mean gets the average of a slice of numbers +func Mean(input Float64Data) (float64, error) { + + if input.Len() == 0 { + return math.NaN(), EmptyInput + } + + sum, _ := input.Sum() + + return sum / float64(input.Len()), nil +} + +// GeometricMean gets the geometric mean for a slice of numbers +func GeometricMean(input Float64Data) (float64, error) { + + l := input.Len() + if l == 0 { + return math.NaN(), EmptyInput + } + + // Get the product of all the numbers + var p float64 + for _, n := range input { + if p == 0 { + p = n + } else { + p *= n + } + } + + // Calculate the geometric mean + return math.Pow(p, 1/float64(l)), nil +} + +// HarmonicMean gets 
the harmonic mean for a slice of numbers +func HarmonicMean(input Float64Data) (float64, error) { + + l := input.Len() + if l == 0 { + return math.NaN(), EmptyInput + } + + // Get the sum of all the numbers reciprocals and return an + // error for values that cannot be included in harmonic mean + var p float64 + for _, n := range input { + if n < 0 { + return math.NaN(), NegativeErr + } else if n == 0 { + return math.NaN(), ZeroErr + } + p += (1 / n) + } + + return float64(l) / p, nil +} diff --git a/hotelReservation/vendor/github.com/montanaflynn/stats/median.go b/hotelReservation/vendor/github.com/montanaflynn/stats/median.go new file mode 100644 index 000000000..b13d8394b --- /dev/null +++ b/hotelReservation/vendor/github.com/montanaflynn/stats/median.go @@ -0,0 +1,25 @@ +package stats + +import "math" + +// Median gets the median number in a slice of numbers +func Median(input Float64Data) (median float64, err error) { + + // Start by sorting a copy of the slice + c := sortedCopy(input) + + // No math is needed if there are no numbers + // For even numbers we add the two middle numbers + // and divide by two using the mean function above + // For odd numbers we just use the middle number + l := len(c) + if l == 0 { + return math.NaN(), EmptyInput + } else if l%2 == 0 { + median, _ = Mean(c[l/2-1 : l/2+1]) + } else { + median = float64(c[l/2]) + } + + return median, nil +} diff --git a/hotelReservation/vendor/github.com/montanaflynn/stats/min.go b/hotelReservation/vendor/github.com/montanaflynn/stats/min.go new file mode 100644 index 000000000..4383852e1 --- /dev/null +++ b/hotelReservation/vendor/github.com/montanaflynn/stats/min.go @@ -0,0 +1,26 @@ +package stats + +import "math" + +// Min finds the lowest number in a set of data +func Min(input Float64Data) (min float64, err error) { + + // Get the count of numbers in the slice + l := input.Len() + + // Return an error if there are no numbers + if l == 0 { + return math.NaN(), EmptyInput + } + + // Get the first value as the starting point + min = input.Get(0) + + // Iterate until done checking for a lower value + for i := 1; i < l; i++ { + if input.Get(i) < min { + min = input.Get(i) + } + } + return min, nil +} diff --git a/hotelReservation/vendor/github.com/montanaflynn/stats/mode.go b/hotelReservation/vendor/github.com/montanaflynn/stats/mode.go new file mode 100644 index 000000000..1160faf28 --- /dev/null +++ b/hotelReservation/vendor/github.com/montanaflynn/stats/mode.go @@ -0,0 +1,47 @@ +package stats + +// Mode gets the mode [most frequent value(s)] of a slice of float64s +func Mode(input Float64Data) (mode []float64, err error) { + // Return the input if there's only one number + l := input.Len() + if l == 1 { + return input, nil + } else if l == 0 { + return nil, EmptyInput + } + + c := sortedCopyDif(input) + // Traverse sorted array, + // tracking the longest repeating sequence + mode = make([]float64, 5) + cnt, maxCnt := 1, 1 + for i := 1; i < l; i++ { + switch { + case c[i] == c[i-1]: + cnt++ + case cnt == maxCnt && maxCnt != 1: + mode = append(mode, c[i-1]) + cnt = 1 + case cnt > maxCnt: + mode = append(mode[:0], c[i-1]) + maxCnt, cnt = cnt, 1 + default: + cnt = 1 + } + } + switch { + case cnt == maxCnt: + mode = append(mode, c[l-1]) + case cnt > maxCnt: + mode = append(mode[:0], c[l-1]) + maxCnt = cnt + } + + // Since length must be greater than 1, + // check for slices of distinct values + if maxCnt == 1 { + return Float64Data{}, nil + } + + return mode, nil +} diff --git 
a/hotelReservation/vendor/github.com/montanaflynn/stats/outlier.go b/hotelReservation/vendor/github.com/montanaflynn/stats/outlier.go new file mode 100644 index 000000000..e969180ea --- /dev/null +++ b/hotelReservation/vendor/github.com/montanaflynn/stats/outlier.go @@ -0,0 +1,44 @@ +package stats + +// Outliers holds mild and extreme outliers found in data +type Outliers struct { + Mild Float64Data + Extreme Float64Data +} + +// QuartileOutliers finds the mild and extreme outliers +func QuartileOutliers(input Float64Data) (Outliers, error) { + if input.Len() == 0 { + return Outliers{}, EmptyInput + } + + // Start by sorting a copy of the slice + copy := sortedCopy(input) + + // Calculate the quartiles and interquartile range + qs, _ := Quartile(copy) + iqr, _ := InterQuartileRange(copy) + + // Calculate the lower and upper inner and outer fences + lif := qs.Q1 - (1.5 * iqr) + uif := qs.Q3 + (1.5 * iqr) + lof := qs.Q1 - (3 * iqr) + uof := qs.Q3 + (3 * iqr) + + // Find the data points that are outside of the + // inner and outer fences and add them to mild + // and extreme outlier slices + var mild Float64Data + var extreme Float64Data + for _, v := range copy { + + if v < lof || v > uof { + extreme = append(extreme, v) + } else if v < lif || v > uif { + mild = append(mild, v) + } + } + + // Wrap them into our struct + return Outliers{mild, extreme}, nil +} diff --git a/hotelReservation/vendor/github.com/montanaflynn/stats/percentile.go b/hotelReservation/vendor/github.com/montanaflynn/stats/percentile.go new file mode 100644 index 000000000..baf24d8e3 --- /dev/null +++ b/hotelReservation/vendor/github.com/montanaflynn/stats/percentile.go @@ -0,0 +1,80 @@ +package stats + +import "math" + +// Percentile finds the relative standing in a slice of floats +func Percentile(input Float64Data, percent float64) (percentile float64, err error) { + + if input.Len() == 0 { + return math.NaN(), EmptyInput + } + + if percent <= 0 || percent > 100 { + return math.NaN(), BoundsErr + } + + // Start by sorting a copy of the slice + c := sortedCopy(input) + + // Multiply percent by length of input + index := (percent / 100) * float64(len(c)) + + // Check if the index is a whole number + if index == float64(int64(index)) { + + // Convert float to int + i := int(index) + + // Find the value at the index + percentile = c[i-1] + + } else if index > 1 { + + // Convert float to int via truncation + i := int(index) + + // Find the average of the index and following values + percentile, _ = Mean(Float64Data{c[i-1], c[i]}) + + } else { + return math.NaN(), BoundsErr + } + + return percentile, nil + +} + +// PercentileNearestRank finds the relative standing in a slice of floats using the Nearest Rank method +func PercentileNearestRank(input Float64Data, percent float64) (percentile float64, err error) { + + // Find the length of items in the slice + il := input.Len() + + // Return an error for empty slices + if il == 0 { + return math.NaN(), EmptyInput + } + + // Return error for less than 0 or greater than 100 percentages + if percent < 0 || percent > 100 { + return math.NaN(), BoundsErr + } + + // Start by sorting a copy of the slice + c := sortedCopy(input) + + // Return the last item + if percent == 100.0 { + return c[il-1], nil + } + + // Find ordinal ranking + or := int(math.Ceil(float64(il) * percent / 100)) + + // Return the item that is in the place of the ordinal rank + if or == 0 { + return c[0], nil + } + return c[or-1], nil + +}
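QuartileOutliers classifies points outside the inner fences (1.5 x IQR beyond Q1/Q3) as mild outliers and points outside the outer fences (3 x IQR) as extreme outliers. A hedged sketch of calling it (the data values are hypothetical):

```go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	// Hypothetical data with one far-out point.
	data := stats.Float64Data{1, 2, 3, 4, 5, 6, 7, 100}

	out, err := stats.QuartileOutliers(data)
	if err != nil {
		panic(err)
	}
	// For this data, 100 sits beyond the outer fence (Q3 + 3*IQR),
	// so it is reported as an extreme outlier.
	fmt.Println(out.Mild, out.Extreme)
}
```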
diff --git a/hotelReservation/vendor/github.com/montanaflynn/stats/quartile.go b/hotelReservation/vendor/github.com/montanaflynn/stats/quartile.go new file mode 100644 index 000000000..29bb3a37a --- /dev/null +++ b/hotelReservation/vendor/github.com/montanaflynn/stats/quartile.go @@ -0,0 +1,74 @@ +package stats + +import "math" + +// Quartiles holds the three quartile points +type Quartiles struct { + Q1 float64 + Q2 float64 + Q3 float64 +} + +// Quartile returns the three quartile points from a slice of data +func Quartile(input Float64Data) (Quartiles, error) { + + il := input.Len() + if il == 0 { + return Quartiles{}, EmptyInput + } + + // Start by sorting a copy of the slice + copy := sortedCopy(input) + + // Find the cutoff places depending on whether + // the input slice length is even or odd + var c1 int + var c2 int + if il%2 == 0 { + c1 = il / 2 + c2 = il / 2 + } else { + c1 = (il - 1) / 2 + c2 = c1 + 1 + } + + // Find the Medians with the cutoff points + Q1, _ := Median(copy[:c1]) + Q2, _ := Median(copy) + Q3, _ := Median(copy[c2:]) + + return Quartiles{Q1, Q2, Q3}, nil + +} + +// InterQuartileRange finds the range between Q1 and Q3 +func InterQuartileRange(input Float64Data) (float64, error) { + if input.Len() == 0 { + return math.NaN(), EmptyInput + } + qs, _ := Quartile(input) + iqr := qs.Q3 - qs.Q1 + return iqr, nil +} + +// Midhinge finds the average of the first and third quartiles +func Midhinge(input Float64Data) (float64, error) { + if input.Len() == 0 { + return math.NaN(), EmptyInput + } + qs, _ := Quartile(input) + mh := (qs.Q1 + qs.Q3) / 2 + return mh, nil +} + +// Trimean finds the average of the median and the midhinge +func Trimean(input Float64Data) (float64, error) { + if input.Len() == 0 { + return math.NaN(), EmptyInput + } + + c := sortedCopy(input) + q, _ := Quartile(c) + + return (q.Q1 + (q.Q2 * 2) + q.Q3) / 4, nil +} diff --git a/hotelReservation/vendor/github.com/montanaflynn/stats/regression.go b/hotelReservation/vendor/github.com/montanaflynn/stats/regression.go new file mode 100644 index 000000000..a37a74060 --- /dev/null +++ b/hotelReservation/vendor/github.com/montanaflynn/stats/regression.go @@ -0,0 +1,113 @@ +package stats + +import "math" + +// Series is a container for a series of data +type Series []Coordinate + +// Coordinate holds the data in a series +type Coordinate struct { + X, Y float64 +} + +// LinearRegression finds the least squares linear regression on data series +func LinearRegression(s Series) (regressions Series, err error) { + + if len(s) == 0 { + return nil, EmptyInput + } + + // Placeholder for the math to be done + var sum [5]float64 + + // Loop over data keeping index in place + i := 0 + for ; i < len(s); i++ { + sum[0] += s[i].X + sum[1] += s[i].Y + sum[2] += s[i].X * s[i].X + sum[3] += s[i].X * s[i].Y + sum[4] += s[i].Y * s[i].Y + } + + // Find gradient and intercept + f := float64(i) + gradient := (f*sum[3] - sum[0]*sum[1]) / (f*sum[2] - sum[0]*sum[0]) + intercept := (sum[1] / f) - (gradient * sum[0] / f) + + // Create the new regression series + for j := 0; j < len(s); j++ { + regressions = append(regressions, Coordinate{ + X: s[j].X, + Y: s[j].X*gradient + intercept, + }) + } + + return regressions, nil + +} + +// ExponentialRegression returns an exponential regression on data series +func ExponentialRegression(s Series) (regressions Series, err error) { + + if len(s) == 0 { + return nil, EmptyInput + } + + var sum [6]float64 + + for i := 0; i < len(s); i++ { + sum[0] += s[i].X + sum[1] += s[i].Y + sum[2] += s[i].X * s[i].X * s[i].Y +
sum[3] += s[i].Y * math.Log(s[i].Y) + sum[4] += s[i].X * s[i].Y * math.Log(s[i].Y) + sum[5] += s[i].X * s[i].Y + } + + denominator := (sum[1]*sum[2] - sum[5]*sum[5]) + a := math.Pow(math.E, (sum[2]*sum[3]-sum[5]*sum[4])/denominator) + b := (sum[1]*sum[4] - sum[5]*sum[3]) / denominator + + for j := 0; j < len(s); j++ { + regressions = append(regressions, Coordinate{ + X: s[j].X, + Y: a * math.Exp(b*s[j].X), + }) + } + + return regressions, nil + +} + +// LogarithmicRegression returns a logarithmic regression on data series +func LogarithmicRegression(s Series) (regressions Series, err error) { + + if len(s) == 0 { + return nil, EmptyInput + } + + var sum [4]float64 + + i := 0 + for ; i < len(s); i++ { + sum[0] += math.Log(s[i].X) + sum[1] += s[i].Y * math.Log(s[i].X) + sum[2] += s[i].Y + sum[3] += math.Pow(math.Log(s[i].X), 2) + } + + f := float64(i) + a := (f*sum[1] - sum[2]*sum[0]) / (f*sum[3] - sum[0]*sum[0]) + b := (sum[2] - a*sum[0]) / f + + for j := 0; j < len(s); j++ { + regressions = append(regressions, Coordinate{ + X: s[j].X, + Y: b + a*math.Log(s[j].X), + }) + } + + return regressions, nil + +} diff --git a/hotelReservation/vendor/github.com/montanaflynn/stats/round.go b/hotelReservation/vendor/github.com/montanaflynn/stats/round.go new file mode 100644 index 000000000..b66779c9f --- /dev/null +++ b/hotelReservation/vendor/github.com/montanaflynn/stats/round.go @@ -0,0 +1,38 @@ +package stats + +import "math" + +// Round a float to a specific decimal place or precision +func Round(input float64, places int) (rounded float64, err error) { + + // If the float is not a number + if math.IsNaN(input) { + return math.NaN(), NaNErr + } + + // Find out the actual sign and correct the input for later + sign := 1.0 + if input < 0 { + sign = -1 + input *= -1 + } + + // Use the places arg to get the amount of precision wanted + precision := math.Pow(10, float64(places)) + + // Find the decimal place we are looking to round + digit := input * precision + + // Get the actual decimal number as a fraction to be compared + _, decimal := math.Modf(digit) + + // If the decimal is less than .5 we round down otherwise up + if decimal >= 0.5 { + rounded = math.Ceil(digit) + } else { + rounded = math.Floor(digit) + } + + // Finally we do the math to actually create a rounded number + return rounded / precision * sign, nil +} diff --git a/hotelReservation/vendor/github.com/montanaflynn/stats/sample.go b/hotelReservation/vendor/github.com/montanaflynn/stats/sample.go new file mode 100644 index 000000000..a52f6dcaa --- /dev/null +++ b/hotelReservation/vendor/github.com/montanaflynn/stats/sample.go @@ -0,0 +1,44 @@ +package stats + +import "math/rand" + +// Sample returns a sample of the input, with or without replacement +func Sample(input Float64Data, takenum int, replacement bool) ([]float64, error) { + + if input.Len() == 0 { + return nil, EmptyInput + } + + length := input.Len() + if replacement { + + result := Float64Data{} + rand.Seed(unixnano()) + + // On each step, randomly pick an element (duplicates are allowed) + for i := 0; i < takenum; i++ { + idx := rand.Intn(length) + result = append(result, input[idx]) + } + + return result, nil + + } else if !replacement && takenum <= length { + + rand.Seed(unixnano()) + + // Get a permutation of the indices + perm := rand.Perm(length) + result := Float64Data{} + + // Take elements of the input at the permuted indices + for _, idx := range perm[0:takenum] { + result = append(result, input[idx]) + } + + return result, nil + + } + + return nil, BoundsErr +}
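LinearRegression (alias LinReg) fits a least squares line over the series and returns the fitted Y for each input X. A hedged usage sketch of the regression API defined above (the coordinates are made up for illustration):

```go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	// Hypothetical, roughly linear coordinates.
	series := stats.Series{
		{X: 1, Y: 2.1},
		{X: 2, Y: 3.9},
		{X: 3, Y: 6.2},
	}

	fitted, err := stats.LinearRegression(series)
	if err != nil {
		panic(err)
	}
	for _, c := range fitted {
		fmt.Printf("x=%.0f fitted y=%.2f\n", c.X, c.Y)
	}
}
```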
diff --git a/hotelReservation/vendor/github.com/montanaflynn/stats/sum.go b/hotelReservation/vendor/github.com/montanaflynn/stats/sum.go new file mode 100644 index 000000000..53485f17c --- /dev/null +++ b/hotelReservation/vendor/github.com/montanaflynn/stats/sum.go @@ -0,0 +1,18 @@ +package stats + +import "math" + +// Sum adds all the numbers of a slice together +func Sum(input Float64Data) (sum float64, err error) { + + if input.Len() == 0 { + return math.NaN(), EmptyInput + } + + // Add em up + for _, n := range input { + sum += n + } + + return sum, nil +} diff --git a/hotelReservation/vendor/github.com/montanaflynn/stats/util.go b/hotelReservation/vendor/github.com/montanaflynn/stats/util.go new file mode 100644 index 000000000..881997604 --- /dev/null +++ b/hotelReservation/vendor/github.com/montanaflynn/stats/util.go @@ -0,0 +1,43 @@ +package stats + +import ( + "sort" + "time" +) + +// float64ToInt rounds a float64 to an int +func float64ToInt(input float64) (output int) { + r, _ := Round(input, 0) + return int(r) +} + +// unixnano returns nanoseconds from UTC epoch +func unixnano() int64 { + return time.Now().UTC().UnixNano() +} + +// copyslice copies a slice of float64s +func copyslice(input Float64Data) Float64Data { + s := make(Float64Data, input.Len()) + copy(s, input) + return s +} + +// sortedCopy returns a sorted copy of float64s +func sortedCopy(input Float64Data) (copy Float64Data) { + copy = copyslice(input) + sort.Float64s(copy) + return +} + +// sortedCopyDif returns a sorted copy of float64s +// only if the original data isn't sorted. +// Only use this if returned slice won't be manipulated! +func sortedCopyDif(input Float64Data) (copy Float64Data) { + if sort.Float64sAreSorted(input) { + return input + } + copy = copyslice(input) + sort.Float64s(copy) + return +} diff --git a/hotelReservation/vendor/github.com/montanaflynn/stats/variance.go b/hotelReservation/vendor/github.com/montanaflynn/stats/variance.go new file mode 100644 index 000000000..66e60c941 --- /dev/null +++ b/hotelReservation/vendor/github.com/montanaflynn/stats/variance.go @@ -0,0 +1,105 @@ +package stats + +import "math" + +// _variance finds the variance for both population and sample data +func _variance(input Float64Data, sample int) (variance float64, err error) { + + if input.Len() == 0 { + return math.NaN(), EmptyInput + } + + // Sum the squared deviations of each number from the mean + m, _ := Mean(input) + + for _, n := range input { + variance += (float64(n) - m) * (float64(n) - m) + } + + // When getting the mean of the squared differences + // "sample" will allow us to know if it's a sample + // or population and whether to subtract by one or not + return variance / float64((input.Len() - (1 * sample))), nil +} + +// Variance finds the amount of variation in the dataset +func Variance(input Float64Data) (sdev float64, err error) { + return PopulationVariance(input) +} + +// PopulationVariance finds the amount of variance within a population +func PopulationVariance(input Float64Data) (pvar float64, err error) { + + v, err := _variance(input, 0) + if err != nil { + return math.NaN(), err + } + + return v, nil +} + +// SampleVariance finds the amount of variance within a sample +func SampleVariance(input Float64Data) (svar float64, err error) { + + v, err := _variance(input, 1) + if err != nil { + return math.NaN(), err + } + + return v, nil +} + +// Covariance is a measure of how much two sets of data change +func Covariance(data1, data2 Float64Data) (float64, error) { + + l1 := data1.Len() + l2 :=
data2.Len() + + if l1 == 0 || l2 == 0 { + return math.NaN(), EmptyInput + } + + if l1 != l2 { + return math.NaN(), SizeErr + } + + m1, _ := Mean(data1) + m2, _ := Mean(data2) + + // Incrementally compute the mean of the products of the deviations + var ss float64 + for i := 0; i < l1; i++ { + delta1 := (data1.Get(i) - m1) + delta2 := (data2.Get(i) - m2) + ss += (delta1*delta2 - ss) / float64(i+1) + } + + return ss * float64(l1) / float64(l1-1), nil +} + +// CovariancePopulation computes covariance for entire population between two variables. +func CovariancePopulation(data1, data2 Float64Data) (float64, error) { + + l1 := data1.Len() + l2 := data2.Len() + + if l1 == 0 || l2 == 0 { + return math.NaN(), EmptyInput + } + + if l1 != l2 { + return math.NaN(), SizeErr + } + + m1, _ := Mean(data1) + m2, _ := Mean(data2) + + var s float64 + for i := 0; i < l1; i++ { + delta1 := (data1.Get(i) - m1) + delta2 := (data2.Get(i) - m2) + s += delta1 * delta2 + } + + return s / float64(l1), nil +} diff --git a/hotelReservation/vendor/github.com/opentracing/opentracing-go/.gitignore b/hotelReservation/vendor/github.com/opentracing/opentracing-go/.gitignore index 565f0f732..c57100a59 100644 --- a/hotelReservation/vendor/github.com/opentracing/opentracing-go/.gitignore +++ b/hotelReservation/vendor/github.com/opentracing/opentracing-go/.gitignore @@ -1,13 +1 @@ -# IntelliJ project files -.idea/ -opentracing-go.iml -opentracing-go.ipr -opentracing-go.iws - -# Test results -*.cov -*.html -test.log - -# Build dir -build/ +coverage.txt diff --git a/hotelReservation/vendor/github.com/opentracing/opentracing-go/.travis.yml b/hotelReservation/vendor/github.com/opentracing/opentracing-go/.travis.yml index 0538f1bfc..b950e4296 100644 --- a/hotelReservation/vendor/github.com/opentracing/opentracing-go/.travis.yml +++ b/hotelReservation/vendor/github.com/opentracing/opentracing-go/.travis.yml @@ -1,14 +1,20 @@ language: go -go: - - 1.6 - - 1.7 - - 1.8 - - tip +matrix: + include: + - go: "1.13.x" + - go: "1.14.x" + - go: "tip" + env: + - LINT=true + - COVERAGE=true install: - - go get -d -t github.com/opentracing/opentracing-go/... - - go get -u github.com/golang/lint/... + - if [ "$LINT" == true ]; then go get -u golang.org/x/lint/golint/... ; else echo 'skipping lint'; fi + - go get -u github.com/stretchr/testify/... + script: - - make test lint + - make test - go build ./...
+ - if [ "$LINT" == true ]; then make lint ; else echo 'skipping lint'; fi + - if [ "$COVERAGE" == true ]; then make cover && bash <(curl -s https://codecov.io/bash) ; else echo 'skipping coverage'; fi diff --git a/hotelReservation/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md b/hotelReservation/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md index 1fc9fdf7f..d3bfcf623 100644 --- a/hotelReservation/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md +++ b/hotelReservation/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md @@ -1,14 +1,63 @@ Changes by Version ================== -1.1.0 (unreleased) + +1.2.0 (2020-07-01) +------------------- + +* Restore the ability to reset the current span in context to nil (#231) -- Yuri Shkuro +* Use error.object per OpenTracing Semantic Conventions (#179) -- Rahman Syed +* Convert nil pointer log field value to string "nil" (#230) -- Cyril Tovena +* Add Go module support (#215) -- Zaba505 +* Make SetTag helper types in ext public (#229) -- Blake Edwards +* Add log/fields helpers for keys from specification (#226) -- Dmitry Monakhov +* Improve noop impementation (#223) -- chanxuehong +* Add an extension to Tracer interface for custom go context creation (#220) -- Krzesimir Nowak +* Fix typo in comments (#222) -- meteorlxy +* Improve documentation for log.Object() to emphasize the requirement to pass immutable arguments (#219) -- 疯狂的小企鹅 +* [mock] Return ErrInvalidSpanContext if span context is not MockSpanContext (#216) -- Milad Irannejad + + +1.1.0 (2019-03-23) +------------------- + +Notable changes: +- The library is now released under Apache 2.0 license +- Use Set() instead of Add() in HTTPHeadersCarrier is functionally a breaking change (fixes issue [#159](https://github.com/opentracing/opentracing-go/issues/159)) +- 'golang.org/x/net/context' is replaced with 'context' from the standard library + +List of all changes: + +- Export StartSpanFromContextWithTracer (#214) +- Add IsGlobalTracerRegistered() to indicate if a tracer has been registered (#201) +- Use Set() instead of Add() in HTTPHeadersCarrier (#191) +- Update license to Apache 2.0 (#181) +- Replace 'golang.org/x/net/context' with 'context' (#176) +- Port of Python opentracing/harness/api_check.py to Go (#146) +- Fix race condition in MockSpan.Context() (#170) +- Add PeerHostIPv4.SetString() (#155) +- Add a Noop log field type to log to allow for optional fields (#150) + + +1.0.2 (2017-04-26) ------------------- -- Deprecate InitGlobalTracer() in favor of SetGlobalTracer() +- Add more semantic tags (#139) + + +1.0.1 (2017-02-06) +------------------- +- Correct spelling in comments +- Address race in nextMockID() (#123) +- log: avoid panic marshaling nil error (#131) +- Deprecate InitGlobalTracer in favor of SetGlobalTracer (#128) +- Drop Go 1.5 that fails in Travis (#129) +- Add convenience methods Key() and Value() to log.Field +- Add convenience methods to log.Field (2 years, 6 months ago) 1.0.0 (2016-09-26) ------------------- -- This release implements OpenTracing Specification 1.0 (http://opentracing.io/spec) +- This release implements OpenTracing Specification 1.0 (https://opentracing.io/spec) diff --git a/hotelReservation/vendor/github.com/opentracing/opentracing-go/LICENSE b/hotelReservation/vendor/github.com/opentracing/opentracing-go/LICENSE index 148509a40..f0027349e 100644 --- a/hotelReservation/vendor/github.com/opentracing/opentracing-go/LICENSE +++ b/hotelReservation/vendor/github.com/opentracing/opentracing-go/LICENSE @@ -1,21 +1,201 @@ -The 
MIT License (MIT) - -Copyright (c) 2016 The OpenTracing Authors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 The OpenTracing Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/hotelReservation/vendor/github.com/opentracing/opentracing-go/Makefile b/hotelReservation/vendor/github.com/opentracing/opentracing-go/Makefile index 2f491f157..62abb63f5 100644 --- a/hotelReservation/vendor/github.com/opentracing/opentracing-go/Makefile +++ b/hotelReservation/vendor/github.com/opentracing/opentracing-go/Makefile @@ -1,26 +1,15 @@ -PACKAGES := . ./mocktracer/... ./ext/... - .DEFAULT_GOAL := test-and-lint -.PHONE: test-and-lint - +.PHONY: test-and-lint test-and-lint: test lint .PHONY: test test: - go test -v -cover ./... + go test -v -cover -race ./... 
+.PHONY: cover cover: - @rm -rf cover-all.out - $(foreach pkg, $(PACKAGES), $(MAKE) cover-pkg PKG=$(pkg) || true;) - @grep mode: cover.out > coverage.out - @cat cover-all.out >> coverage.out - go tool cover -html=coverage.out -o cover.html - @rm -rf cover.out cover-all.out coverage.out - -cover-pkg: - go test -coverprofile cover.out $(PKG) - @grep -v mode: cover.out >> cover-all.out + go test -v -coverprofile=coverage.txt -covermode=atomic -race ./... .PHONY: lint lint: @@ -29,4 +18,3 @@ lint: @# Run again with magic to exit non-zero if golint outputs anything. @! (golint ./... | read dummy) go vet ./... - diff --git a/hotelReservation/vendor/github.com/opentracing/opentracing-go/README.md b/hotelReservation/vendor/github.com/opentracing/opentracing-go/README.md index 1fb77d227..6ef1d7c9d 100644 --- a/hotelReservation/vendor/github.com/opentracing/opentracing-go/README.md +++ b/hotelReservation/vendor/github.com/opentracing/opentracing-go/README.md @@ -1,4 +1,5 @@ [![Gitter chat](http://img.shields.io/badge/gitter-join%20chat%20%E2%86%92-brightgreen.svg)](https://gitter.im/opentracing/public) [![Build Status](https://travis-ci.org/opentracing/opentracing-go.svg?branch=master)](https://travis-ci.org/opentracing/opentracing-go) [![GoDoc](https://godoc.org/github.com/opentracing/opentracing-go?status.svg)](http://godoc.org/github.com/opentracing/opentracing-go) +[![Sourcegraph Badge](https://sourcegraph.com/github.com/opentracing/opentracing-go/-/badge.svg)](https://sourcegraph.com/github.com/opentracing/opentracing-go?badge) # OpenTracing API for Go @@ -7,8 +8,8 @@ This package is a Go platform API for OpenTracing. ## Required Reading In order to understand the Go platform API, one must first be familiar with the -[OpenTracing project](http://opentracing.io) and -[terminology](http://opentracing.io/documentation/pages/spec.html) more specifically. +[OpenTracing project](https://opentracing.io) and +[terminology](https://opentracing.io/specification/) more specifically. ## API overview for those adding instrumentation @@ -26,7 +27,7 @@ The simplest starting point is `./default_tracer.go`. As early as possible, call import ".../some_tracing_impl" func main() { - opentracing.InitGlobalTracer( + opentracing.SetGlobalTracer( // tracing impl specific: some_tracing_impl.New(...), ) @@ -34,7 +35,7 @@ The simplest starting point is `./default_tracer.go`. As early as possible, call } ``` -##### Non-Singleton initialization +#### Non-Singleton initialization If you prefer direct control to singletons, manage ownership of the `opentracing.Tracer` implementation explicitly. @@ -133,6 +134,21 @@ reference. } ``` +#### Conditionally capture a field using `log.Noop` + +In some situations, you may want to dynamically decide whether or not +to log a field. For example, you may want to capture additional data, +such as a customer ID, in non-production environments: + +```go + func Customer(order *Order) log.Field { + if os.Getenv("ENVIRONMENT") == "dev" { + return log.String("customer", order.Customer.ID) + } + return log.Noop() + } +``` + #### Goroutine-safety The entire public API is goroutine-safe and does not require external @@ -145,3 +161,11 @@ Tracing system implementors may be able to reuse or copy-paste-modify the `basic ## API compatibility For the time being, "mild" backwards-incompatible changes may be made without changing the major version number. As OpenTracing and `opentracing-go` mature, backwards compatibility will become more of a priority. 
+ +## Tracer test suite + +A test suite is available in the [harness](https://godoc.org/github.com/opentracing/opentracing-go/harness) package that can help Tracer implementors assert that their Tracer is working correctly. + +## Licensing + +[Apache 2.0 License](./LICENSE). diff --git a/hotelReservation/vendor/github.com/opentracing/opentracing-go/ext.go b/hotelReservation/vendor/github.com/opentracing/opentracing-go/ext.go new file mode 100644 index 000000000..e11977ebe --- /dev/null +++ b/hotelReservation/vendor/github.com/opentracing/opentracing-go/ext.go @@ -0,0 +1,24 @@ +package opentracing + +import ( + "context" +) + +// TracerContextWithSpanExtension is an extension interface that the +// implementation of the Tracer interface may want to implement. It +// allows the tracer to exercise some control over the Go context when +// ContextWithSpan is invoked. +// +// The primary purpose of this extension is to support adapters from the +// opentracing API to some other tracing API. +type TracerContextWithSpanExtension interface { + // ContextWithSpanHook gets called by the ContextWithSpan + // function, when the Tracer implementation also implements + // this interface. It allows the tracer to put extra information into the + // context and make it available to the callers of the + // ContextWithSpan. + // + // This hook is invoked before the ContextWithSpan function + // actually puts the span into the context. + ContextWithSpanHook(ctx context.Context, span Span) context.Context +} diff --git a/hotelReservation/vendor/github.com/opentracing/opentracing-go/ext/field.go b/hotelReservation/vendor/github.com/opentracing/opentracing-go/ext/field.go new file mode 100644 index 000000000..8282bd758 --- /dev/null +++ b/hotelReservation/vendor/github.com/opentracing/opentracing-go/ext/field.go @@ -0,0 +1,17 @@ +package ext + +import ( + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/log" +) + +// LogError sets the error=true tag on the Span and logs err as an "error" event. +func LogError(span opentracing.Span, err error, fields ...log.Field) { + Error.Set(span, true) + ef := []log.Field{ + log.Event("error"), + log.Error(err), + } + ef = append(ef, fields...) + span.LogFields(ef...) +} diff --git a/hotelReservation/vendor/github.com/opentracing/opentracing-go/ext/tags.go b/hotelReservation/vendor/github.com/opentracing/opentracing-go/ext/tags.go index c67ab5eef..a414b5951 100644 --- a/hotelReservation/vendor/github.com/opentracing/opentracing-go/ext/tags.go +++ b/hotelReservation/vendor/github.com/opentracing/opentracing-go/ext/tags.go @@ -1,6 +1,6 @@ package ext -import opentracing "github.com/opentracing/opentracing-go" +import "github.com/opentracing/opentracing-go" // These constants define common tag names recommended for better portability across // tracing systems and languages/platforms. @@ -47,40 +47,40 @@ var ( // Component is a low-cardinality identifier of the module, library, // or package that is generating a span. - Component = stringTagName("component") + Component = StringTagName("component") ////////////////////////////////////////////////////////////////////// // Sampling hint ////////////////////////////////////////////////////////////////////// // SamplingPriority determines the priority of sampling this Span. - SamplingPriority = uint16TagName("sampling.priority") + SamplingPriority = Uint16TagName("sampling.priority") ////////////////////////////////////////////////////////////////////// - // Peer tags.
These tags can be emitted by either client-side of + // Peer tags. These tags can be emitted by either client-side or // server-side to describe the other side/service in a peer-to-peer // communications, like an RPC call. ////////////////////////////////////////////////////////////////////// // PeerService records the service name of the peer. - PeerService = stringTagName("peer.service") + PeerService = StringTagName("peer.service") // PeerAddress records the address name of the peer. This may be a "ip:port", // a bare "hostname", a FQDN or even a database DSN substring // like "mysql://username@127.0.0.1:3306/dbname" - PeerAddress = stringTagName("peer.address") + PeerAddress = StringTagName("peer.address") // PeerHostname records the host name of the peer - PeerHostname = stringTagName("peer.hostname") + PeerHostname = StringTagName("peer.hostname") // PeerHostIPv4 records IP v4 host address of the peer - PeerHostIPv4 = uint32TagName("peer.ipv4") + PeerHostIPv4 = IPv4TagName("peer.ipv4") // PeerHostIPv6 records IP v6 host address of the peer - PeerHostIPv6 = stringTagName("peer.ipv6") + PeerHostIPv6 = StringTagName("peer.ipv6") // PeerPort records port number of the peer - PeerPort = uint16TagName("peer.port") + PeerPort = Uint16TagName("peer.port") ////////////////////////////////////////////////////////////////////// // HTTP Tags @@ -88,46 +88,46 @@ var ( // HTTPUrl should be the URL of the request being handled in this segment // of the trace, in standard URI format. The protocol is optional. - HTTPUrl = stringTagName("http.url") + HTTPUrl = StringTagName("http.url") // HTTPMethod is the HTTP method of the request, and is case-insensitive. - HTTPMethod = stringTagName("http.method") + HTTPMethod = StringTagName("http.method") // HTTPStatusCode is the numeric HTTP status code (200, 404, etc) of the // HTTP response. - HTTPStatusCode = uint16TagName("http.status_code") + HTTPStatusCode = Uint16TagName("http.status_code") ////////////////////////////////////////////////////////////////////// // DB Tags ////////////////////////////////////////////////////////////////////// // DBInstance is database instance name. - DBInstance = stringTagName("db.instance") + DBInstance = StringTagName("db.instance") // DBStatement is a database statement for the given database type. // It can be a query or a prepared statement (i.e., before substitution). - DBStatement = stringTagName("db.statement") + DBStatement = StringTagName("db.statement") // DBType is a database type. For any SQL database, "sql". // For others, the lower-case database category, e.g. "redis" - DBType = stringTagName("db.type") + DBType = StringTagName("db.type") // DBUser is a username for accessing database. - DBUser = stringTagName("db.user") + DBUser = StringTagName("db.user") ////////////////////////////////////////////////////////////////////// // Message Bus Tag ////////////////////////////////////////////////////////////////////// // MessageBusDestination is an address at which messages can be exchanged - MessageBusDestination = stringTagName("message_bus.destination") + MessageBusDestination = StringTagName("message_bus.destination") ////////////////////////////////////////////////////////////////////// // Error Tag ////////////////////////////////////////////////////////////////////// // Error indicates that operation represented by the span resulted in an error. 
- Error = boolTagName("error") + Error = BoolTagName("error") ) // --- @@ -163,36 +163,53 @@ func RPCServerOption(client opentracing.SpanContext) opentracing.StartSpanOption // --- -type stringTagName string +// StringTagName is a common tag name to be set to a string value +type StringTagName string // Set adds a string tag to the `span` -func (tag stringTagName) Set(span opentracing.Span, value string) { +func (tag StringTagName) Set(span opentracing.Span, value string) { span.SetTag(string(tag), value) } // --- -type uint32TagName string +// Uint32TagName is a common tag name to be set to a uint32 value +type Uint32TagName string // Set adds a uint32 tag to the `span` -func (tag uint32TagName) Set(span opentracing.Span, value uint32) { +func (tag Uint32TagName) Set(span opentracing.Span, value uint32) { span.SetTag(string(tag), value) } // --- -type uint16TagName string +// Uint16TagName is a common tag name to be set to a uint16 value +type Uint16TagName string // Set adds a uint16 tag to the `span` -func (tag uint16TagName) Set(span opentracing.Span, value uint16) { +func (tag Uint16TagName) Set(span opentracing.Span, value uint16) { span.SetTag(string(tag), value) } // --- -type boolTagName string +// BoolTagName is a common tag name to be set to a bool value +type BoolTagName string -// Add adds a bool tag to the `span` -func (tag boolTagName) Set(span opentracing.Span, value bool) { +// Set adds a bool tag to the `span` +func (tag BoolTagName) Set(span opentracing.Span, value bool) { + span.SetTag(string(tag), value) +} + +// IPv4TagName is a common tag name to be set to an ipv4 value +type IPv4TagName string + +// Set adds IP v4 host address of the peer as an uint32 value to the `span`, keep this for backward and zipkin compatibility +func (tag IPv4TagName) Set(span opentracing.Span, value uint32) { + span.SetTag(string(tag), value) +} + +// SetString records IP v4 host address of the peer as a .-separated tuple to the `span`. E.g., "127.0.0.1" +func (tag IPv4TagName) SetString(span opentracing.Span, value string) { span.SetTag(string(tag), value) } diff --git a/hotelReservation/vendor/github.com/opentracing/opentracing-go/globaltracer.go b/hotelReservation/vendor/github.com/opentracing/opentracing-go/globaltracer.go index 8c8e793ff..4f7066a92 100644 --- a/hotelReservation/vendor/github.com/opentracing/opentracing-go/globaltracer.go +++ b/hotelReservation/vendor/github.com/opentracing/opentracing-go/globaltracer.go @@ -1,7 +1,12 @@ package opentracing +type registeredTracer struct { + tracer Tracer + isRegistered bool +} + var ( - globalTracer Tracer = NoopTracer{} + globalTracer = registeredTracer{NoopTracer{}, false} ) // SetGlobalTracer sets the [singleton] opentracing.Tracer returned by @@ -11,22 +16,27 @@ var ( // Prior to calling `SetGlobalTracer`, any Spans started via the `StartSpan` // (etc) globals are noops. func SetGlobalTracer(tracer Tracer) { - globalTracer = tracer + globalTracer = registeredTracer{tracer, true} } // GlobalTracer returns the global singleton `Tracer` implementation. // Before `SetGlobalTracer()` is called, the `GlobalTracer()` is a noop // implementation that drops all data handed to it. func GlobalTracer() Tracer { - return globalTracer + return globalTracer.tracer } // StartSpan defers to `Tracer.StartSpan`. See `GlobalTracer()`. func StartSpan(operationName string, opts ...StartSpanOption) Span { - return globalTracer.StartSpan(operationName, opts...) + return globalTracer.tracer.StartSpan(operationName, opts...) 
} // InitGlobalTracer is deprecated. Please use SetGlobalTracer. func InitGlobalTracer(tracer Tracer) { SetGlobalTracer(tracer) } + +// IsGlobalTracerRegistered returns a `bool` to indicate if a tracer has been globally registered +func IsGlobalTracerRegistered() bool { + return globalTracer.isRegistered +} diff --git a/hotelReservation/vendor/github.com/opentracing/opentracing-go/gocontext.go b/hotelReservation/vendor/github.com/opentracing/opentracing-go/gocontext.go index 222a65202..1831bc9b2 100644 --- a/hotelReservation/vendor/github.com/opentracing/opentracing-go/gocontext.go +++ b/hotelReservation/vendor/github.com/opentracing/opentracing-go/gocontext.go @@ -1,14 +1,19 @@ package opentracing -import "golang.org/x/net/context" +import "context" type contextKey struct{} var activeSpanKey = contextKey{} // ContextWithSpan returns a new `context.Context` that holds a reference to -// `span`'s SpanContext. +// the span. If span is nil, a new context without an active span is returned. func ContextWithSpan(ctx context.Context, span Span) context.Context { + if span != nil { + if tracerWithHook, ok := span.Tracer().(TracerContextWithSpanExtension); ok { + ctx = tracerWithHook.ContextWithSpanHook(ctx, span) + } + } return context.WithValue(ctx, activeSpanKey, span) } @@ -41,17 +46,20 @@ func SpanFromContext(ctx context.Context) Span { // ... // } func StartSpanFromContext(ctx context.Context, operationName string, opts ...StartSpanOption) (Span, context.Context) { - return startSpanFromContextWithTracer(ctx, GlobalTracer(), operationName, opts...) + return StartSpanFromContextWithTracer(ctx, GlobalTracer(), operationName, opts...) } -// startSpanFromContextWithTracer is factored out for testing purposes. -func startSpanFromContextWithTracer(ctx context.Context, tracer Tracer, operationName string, opts ...StartSpanOption) (Span, context.Context) { - var span Span +// StartSpanFromContextWithTracer starts and returns a span with `operationName` +// using a span found within the context as a ChildOfRef. If that doesn't exist +// it creates a root span. It also returns a context.Context object built +// around the returned span. +// +// Its behavior is identical to StartSpanFromContext except that it takes an explicit +// tracer as opposed to using the global tracer. +func StartSpanFromContextWithTracer(ctx context.Context, tracer Tracer, operationName string, opts ...StartSpanOption) (Span, context.Context) { if parentSpan := SpanFromContext(ctx); parentSpan != nil { opts = append(opts, ChildOf(parentSpan.Context())) - span = tracer.StartSpan(operationName, opts...) - } else { - span = tracer.StartSpan(operationName, opts...) } + span := tracer.StartSpan(operationName, opts...) return span, ContextWithSpan(ctx, span) } diff --git a/hotelReservation/vendor/github.com/opentracing/opentracing-go/log/field.go b/hotelReservation/vendor/github.com/opentracing/opentracing-go/log/field.go index d2cd39a16..f222ded79 100644 --- a/hotelReservation/vendor/github.com/opentracing/opentracing-go/log/field.go +++ b/hotelReservation/vendor/github.com/opentracing/opentracing-go/log/field.go @@ -20,6 +20,7 @@ const ( errorType objectType lazyLoggerType + noopType ) // Field instances are constructed via LogBool, LogString, and so on.
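The `ContextWithSpan` hook added above is easiest to see end to end. A hedged sketch of the `TracerContextWithSpanExtension` interface working with `StartSpanFromContextWithTracer` (the `hookedTracer`/`hookedSpan` types and the context key are invented for illustration, not part of the package):

```go
package main

import (
	"context"
	"fmt"

	opentracing "github.com/opentracing/opentracing-go"
)

type ctxKey struct{}

// hookedSpan overrides Tracer() so ContextWithSpan can discover the hook.
type hookedSpan struct {
	opentracing.Span // a no-op span supplies every other method
	tracer           opentracing.Tracer
}

func (s hookedSpan) Tracer() opentracing.Tracer { return s.tracer }

// hookedTracer implements TracerContextWithSpanExtension on top of NoopTracer.
type hookedTracer struct {
	opentracing.NoopTracer
}

func (t hookedTracer) StartSpan(name string, opts ...opentracing.StartSpanOption) opentracing.Span {
	return hookedSpan{Span: t.NoopTracer.StartSpan(name, opts...), tracer: t}
}

// ContextWithSpanHook runs before ContextWithSpan stores the span, letting
// the tracer put extra values into the returned context.
func (t hookedTracer) ContextWithSpanHook(ctx context.Context, span opentracing.Span) context.Context {
	return context.WithValue(ctx, ctxKey{}, "added-by-hook")
}

func main() {
	span, ctx := opentracing.StartSpanFromContextWithTracer(
		context.Background(), hookedTracer{}, "op")
	defer span.Finish()
	fmt.Println(ctx.Value(ctxKey{})) // Output: added-by-hook
}
```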
@@ -121,16 +122,19 @@ func Float64(key string, val float64) Field { } } -// Error adds an error with the key "error" to a Span.LogFields() record +// Error adds an error with the key "error.object" to a Span.LogFields() record func Error(err error) Field { return Field{ - key: "error", + key: "error.object", fieldType: errorType, interfaceVal: err, } } // Object adds an object-valued key:value pair to a Span.LogFields() record +// Please pass in an immutable object; otherwise there may be concurrency issues. +// For example, passing in a map may result in "fatal error: concurrent map iteration and map write", +// because the span is sent asynchronously and the map may be modified concurrently. func Object(key string, obj interface{}) Field { return Field{ key: key, @@ -139,6 +143,16 @@ func Object(key string, obj interface{}) Field { } } +// Event creates a string-valued Field for span logs with key="event" and value=val. +func Event(val string) Field { + return String("event", val) +} + +// Message creates a string-valued Field for span logs with key="message" and value=val. +func Message(val string) Field { + return String("message", val) +} + // LazyLogger allows for user-defined, late-bound logging of arbitrary data type LazyLogger func(fv Encoder) @@ -152,6 +166,25 @@ func Lazy(ll LazyLogger) Field { } } +// Noop creates a no-op log field that should be ignored by the tracer. +// It can be used to capture optional fields, for example those that should +// only be logged in non-production environment: +// +// func customerField(order *Order) log.Field {  +// if os.Getenv("ENVIRONMENT") == "dev" { +// return log.String("customer", order.Customer.ID) +// } +// return log.Noop() +// } +// +// span.LogFields(log.String("event", "purchase"), customerField(order)) +// +func Noop() Field { + return Field{ + fieldType: noopType, + } +} + // Encoder allows access to the contents of a Field (via a call to // Field.Marshal). // @@ -203,6 +236,8 @@ func (lf Field) Marshal(visitor Encoder) { visitor.EmitObject(lf.key, lf.interfaceVal) case lazyLoggerType: visitor.EmitLazyLogger(lf.interfaceVal.(LazyLogger)) + case noopType: + // intentionally left blank } } @@ -234,6 +269,8 @@ func (lf Field) Value() interface{} { return math.Float64frombits(uint64(lf.numericVal)) case errorType, objectType, lazyLoggerType: return lf.interfaceVal + case noopType: + return nil default: return nil } diff --git a/hotelReservation/vendor/github.com/opentracing/opentracing-go/log/util.go b/hotelReservation/vendor/github.com/opentracing/opentracing-go/log/util.go index 3832feb5c..d57e28aa5 100644 --- a/hotelReservation/vendor/github.com/opentracing/opentracing-go/log/util.go +++ b/hotelReservation/vendor/github.com/opentracing/opentracing-go/log/util.go @@ -1,6 +1,9 @@ package log -import "fmt" +import ( + "fmt" + "reflect" +) // InterleavedKVToFields converts keyValues a la Span.LogKV() to a Field slice // a la Span.LogFields().
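Taken together, the new `error.object` key, the `Event`/`Message` helpers, and the `ext.LogError` helper introduced earlier in this diff change how a typical error is recorded. A small sketch, assuming only the vendored packages above (the operation name and `customerField` helper are illustrative):

```go
package main

import (
	"errors"
	"os"

	opentracing "github.com/opentracing/opentracing-go"
	"github.com/opentracing/opentracing-go/ext"
	"github.com/opentracing/opentracing-go/log"
)

// customerField mirrors the Noop() docs: the field is only captured in dev.
func customerField(id string) log.Field {
	if os.Getenv("ENVIRONMENT") == "dev" {
		return log.String("customer", id)
	}
	return log.Noop() // ignored by the tracer
}

func main() {
	span := opentracing.StartSpan("charge") // no-op unless a tracer is registered
	defer span.Finish()

	// Emits event="error", error.object=<err>, plus the extra fields,
	// and sets the error=true tag on the span.
	ext.LogError(span, errors.New("card declined"),
		log.Message("charging failed"), customerField("c-42"))
}
```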
@@ -46,6 +49,10 @@ func InterleavedKVToFields(keyValues ...interface{}) ([]Field, error) { case float64: fields[i] = Float64(key, typedVal) default: + if typedVal == nil || (reflect.ValueOf(typedVal).Kind() == reflect.Ptr && reflect.ValueOf(typedVal).IsNil()) { + fields[i] = String(key, "nil") + continue + } // When in doubt, coerce to a string fields[i] = String(key, fmt.Sprint(typedVal)) } diff --git a/hotelReservation/vendor/github.com/opentracing/opentracing-go/noop.go b/hotelReservation/vendor/github.com/opentracing/opentracing-go/noop.go index 0d32f692c..f9b680a21 100644 --- a/hotelReservation/vendor/github.com/opentracing/opentracing-go/noop.go +++ b/hotelReservation/vendor/github.com/opentracing/opentracing-go/noop.go @@ -21,9 +21,9 @@ type noopSpan struct{} type noopSpanContext struct{} var ( - defaultNoopSpanContext = noopSpanContext{} - defaultNoopSpan = noopSpan{} - defaultNoopTracer = NoopTracer{} + defaultNoopSpanContext SpanContext = noopSpanContext{} + defaultNoopSpan Span = noopSpan{} + defaultNoopTracer Tracer = NoopTracer{} ) const ( @@ -35,7 +35,7 @@ func (n noopSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {} // noopSpan: func (n noopSpan) Context() SpanContext { return defaultNoopSpanContext } -func (n noopSpan) SetBaggageItem(key, val string) Span { return defaultNoopSpan } +func (n noopSpan) SetBaggageItem(key, val string) Span { return n } func (n noopSpan) BaggageItem(key string) string { return emptyString } func (n noopSpan) SetTag(key string, value interface{}) Span { return n } func (n noopSpan) LogFields(fields ...log.Field) {} diff --git a/hotelReservation/vendor/github.com/opentracing/opentracing-go/propagation.go b/hotelReservation/vendor/github.com/opentracing/opentracing-go/propagation.go index 9583fc53a..b0c275eb0 100644 --- a/hotelReservation/vendor/github.com/opentracing/opentracing-go/propagation.go +++ b/hotelReservation/vendor/github.com/opentracing/opentracing-go/propagation.go @@ -72,18 +72,18 @@ const ( // // For Tracer.Extract(): the carrier must be a `TextMapReader`. // - // See HTTPHeaderCarrier for an implementation of both TextMapWriter + // See HTTPHeadersCarrier for an implementation of both TextMapWriter // and TextMapReader that defers to an http.Header instance for storage. // For example, Inject(): // // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) // err := span.Tracer().Inject( - // span, opentracing.HTTPHeaders, carrier) + // span.Context(), opentracing.HTTPHeaders, carrier) // // Or Extract(): // // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) - // span, err := tracer.Extract( + // clientContext, err := tracer.Extract( // opentracing.HTTPHeaders, carrier) // HTTPHeaders @@ -144,15 +144,15 @@ func (c TextMapCarrier) Set(key, val string) { // // Example usage for server side: // -// carrier := opentracing.HttpHeadersCarrier(httpReq.Header) -// spanContext, err := tracer.Extract(opentracing.HttpHeaders, carrier) +// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) +// clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier) // // Example usage for client side: // // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) // err := tracer.Inject( // span.Context(), -// opentracing.HttpHeaders, +// opentracing.HTTPHeaders, // carrier) // type HTTPHeadersCarrier http.Header @@ -160,7 +160,7 @@ type HTTPHeadersCarrier http.Header // Set conforms to the TextMapWriter interface. 
func (c HTTPHeadersCarrier) Set(key, val string) { h := http.Header(c) - h.Add(key, val) + h.Set(key, val) } // ForeachKey conforms to the TextMapReader interface. diff --git a/hotelReservation/vendor/github.com/opentracing/opentracing-go/span.go b/hotelReservation/vendor/github.com/opentracing/opentracing-go/span.go index f6c3234ac..0d3fb5341 100644 --- a/hotelReservation/vendor/github.com/opentracing/opentracing-go/span.go +++ b/hotelReservation/vendor/github.com/opentracing/opentracing-go/span.go @@ -41,6 +41,8 @@ type Span interface { Context() SpanContext // Sets or changes the operation name. + // + // Returns a reference to this Span for chaining. SetOperationName(operationName string) Span // Adds a tag to the span. @@ -51,6 +53,8 @@ type Span interface { // other tag value types is undefined at the OpenTracing level. If a // tracing system does not know how to handle a particular value type, it // may ignore the tag, but shall not panic. + // + // Returns a reference to this Span for chaining. SetTag(key string, value interface{}) Span // LogFields is an efficient and type-checked way to record key:value diff --git a/hotelReservation/vendor/github.com/opentracing/opentracing-go/tracer.go b/hotelReservation/vendor/github.com/opentracing/opentracing-go/tracer.go index fd77c1df3..715f0cedf 100644 --- a/hotelReservation/vendor/github.com/opentracing/opentracing-go/tracer.go +++ b/hotelReservation/vendor/github.com/opentracing/opentracing-go/tracer.go @@ -30,7 +30,7 @@ type Tracer interface { // sp := tracer.StartSpan( // "GetFeed", // opentracing.ChildOf(parentSpan.Context()), - // opentracing.Tag("user_agent", loggedReq.UserAgent), + // opentracing.Tag{"user_agent", loggedReq.UserAgent}, // opentracing.StartTime(loggedReq.Timestamp), // ) // @@ -44,8 +44,7 @@ type Tracer interface { // and each has an expected carrier type. // // Other packages may declare their own `format` values, much like the keys - // used by `context.Context` (see - // https://godoc.org/golang.org/x/net/context#WithValue). + // used by `context.Context` (see https://godoc.org/context#WithValue). // // Example usage (sans error handling): // diff --git a/hotelReservation/vendor/github.com/pkg/errors/.gitignore b/hotelReservation/vendor/github.com/pkg/errors/.gitignore new file mode 100644 index 000000000..daf913b1b --- /dev/null +++ b/hotelReservation/vendor/github.com/pkg/errors/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/hotelReservation/vendor/github.com/pkg/errors/.travis.yml b/hotelReservation/vendor/github.com/pkg/errors/.travis.yml new file mode 100644 index 000000000..9159de03e --- /dev/null +++ b/hotelReservation/vendor/github.com/pkg/errors/.travis.yml @@ -0,0 +1,10 @@ +language: go +go_import_path: github.com/pkg/errors +go: + - 1.11.x + - 1.12.x + - 1.13.x + - tip + +script: + - make check diff --git a/hotelReservation/vendor/github.com/pkg/errors/LICENSE b/hotelReservation/vendor/github.com/pkg/errors/LICENSE new file mode 100644 index 000000000..835ba3e75 --- /dev/null +++ b/hotelReservation/vendor/github.com/pkg/errors/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2015, Dave Cheney +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/hotelReservation/vendor/github.com/pkg/errors/Makefile b/hotelReservation/vendor/github.com/pkg/errors/Makefile new file mode 100644 index 000000000..ce9d7cded --- /dev/null +++ b/hotelReservation/vendor/github.com/pkg/errors/Makefile @@ -0,0 +1,44 @@ +PKGS := github.com/pkg/errors +SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS)) +GO := go + +check: test vet gofmt misspell unconvert staticcheck ineffassign unparam + +test: + $(GO) test $(PKGS) + +vet: | test + $(GO) vet $(PKGS) + +staticcheck: + $(GO) get honnef.co/go/tools/cmd/staticcheck + staticcheck -checks all $(PKGS) + +misspell: + $(GO) get github.com/client9/misspell/cmd/misspell + misspell \ + -locale GB \ + -error \ + *.md *.go + +unconvert: + $(GO) get github.com/mdempsky/unconvert + unconvert -v $(PKGS) + +ineffassign: + $(GO) get github.com/gordonklaus/ineffassign + find $(SRCDIRS) -name '*.go' | xargs ineffassign + +pedantic: check errcheck + +unparam: + $(GO) get mvdan.cc/unparam + unparam ./... + +errcheck: + $(GO) get github.com/kisielk/errcheck + errcheck $(PKGS) + +gofmt: + @echo Checking code is gofmted + @test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)" diff --git a/hotelReservation/vendor/github.com/pkg/errors/README.md b/hotelReservation/vendor/github.com/pkg/errors/README.md new file mode 100644 index 000000000..54dfdcb12 --- /dev/null +++ b/hotelReservation/vendor/github.com/pkg/errors/README.md @@ -0,0 +1,59 @@ +# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge) + +Package errors provides simple error handling primitives. 
+ +`go get github.com/pkg/errors` + +The traditional error handling idiom in Go is roughly akin to +```go +if err != nil { + return err +} +``` +which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error. + +## Adding context to an error + +The errors.Wrap function returns a new error that adds context to the original error. For example +```go +_, err := ioutil.ReadAll(r) +if err != nil { + return errors.Wrap(err, "read failed") +} +``` +## Retrieving the cause of an error + +Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`. +```go +type causer interface { + Cause() error +} +``` +`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example: +```go +switch err := errors.Cause(err).(type) { +case *MyError: + // handle specifically +default: + // unknown error +} +``` + +[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). + +## Roadmap + +With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows: + +- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible) +- 1.0. Final release. + +## Contributing + +Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports. + +Before sending a PR, please discuss your change by raising an issue. + +## License + +BSD-2-Clause diff --git a/hotelReservation/vendor/github.com/pkg/errors/appveyor.yml b/hotelReservation/vendor/github.com/pkg/errors/appveyor.yml new file mode 100644 index 000000000..a932eade0 --- /dev/null +++ b/hotelReservation/vendor/github.com/pkg/errors/appveyor.yml @@ -0,0 +1,32 @@ +version: build-{build}.{branch} + +clone_folder: C:\gopath\src\github.com\pkg\errors +shallow_clone: true # for startup speed + +environment: + GOPATH: C:\gopath + +platform: + - x64 + +# http://www.appveyor.com/docs/installed-software +install: + # some helpful output for debugging builds + - go version + - go env + # pre-installed MinGW at C:\MinGW is 32bit only + # but MSYS2 at C:\msys64 has mingw64 + - set PATH=C:\msys64\mingw64\bin;%PATH% + - gcc --version + - g++ --version + +build_script: + - go install -v ./... + +test_script: + - set PATH=C:\gopath\bin;%PATH% + - go test -v ./... + +#artifacts: +# - path: '%GOPATH%\bin\*.exe' +deploy: off diff --git a/hotelReservation/vendor/github.com/pkg/errors/errors.go b/hotelReservation/vendor/github.com/pkg/errors/errors.go new file mode 100644 index 000000000..161aea258 --- /dev/null +++ b/hotelReservation/vendor/github.com/pkg/errors/errors.go @@ -0,0 +1,288 @@ +// Package errors provides simple error handling primitives. 
+// +// The traditional error handling idiom in Go is roughly akin to +// +// if err != nil { +// return err +// } +// +// which when applied recursively up the call stack results in error reports +// without context or debugging information. The errors package allows +// programmers to add context to the failure path in their code in a way +// that does not destroy the original value of the error. +// +// Adding context to an error +// +// The errors.Wrap function returns a new error that adds context to the +// original error by recording a stack trace at the point Wrap is called, +// together with the supplied message. For example +// +// _, err := ioutil.ReadAll(r) +// if err != nil { +// return errors.Wrap(err, "read failed") +// } +// +// If additional control is required, the errors.WithStack and +// errors.WithMessage functions destructure errors.Wrap into its component +// operations: annotating an error with a stack trace and with a message, +// respectively. +// +// Retrieving the cause of an error +// +// Using errors.Wrap constructs a stack of errors, adding context to the +// preceding error. Depending on the nature of the error it may be necessary +// to reverse the operation of errors.Wrap to retrieve the original error +// for inspection. Any error value which implements this interface +// +// type causer interface { +// Cause() error +// } +// +// can be inspected by errors.Cause. errors.Cause will recursively retrieve +// the topmost error that does not implement causer, which is assumed to be +// the original cause. For example: +// +// switch err := errors.Cause(err).(type) { +// case *MyError: +// // handle specifically +// default: +// // unknown error +// } +// +// Although the causer interface is not exported by this package, it is +// considered a part of its stable public interface. +// +// Formatted printing of errors +// +// All error values returned from this package implement fmt.Formatter and can +// be formatted by the fmt package. The following verbs are supported: +// +// %s print the error. If the error has a Cause it will be +// printed recursively. +// %v see %s +// %+v extended format. Each Frame of the error's StackTrace will +// be printed in detail. +// +// Retrieving the stack trace of an error or wrapper +// +// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are +// invoked. This information can be retrieved with the following interface: +// +// type stackTracer interface { +// StackTrace() errors.StackTrace +// } +// +// The returned errors.StackTrace type is defined as +// +// type StackTrace []Frame +// +// The Frame type represents a call site in the stack trace. Frame supports +// the fmt.Formatter interface that can be used for printing information about +// the stack trace of this error. For example: +// +// if err, ok := err.(stackTracer); ok { +// for _, f := range err.StackTrace() { +// fmt.Printf("%+s:%d\n", f, f) +// } +// } +// +// Although the stackTracer interface is not exported by this package, it is +// considered a part of its stable public interface. +// +// See the documentation for Frame.Format for more details. +package errors + +import ( + "fmt" + "io" +) + +// New returns an error with the supplied message. +// New also records the stack trace at the point it was called. +func New(message string) error { + return &fundamental{ + msg: message, + stack: callers(), + } +} + +// Errorf formats according to a format specifier and returns the string +// as a value that satisfies error. 
+// Errorf also records the stack trace at the point it was called. +func Errorf(format string, args ...interface{}) error { + return &fundamental{ + msg: fmt.Sprintf(format, args...), + stack: callers(), + } +} + +// fundamental is an error that has a message and a stack, but no caller. +type fundamental struct { + msg string + *stack +} + +func (f *fundamental) Error() string { return f.msg } + +func (f *fundamental) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + io.WriteString(s, f.msg) + f.stack.Format(s, verb) + return + } + fallthrough + case 's': + io.WriteString(s, f.msg) + case 'q': + fmt.Fprintf(s, "%q", f.msg) + } +} + +// WithStack annotates err with a stack trace at the point WithStack was called. +// If err is nil, WithStack returns nil. +func WithStack(err error) error { + if err == nil { + return nil + } + return &withStack{ + err, + callers(), + } +} + +type withStack struct { + error + *stack +} + +func (w *withStack) Cause() error { return w.error } + +// Unwrap provides compatibility for Go 1.13 error chains. +func (w *withStack) Unwrap() error { return w.error } + +func (w *withStack) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprintf(s, "%+v", w.Cause()) + w.stack.Format(s, verb) + return + } + fallthrough + case 's': + io.WriteString(s, w.Error()) + case 'q': + fmt.Fprintf(s, "%q", w.Error()) + } +} + +// Wrap returns an error annotating err with a stack trace +// at the point Wrap is called, and the supplied message. +// If err is nil, Wrap returns nil. +func Wrap(err error, message string) error { + if err == nil { + return nil + } + err = &withMessage{ + cause: err, + msg: message, + } + return &withStack{ + err, + callers(), + } +} + +// Wrapf returns an error annotating err with a stack trace +// at the point Wrapf is called, and the format specifier. +// If err is nil, Wrapf returns nil. +func Wrapf(err error, format string, args ...interface{}) error { + if err == nil { + return nil + } + err = &withMessage{ + cause: err, + msg: fmt.Sprintf(format, args...), + } + return &withStack{ + err, + callers(), + } +} + +// WithMessage annotates err with a new message. +// If err is nil, WithMessage returns nil. +func WithMessage(err error, message string) error { + if err == nil { + return nil + } + return &withMessage{ + cause: err, + msg: message, + } +} + +// WithMessagef annotates err with the format specifier. +// If err is nil, WithMessagef returns nil. +func WithMessagef(err error, format string, args ...interface{}) error { + if err == nil { + return nil + } + return &withMessage{ + cause: err, + msg: fmt.Sprintf(format, args...), + } +} + +type withMessage struct { + cause error + msg string +} + +func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } +func (w *withMessage) Cause() error { return w.cause } + +// Unwrap provides compatibility for Go 1.13 error chains. +func (w *withMessage) Unwrap() error { return w.cause } + +func (w *withMessage) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprintf(s, "%+v\n", w.Cause()) + io.WriteString(s, w.msg) + return + } + fallthrough + case 's', 'q': + io.WriteString(s, w.Error()) + } +} + +// Cause returns the underlying cause of the error, if possible. +// An error value has a cause if it implements the following +// interface: +// +// type causer interface { +// Cause() error +// } +// +// If the error does not implement Cause, the original error will +// be returned. 
If the error is nil, nil will be returned without further +// investigation. +func Cause(err error) error { + type causer interface { + Cause() error + } + + for err != nil { + cause, ok := err.(causer) + if !ok { + break + } + err = cause.Cause() + } + return err +} diff --git a/hotelReservation/vendor/github.com/pkg/errors/go113.go b/hotelReservation/vendor/github.com/pkg/errors/go113.go new file mode 100644 index 000000000..be0d10d0c --- /dev/null +++ b/hotelReservation/vendor/github.com/pkg/errors/go113.go @@ -0,0 +1,38 @@ +// +build go1.13 + +package errors + +import ( + stderrors "errors" +) + +// Is reports whether any error in err's chain matches target. +// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. +// +// An error is considered to match a target if it is equal to that target or if +// it implements a method Is(error) bool such that Is(target) returns true. +func Is(err, target error) bool { return stderrors.Is(err, target) } + +// As finds the first error in err's chain that matches target, and if so, sets +// target to that error value and returns true. +// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. +// +// An error matches target if the error's concrete value is assignable to the value +// pointed to by target, or if the error has a method As(interface{}) bool such that +// As(target) returns true. In the latter case, the As method is responsible for +// setting target. +// +// As will panic if target is not a non-nil pointer to either a type that implements +// error, or to any interface type. As returns false if err is nil. +func As(err error, target interface{}) bool { return stderrors.As(err, target) } + +// Unwrap returns the result of calling the Unwrap method on err, if err's +// type contains an Unwrap method returning error. +// Otherwise, Unwrap returns nil. +func Unwrap(err error) error { + return stderrors.Unwrap(err) +} diff --git a/hotelReservation/vendor/github.com/pkg/errors/stack.go b/hotelReservation/vendor/github.com/pkg/errors/stack.go new file mode 100644 index 000000000..779a8348f --- /dev/null +++ b/hotelReservation/vendor/github.com/pkg/errors/stack.go @@ -0,0 +1,177 @@ +package errors + +import ( + "fmt" + "io" + "path" + "runtime" + "strconv" + "strings" +) + +// Frame represents a program counter inside a stack frame. +// For historical reasons if Frame is interpreted as a uintptr +// its value represents the program counter + 1. +type Frame uintptr + +// pc returns the program counter for this frame; +// multiple frames may have the same PC value. +func (f Frame) pc() uintptr { return uintptr(f) - 1 } + +// file returns the full path to the file that contains the +// function for this Frame's pc. +func (f Frame) file() string { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return "unknown" + } + file, _ := fn.FileLine(f.pc()) + return file +} + +// line returns the line number of source code of the +// function for this Frame's pc. +func (f Frame) line() int { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return 0 + } + _, line := fn.FileLine(f.pc()) + return line +} + +// name returns the name of this function, if known. +func (f Frame) name() string { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return "unknown" + } + return fn.Name() +} + +// Format formats the frame according to the fmt.Formatter interface. 
+// +// %s source file +// %d source line +// %n function name +// %v equivalent to %s:%d +// +// Format accepts flags that alter the printing of some verbs, as follows: +// +// %+s function name and path of source file relative to the compile time +// GOPATH separated by \n\t (\n\t) +// %+v equivalent to %+s:%d +func (f Frame) Format(s fmt.State, verb rune) { + switch verb { + case 's': + switch { + case s.Flag('+'): + io.WriteString(s, f.name()) + io.WriteString(s, "\n\t") + io.WriteString(s, f.file()) + default: + io.WriteString(s, path.Base(f.file())) + } + case 'd': + io.WriteString(s, strconv.Itoa(f.line())) + case 'n': + io.WriteString(s, funcname(f.name())) + case 'v': + f.Format(s, 's') + io.WriteString(s, ":") + f.Format(s, 'd') + } +} + +// MarshalText formats a stacktrace Frame as a text string. The output is the +// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs. +func (f Frame) MarshalText() ([]byte, error) { + name := f.name() + if name == "unknown" { + return []byte(name), nil + } + return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil +} + +// StackTrace is stack of Frames from innermost (newest) to outermost (oldest). +type StackTrace []Frame + +// Format formats the stack of Frames according to the fmt.Formatter interface. +// +// %s lists source files for each Frame in the stack +// %v lists the source file and line number for each Frame in the stack +// +// Format accepts flags that alter the printing of some verbs, as follows: +// +// %+v Prints filename, function, and line number for each Frame in the stack. +func (st StackTrace) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + switch { + case s.Flag('+'): + for _, f := range st { + io.WriteString(s, "\n") + f.Format(s, verb) + } + case s.Flag('#'): + fmt.Fprintf(s, "%#v", []Frame(st)) + default: + st.formatSlice(s, verb) + } + case 's': + st.formatSlice(s, verb) + } +} + +// formatSlice will format this StackTrace into the given buffer as a slice of +// Frame, only valid when called with '%s' or '%v'. +func (st StackTrace) formatSlice(s fmt.State, verb rune) { + io.WriteString(s, "[") + for i, f := range st { + if i > 0 { + io.WriteString(s, " ") + } + f.Format(s, verb) + } + io.WriteString(s, "]") +} + +// stack represents a stack of program counters. +type stack []uintptr + +func (s *stack) Format(st fmt.State, verb rune) { + switch verb { + case 'v': + switch { + case st.Flag('+'): + for _, pc := range *s { + f := Frame(pc) + fmt.Fprintf(st, "\n%+v", f) + } + } + } +} + +func (s *stack) StackTrace() StackTrace { + f := make([]Frame, len(*s)) + for i := 0; i < len(f); i++ { + f[i] = Frame((*s)[i]) + } + return f +} + +func callers() *stack { + const depth = 32 + var pcs [depth]uintptr + n := runtime.Callers(3, pcs[:]) + var st stack = pcs[0:n] + return &st +} + +// funcname removes the path prefix component of a function's name reported by func.Name(). 
+func funcname(name string) string { + i := strings.LastIndex(name, "/") + name = name[i+1:] + i = strings.Index(name, ".") + return name[i+1:] +} diff --git a/hotelReservation/vendor/github.com/rs/zerolog/.gitignore b/hotelReservation/vendor/github.com/rs/zerolog/.gitignore new file mode 100644 index 000000000..8ebe58b15 --- /dev/null +++ b/hotelReservation/vendor/github.com/rs/zerolog/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +tmp + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/hotelReservation/vendor/github.com/rs/zerolog/CNAME b/hotelReservation/vendor/github.com/rs/zerolog/CNAME new file mode 100644 index 000000000..9ce57a6eb --- /dev/null +++ b/hotelReservation/vendor/github.com/rs/zerolog/CNAME @@ -0,0 +1 @@ +zerolog.io \ No newline at end of file diff --git a/hotelReservation/vendor/github.com/rs/zerolog/LICENSE b/hotelReservation/vendor/github.com/rs/zerolog/LICENSE new file mode 100644 index 000000000..677e07f7a --- /dev/null +++ b/hotelReservation/vendor/github.com/rs/zerolog/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 Olivier Poitrey + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/hotelReservation/vendor/github.com/rs/zerolog/README.md b/hotelReservation/vendor/github.com/rs/zerolog/README.md new file mode 100644 index 000000000..972b729fb --- /dev/null +++ b/hotelReservation/vendor/github.com/rs/zerolog/README.md @@ -0,0 +1,786 @@ +# Zero Allocation JSON Logger + +[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/rs/zerolog) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/rs/zerolog/master/LICENSE) [![Build Status](https://github.com/rs/zerolog/actions/workflows/test.yml/badge.svg)](https://github.com/rs/zerolog/actions/workflows/test.yml) [![Go Coverage](https://github.com/rs/zerolog/wiki/coverage.svg)](https://raw.githack.com/wiki/rs/zerolog/coverage.html) + +The zerolog package provides a fast and simple logger dedicated to JSON output. + +Zerolog's API is designed to provide both a great developer experience and stunning [performance](#benchmarks). Its unique chaining API allows zerolog to write JSON (or CBOR) log events by avoiding allocations and reflection. 
+ +Uber's [zap](https://godoc.org/go.uber.org/zap) library pioneered this approach. Zerolog is taking this concept to the next level with a simpler to use API and even better performance. + +To keep the code base and the API simple, zerolog focuses on efficient structured logging only. Pretty logging on the console is made possible using the provided (but inefficient) [`zerolog.ConsoleWriter`](#pretty-logging). + +![Pretty Logging Image](pretty.png) + +## Who uses zerolog + +Find out [who uses zerolog](https://github.com/rs/zerolog/wiki/Who-uses-zerolog) and add your company / project to the list. + +## Features + +* [Blazing fast](#benchmarks) +* [Low to zero allocation](#benchmarks) +* [Leveled logging](#leveled-logging) +* [Sampling](#log-sampling) +* [Hooks](#hooks) +* [Contextual fields](#contextual-logging) +* [`context.Context` integration](#contextcontext-integration) +* [Integration with `net/http`](#integration-with-nethttp) +* [JSON and CBOR encoding formats](#binary-encoding) +* [Pretty logging for development](#pretty-logging) +* [Error Logging (with optional Stacktrace)](#error-logging) + +## Installation + +```bash +go get -u github.com/rs/zerolog/log +``` + +## Getting Started + +### Simple Logging Example + +For simple logging, import the global logger package **github.com/rs/zerolog/log** + +```go +package main + +import ( + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func main() { + // UNIX Time is faster and smaller than most timestamps + zerolog.TimeFieldFormat = zerolog.TimeFormatUnix + + log.Print("hello world") +} + +// Output: {"time":1516134303,"level":"debug","message":"hello world"} +``` +> Note: By default log writes to `os.Stderr` +> Note: The default log level for `log.Print` is *debug* + +### Contextual Logging + +**zerolog** allows data to be added to log messages in the form of key:value pairs. The data added to the message adds "context" about the log event that can be critical for debugging as well as myriad other purposes. An example of this is below: + +```go +package main + +import ( + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func main() { + zerolog.TimeFieldFormat = zerolog.TimeFormatUnix + + log.Debug(). + Str("Scale", "833 cents"). + Float64("Interval", 833.09). + Msg("Fibonacci is everywhere") + + log.Debug(). + Str("Name", "Tom"). + Send() +} + +// Output: {"level":"debug","Scale":"833 cents","Interval":833.09,"time":1562212768,"message":"Fibonacci is everywhere"} +// Output: {"level":"debug","Name":"Tom","time":1562212768} +``` + +> You'll note in the above example that when adding contextual fields, the fields are strongly typed. You can find the full list of supported fields [here](#standard-types) + +### Leveled Logging + +#### Simple Leveled Logging Example + +```go +package main + +import ( + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func main() { + zerolog.TimeFieldFormat = zerolog.TimeFormatUnix + + log.Info().Msg("hello world") +} + +// Output: {"time":1516134303,"level":"info","message":"hello world"} +``` + +> It is very important to note that when using the **zerolog** chaining API, as shown above (`log.Info().Msg("hello world"`), the chain must have either the `Msg` or `Msgf` method call. If you forget to add either of these, the log will not occur and there is no compile time error to alert you of this. 
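To make the note above concrete, a minimal sketch of the silent-drop pitfall (nothing here is special configuration, just the chaining rule):

```go
package main

import "github.com/rs/zerolog/log"

func main() {
	// Compiles, but logs nothing: the chain is never terminated.
	log.Info().Str("foo", "bar")

	// The event is only written once Msg, Msgf, or Send is called.
	log.Info().Str("foo", "bar").Msg("now it is emitted")
}
```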
+ +**zerolog** allows for logging at the following levels (from highest to lowest): + +* panic (`zerolog.PanicLevel`, 5) +* fatal (`zerolog.FatalLevel`, 4) +* error (`zerolog.ErrorLevel`, 3) +* warn (`zerolog.WarnLevel`, 2) +* info (`zerolog.InfoLevel`, 1) +* debug (`zerolog.DebugLevel`, 0) +* trace (`zerolog.TraceLevel`, -1) + +You can set the Global logging level to any of these options using the `SetGlobalLevel` function in the zerolog package, passing in one of the given constants above, e.g. `zerolog.InfoLevel` would be the "info" level. Whichever level is chosen, all logs with a level greater than or equal to that level will be written. To turn off logging entirely, pass the `zerolog.Disabled` constant. + +#### Setting Global Log Level + +This example uses command-line flags to demonstrate various outputs depending on the chosen log level. + +```go +package main + +import ( + "flag" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func main() { + zerolog.TimeFieldFormat = zerolog.TimeFormatUnix + debug := flag.Bool("debug", false, "sets log level to debug") + + flag.Parse() + + // Default level for this example is info, unless debug flag is present + zerolog.SetGlobalLevel(zerolog.InfoLevel) + if *debug { + zerolog.SetGlobalLevel(zerolog.DebugLevel) + } + + log.Debug().Msg("This message appears only when log level set to Debug") + log.Info().Msg("This message appears when log level set to Debug or Info") + + if e := log.Debug(); e.Enabled() { + // Compute log output only if enabled. + value := "bar" + e.Str("foo", value).Msg("some debug message") + } +} +``` + +Info Output (no flag) + +```bash +$ ./logLevelExample +{"time":1516387492,"level":"info","message":"This message appears when log level set to Debug or Info"} +``` + +Debug Output (debug flag set) + +```bash +$ ./logLevelExample -debug +{"time":1516387573,"level":"debug","message":"This message appears only when log level set to Debug"} +{"time":1516387573,"level":"info","message":"This message appears when log level set to Debug or Info"} +{"time":1516387573,"level":"debug","foo":"bar","message":"some debug message"} +``` + +#### Logging without Level or Message + +You may choose to log without a specific level by using the `Log` method. You may also write without a message by setting an empty string in the `msg string` parameter of the `Msg` method. Both are demonstrated in the example below. + +```go +package main + +import ( + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func main() { + zerolog.TimeFieldFormat = zerolog.TimeFormatUnix + + log.Log(). + Str("foo", "bar"). + Msg("") +} + +// Output: {"time":1494567715,"foo":"bar"} +``` + +### Error Logging + +You can log errors using the `Err` method + +```go +package main + +import ( + "errors" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func main() { + zerolog.TimeFieldFormat = zerolog.TimeFormatUnix + + err := errors.New("seems we have an error here") + log.Error().Err(err).Msg("") +} + +// Output: {"level":"error","error":"seems we have an error here","time":1609085256} +``` + +> The default field name for errors is `error`, you can change this by setting `zerolog.ErrorFieldName` to meet your needs. + +#### Error Logging with Stacktrace + +Using `github.com/pkg/errors`, you can add a formatted stacktrace to your errors. 
+ +```go +package main + +import ( + "github.com/pkg/errors" + "github.com/rs/zerolog/pkgerrors" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func main() { + zerolog.TimeFieldFormat = zerolog.TimeFormatUnix + zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack + + err := outer() + log.Error().Stack().Err(err).Msg("") +} + +func inner() error { + return errors.New("seems we have an error here") +} + +func middle() error { + err := inner() + if err != nil { + return err + } + return nil +} + +func outer() error { + err := middle() + if err != nil { + return err + } + return nil +} + +// Output: {"level":"error","stack":[{"func":"inner","line":"20","source":"errors.go"},{"func":"middle","line":"24","source":"errors.go"},{"func":"outer","line":"32","source":"errors.go"},{"func":"main","line":"15","source":"errors.go"},{"func":"main","line":"204","source":"proc.go"},{"func":"goexit","line":"1374","source":"asm_amd64.s"}],"error":"seems we have an error here","time":1609086683} +``` + +> zerolog.ErrorStackMarshaler must be set in order for the stack to output anything. + +#### Logging Fatal Messages + +```go +package main + +import ( + "errors" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func main() { + err := errors.New("A repo man spends his life getting into tense situations") + service := "myservice" + + zerolog.TimeFieldFormat = zerolog.TimeFormatUnix + + log.Fatal(). + Err(err). + Str("service", service). + Msgf("Cannot start %s", service) +} + +// Output: {"time":1516133263,"level":"fatal","error":"A repo man spends his life getting into tense situations","service":"myservice","message":"Cannot start myservice"} +// exit status 1 +``` + +> NOTE: Using `Msgf` generates one allocation even when the logger is disabled. + + +### Create logger instance to manage different outputs + +```go +logger := zerolog.New(os.Stderr).With().Timestamp().Logger() + +logger.Info().Str("foo", "bar").Msg("hello world") + +// Output: {"level":"info","time":1494567715,"message":"hello world","foo":"bar"} +``` + +### Sub-loggers let you chain loggers with additional context + +```go +sublogger := log.With(). + Str("component", "foo"). + Logger() +sublogger.Info().Msg("hello world") + +// Output: {"level":"info","time":1494567715,"message":"hello world","component":"foo"} +``` + +### Pretty logging + +To log a human-friendly, colorized output, use `zerolog.ConsoleWriter`: + +```go +log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr}) + +log.Info().Str("foo", "bar").Msg("Hello world") + +// Output: 3:04PM INF Hello World foo=bar +``` + +To customize the configuration and formatting: + +```go +output := zerolog.ConsoleWriter{Out: os.Stdout, TimeFormat: time.RFC3339} +output.FormatLevel = func(i interface{}) string { + return strings.ToUpper(fmt.Sprintf("| %-6s|", i)) +} +output.FormatMessage = func(i interface{}) string { + return fmt.Sprintf("***%s****", i) +} +output.FormatFieldName = func(i interface{}) string { + return fmt.Sprintf("%s:", i) +} +output.FormatFieldValue = func(i interface{}) string { + return strings.ToUpper(fmt.Sprintf("%s", i)) +} + +log := zerolog.New(output).With().Timestamp().Logger() + +log.Info().Str("foo", "bar").Msg("Hello World") + +// Output: 2006-01-02T15:04:05Z07:00 | INFO | ***Hello World**** foo:BAR +``` + +### Sub dictionary + +```go +log.Info(). + Str("foo", "bar"). + Dict("dict", zerolog.Dict(). + Str("bar", "baz"). 
+
+### Sub dictionary
+
+```go
+log.Info().
+	Str("foo", "bar").
+	Dict("dict", zerolog.Dict().
+		Str("bar", "baz").
+		Int("n", 1),
+	).Msg("hello world")
+
+// Output: {"level":"info","time":1494567715,"foo":"bar","dict":{"bar":"baz","n":1},"message":"hello world"}
+```
+
+### Customize automatic field names
+
+```go
+zerolog.TimestampFieldName = "t"
+zerolog.LevelFieldName = "l"
+zerolog.MessageFieldName = "m"
+
+log.Info().Msg("hello world")
+
+// Output: {"l":"info","t":1494567715,"m":"hello world"}
+```
+
+### Add contextual fields to the global logger
+
+```go
+log.Logger = log.With().Str("foo", "bar").Logger()
+```
+
+### Add file and line number to log
+
+Equivalent of `Llongfile`:
+
+```go
+log.Logger = log.With().Caller().Logger()
+log.Info().Msg("hello world")
+
+// Output: {"level": "info", "message": "hello world", "caller": "/go/src/your_project/some_file:21"}
+```
+
+Equivalent of `Lshortfile`:
+
+```go
+zerolog.CallerMarshalFunc = func(pc uintptr, file string, line int) string {
+	short := file
+	for i := len(file) - 1; i > 0; i-- {
+		if file[i] == '/' {
+			short = file[i+1:]
+			break
+		}
+	}
+	file = short
+	return file + ":" + strconv.Itoa(line)
+}
+log.Logger = log.With().Caller().Logger()
+log.Info().Msg("hello world")
+
+// Output: {"level": "info", "message": "hello world", "caller": "some_file:21"}
+```
+
+### Thread-safe, lock-free, non-blocking writer
+
+If your writer might be slow or is not thread-safe, and you need your log producers to never be slowed down by it, you can use a `diode.Writer` as follows:
+
+```go
+wr := diode.NewWriter(os.Stdout, 1000, 10*time.Millisecond, func(missed int) {
+	fmt.Printf("Logger Dropped %d messages", missed)
+})
+log := zerolog.New(wr)
+log.Print("test")
+```
+
+You will need to install `code.cloudfoundry.org/go-diodes` to use this feature.
+
+### Log Sampling
+
+```go
+sampled := log.Sample(&zerolog.BasicSampler{N: 10})
+sampled.Info().Msg("will be logged every 10 messages")
+
+// Output: {"time":1494567715,"level":"info","message":"will be logged every 10 messages"}
+```
+
+More advanced sampling:
+
+```go
+// Allow a burst of 5 debug messages per period of 1 second.
+// Beyond that, log only 1 in every 100 debug messages.
+// Other levels are not sampled.
+sampled := log.Sample(zerolog.LevelSampler{
+	DebugSampler: &zerolog.BurstSampler{
+		Burst:       5,
+		Period:      1 * time.Second,
+		NextSampler: &zerolog.BasicSampler{N: 100},
+	},
+})
+sampled.Debug().Msg("hello world")
+
+// Output: {"time":1494567715,"level":"debug","message":"hello world"}
+```
+
+### Hooks
+
+```go
+type SeverityHook struct{}
+
+func (h SeverityHook) Run(e *zerolog.Event, level zerolog.Level, msg string) {
+	if level != zerolog.NoLevel {
+		e.Str("severity", level.String())
+	}
+}
+
+hooked := log.Hook(SeverityHook{})
+hooked.Warn().Msg("")
+
+// Output: {"level":"warn","severity":"warn"}
+```
+
+### Pass a sub-logger by context
+
+```go
+ctx := context.Background()
+ctx = log.With().Str("component", "module").Logger().WithContext(ctx)
+
+log.Ctx(ctx).Info().Msg("hello world")
+
+// Output: {"component":"module","level":"info","message":"hello world"}
+```
+
+### Set as standard logger output
+
+```go
+log := zerolog.New(os.Stdout).With().
+	Str("foo", "bar").
+	Logger()
+
+stdlog.SetFlags(0)
+stdlog.SetOutput(log)
+
+stdlog.Print("hello world")
+
+// Output: {"foo":"bar","message":"hello world"}
+```
+
+### context.Context integration
+
+Go contexts are commonly passed throughout Go code, and this can help you pass
+your Logger into places it might otherwise be hard to inject. The `Logger`
+instance may be attached to a Go context (`context.Context`) using
+`Logger.WithContext(ctx)` and extracted from it using `zerolog.Ctx(ctx)`.
+For example:
+
+```go
+func f() {
+	logger := zerolog.New(os.Stdout)
+	ctx := context.Background()
+
+	// Attach the Logger to the context.Context
+	ctx = logger.WithContext(ctx)
+	someFunc(ctx)
+}
+
+func someFunc(ctx context.Context) {
+	// Get the Logger from the Go context. If no Logger is attached,
+	// `zerolog.DefaultContextLogger` is returned; if
+	// `DefaultContextLogger` is nil, then a disabled logger is returned.
+	logger := zerolog.Ctx(ctx)
+	logger.Info().Msg("Hello")
+}
+```
+
+A second form of `context.Context` integration allows you to pass the current
+context.Context into the logged event, and retrieve it from hooks. This can be
+useful to log trace and span IDs or other information stored in the Go context,
+and facilitates the unification of logging and tracing in some systems:
+
+```go
+type TracingHook struct{}
+
+func (h TracingHook) Run(e *zerolog.Event, level zerolog.Level, msg string) {
+	ctx := e.Ctx()
+	spanId := getSpanIdFromContext(ctx) // as per your tracing framework
+	e.Str("span-id", spanId)
+}
+
+func f() {
+	// Setup the logger
+	logger := zerolog.New(os.Stdout)
+	logger = logger.Hook(TracingHook{})
+
+	ctx := context.Background()
+	// Use the Ctx function to make the context available to the hook
+	logger.Info().Ctx(ctx).Msg("Hello")
+}
+```
+
+### Integration with `net/http`
+
+The `github.com/rs/zerolog/hlog` package provides some helpers to integrate zerolog with `http.Handler`.
+
+In this example we use [alice](https://github.com/justinas/alice) to install the logger, for better readability.
+
+```go
+log := zerolog.New(os.Stdout).With().
+	Timestamp().
+	Str("role", "my-service").
+	Str("host", host).
+	Logger()
+
+c := alice.New()
+
+// Install the logger handler with default output on the console
+c = c.Append(hlog.NewHandler(log))
+
+// Install some of the provided extra handlers to set request context fields.
+// Thanks to these handlers, all our logs will come with prepopulated fields.
+c = c.Append(hlog.AccessHandler(func(r *http.Request, status, size int, duration time.Duration) {
+	hlog.FromRequest(r).Info().
+		Str("method", r.Method).
+		Stringer("url", r.URL).
+		Int("status", status).
+		Int("size", size).
+		Dur("duration", duration).
+		Msg("")
+}))
+c = c.Append(hlog.RemoteAddrHandler("ip"))
+c = c.Append(hlog.UserAgentHandler("user_agent"))
+c = c.Append(hlog.RefererHandler("referer"))
+c = c.Append(hlog.RequestIDHandler("req_id", "Request-Id"))
+
+// Here is your final handler
+h := c.Then(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+	// Get the logger from the request's context. You can safely assume it
+	// will always be there: if the handler is removed, hlog.FromRequest
+	// will return a no-op logger.
+	hlog.FromRequest(r).Info().
+		Str("user", "current user").
+		Str("status", "ok").
+		Msg("Something happened")
+
+	// Output: {"level":"info","time":"2001-02-03T04:05:06Z","role":"my-service","host":"local-hostname","req_id":"b4g0l5t6tfid6dtrapu0","user":"current user","status":"ok","message":"Something happened"}
+}))
+http.Handle("/", h)
+
+if err := http.ListenAndServe(":8080", nil); err != nil {
+	log.Fatal().Err(err).Msg("Startup failed")
+}
+```
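+
+If you would rather not depend on alice, the hlog handlers are ordinary `func(http.Handler) http.Handler` middleware and can be chained by hand; a minimal sketch:
+
+```go
+h := hlog.NewHandler(log)(
+	hlog.RequestIDHandler("req_id", "Request-Id")(
+		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			hlog.FromRequest(r).Info().Msg("Something happened")
+		}),
+	),
+)
+http.Handle("/", h)
+```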
+
+## Multiple Log Output
+`zerolog.MultiLevelWriter` may be used to send the log message to multiple outputs.
+In this example, we send the log message to both `os.Stdout` and the in-built ConsoleWriter.
+```go
+func main() {
+	consoleWriter := zerolog.ConsoleWriter{Out: os.Stdout}
+
+	multi := zerolog.MultiLevelWriter(consoleWriter, os.Stdout)
+
+	logger := zerolog.New(multi).With().Timestamp().Logger()
+
+	logger.Info().Msg("Hello World!")
+}
+
+// Output (Line 1: Console; Line 2: Stdout)
+// 12:36PM INF Hello World!
+// {"level":"info","time":"2019-11-07T12:36:38+03:00","message":"Hello World!"}
+```
+
+## Global Settings
+
+Some settings can be changed and will be applied to all loggers:
+
+* `log.Logger`: You can set this value to customize the global logger (the one used by package level methods).
+* `zerolog.SetGlobalLevel`: Can raise the minimum level of all loggers. Call this with `zerolog.Disabled` to disable logging altogether (quiet mode).
+* `zerolog.DisableSampling`: If the argument is `true`, all sampled loggers will stop sampling and issue 100% of their log events.
+* `zerolog.TimestampFieldName`: Can be set to customize `Timestamp` field name.
+* `zerolog.LevelFieldName`: Can be set to customize level field name.
+* `zerolog.MessageFieldName`: Can be set to customize message field name.
+* `zerolog.ErrorFieldName`: Can be set to customize `Err` field name.
+* `zerolog.TimeFieldFormat`: Can be set to customize `Time` field value formatting. If set to `zerolog.TimeFormatUnix`, `zerolog.TimeFormatUnixMs` or `zerolog.TimeFormatUnixMicro`, times are formatted as UNIX timestamps.
+* `zerolog.DurationFieldUnit`: Can be set to customize the unit for time.Duration type fields added by `Dur` (default: `time.Millisecond`).
+* `zerolog.DurationFieldInteger`: If set to `true`, `Dur` fields are formatted as integers instead of floats (default: `false`).
+* `zerolog.ErrorHandler`: Called whenever zerolog fails to write an event on its output. If not set, an error is printed to stderr. This handler must be thread-safe and non-blocking.
+
+## Field Types
+
+### Standard Types
+
+* `Str`
+* `Bool`
+* `Int`, `Int8`, `Int16`, `Int32`, `Int64`
+* `Uint`, `Uint8`, `Uint16`, `Uint32`, `Uint64`
+* `Float32`, `Float64`
+
+### Advanced Fields
+
+* `Err`: Takes an `error` and renders it as a string using the `zerolog.ErrorFieldName` field name.
+* `Func`: Runs a `func` only if the level is enabled (see the sketch after the Binary Encoding section below).
+* `Timestamp`: Inserts a timestamp field with `zerolog.TimestampFieldName` field name, formatted using `zerolog.TimeFieldFormat`.
+* `Time`: Adds a field with time formatted with `zerolog.TimeFieldFormat`.
+* `Dur`: Adds a field with `time.Duration`.
+* `Dict`: Adds a sub-key/value as a field of the event.
+* `RawJSON`: Adds a field with already encoded JSON (`[]byte`).
+* `Hex`: Adds a field with the value formatted as a hexadecimal string (`[]byte`).
+* `Interface`: Uses reflection to marshal the type.
+
+Most fields are also available in slice form (`Strs` for `[]string`, `Errs` for `[]error`, etc.).
+
+## Binary Encoding
+
+In addition to the default JSON encoding, `zerolog` can produce binary logs using [CBOR](https://cbor.io) encoding. The choice of encoding can be decided at compile time using the build tag `binary_log` as follows:
+
+```bash
+go build -tags binary_log .
+```
+
+To decode binary-encoded log files you can use any CBOR decoder. One that has been tested to work
+with zerolog is [CSD](https://github.com/toravir/csd/).
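+
+As promised above, here is a minimal sketch of the `Func` field from the Advanced Fields list. It defers the cost of computing a field until the event is actually enabled; `computeExpensiveValue` is a hypothetical stand-in for any costly call:
+
+```go
+log.Debug().
+	Func(func(e *zerolog.Event) {
+		// This closure runs only when the debug level is enabled.
+		e.Str("expensive", computeExpensiveValue()) // hypothetical helper
+	}).
+	Msg("lazy fields")
+```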
+
+## Related Projects
+
+* [grpc-zerolog](https://github.com/cheapRoc/grpc-zerolog): Implementation of `grpclog.LoggerV2` interface using `zerolog`
+* [overlog](https://github.com/Trendyol/overlog): Implementation of `Mapped Diagnostic Context` interface using `zerolog`
+* [zerologr](https://github.com/go-logr/zerologr): Implementation of `logr.LogSink` interface using `zerolog`
+
+## Benchmarks
+
+See [logbench](http://bench.zerolog.io/) for more comprehensive and up-to-date benchmarks.
+
+All operations are allocation free (those numbers *include* JSON encoding):
+
+```text
+BenchmarkLogEmpty-8        100000000    19.1 ns/op     0 B/op       0 allocs/op
+BenchmarkDisabled-8        500000000    4.07 ns/op     0 B/op       0 allocs/op
+BenchmarkInfo-8            30000000     42.5 ns/op     0 B/op       0 allocs/op
+BenchmarkContextFields-8   30000000     44.9 ns/op     0 B/op       0 allocs/op
+BenchmarkLogFields-8       10000000     184 ns/op      0 B/op       0 allocs/op
+```
+
+There are a few Go logging benchmarks and comparisons that include zerolog.
+
+* [imkira/go-loggers-bench](https://github.com/imkira/go-loggers-bench)
+* [uber-common/zap](https://github.com/uber-go/zap#performance)
+
+Using Uber's zap comparison benchmark:
+
+Log a message and 10 fields:
+
+| Library | Time | Bytes Allocated | Objects Allocated |
+| :--- | :---: | :---: | :---: |
+| zerolog | 767 ns/op | 552 B/op | 6 allocs/op |
+| :zap: zap | 848 ns/op | 704 B/op | 2 allocs/op |
+| :zap: zap (sugared) | 1363 ns/op | 1610 B/op | 20 allocs/op |
+| go-kit | 3614 ns/op | 2895 B/op | 66 allocs/op |
+| lion | 5392 ns/op | 5807 B/op | 63 allocs/op |
+| logrus | 5661 ns/op | 6092 B/op | 78 allocs/op |
+| apex/log | 15332 ns/op | 3832 B/op | 65 allocs/op |
+| log15 | 20657 ns/op | 5632 B/op | 93 allocs/op |
+
+Log a message with a logger that already has 10 fields of context:
+
+| Library | Time | Bytes Allocated | Objects Allocated |
+| :--- | :---: | :---: | :---: |
+| zerolog | 52 ns/op | 0 B/op | 0 allocs/op |
+| :zap: zap | 283 ns/op | 0 B/op | 0 allocs/op |
+| :zap: zap (sugared) | 337 ns/op | 80 B/op | 2 allocs/op |
+| lion | 2702 ns/op | 4074 B/op | 38 allocs/op |
+| go-kit | 3378 ns/op | 3046 B/op | 52 allocs/op |
+| logrus | 4309 ns/op | 4564 B/op | 63 allocs/op |
+| apex/log | 13456 ns/op | 2898 B/op | 51 allocs/op |
+| log15 | 14179 ns/op | 2642 B/op | 44 allocs/op |
+
+Log a static string, without any context or `printf`-style templating:
+
+| Library | Time | Bytes Allocated | Objects Allocated |
+| :--- | :---: | :---: | :---: |
+| zerolog | 50 ns/op | 0 B/op | 0 allocs/op |
+| :zap: zap | 236 ns/op | 0 B/op | 0 allocs/op |
+| :zap: zap (sugared) | 337 ns/op | 80 B/op | 2 allocs/op |
+| standard library | 453 ns/op | 80 B/op | 2 allocs/op |
+| go-kit | 508 ns/op | 656 B/op | 13 allocs/op |
+| lion | 771 ns/op | 1224 B/op | 10 allocs/op |
+| logrus | 1244 ns/op | 1505 B/op | 27 allocs/op |
+| apex/log | 2751 ns/op | 584 B/op | 11 allocs/op |
+| log15 | 5181 ns/op | 1592 B/op | 26 allocs/op |
+
+## Caveats
+
+### Field duplication
+
+Note that zerolog performs no de-duplication of fields. Using the same key multiple times creates multiple keys in the final JSON:
+
+```go
+logger := zerolog.New(os.Stderr).With().Timestamp().Logger()
+logger.Info().
+	Timestamp().
+	Msg("dup")
+// Output: {"level":"info","time":1494567715,"time":1494567715,"message":"dup"}
+```
+
+In this case, many consumers will take the last value, but this is not guaranteed; check yours if in doubt.
+
+### Concurrency safety
+
+Be careful when calling UpdateContext. It is not concurrency safe.
Use the With method to create a child logger: + +```go +func handler(w http.ResponseWriter, r *http.Request) { + // Create a child logger for concurrency safety + logger := log.Logger.With().Logger() + + // Add context fields, for example User-Agent from HTTP headers + logger.UpdateContext(func(c zerolog.Context) zerolog.Context { + ... + }) +} +``` diff --git a/hotelReservation/vendor/github.com/rs/zerolog/_config.yml b/hotelReservation/vendor/github.com/rs/zerolog/_config.yml new file mode 100644 index 000000000..a1e896d7b --- /dev/null +++ b/hotelReservation/vendor/github.com/rs/zerolog/_config.yml @@ -0,0 +1 @@ +remote_theme: rs/gh-readme diff --git a/hotelReservation/vendor/github.com/rs/zerolog/array.go b/hotelReservation/vendor/github.com/rs/zerolog/array.go new file mode 100644 index 000000000..99612ee9f --- /dev/null +++ b/hotelReservation/vendor/github.com/rs/zerolog/array.go @@ -0,0 +1,240 @@ +package zerolog + +import ( + "net" + "sync" + "time" +) + +var arrayPool = &sync.Pool{ + New: func() interface{} { + return &Array{ + buf: make([]byte, 0, 500), + } + }, +} + +// Array is used to prepopulate an array of items +// which can be re-used to add to log messages. +type Array struct { + buf []byte +} + +func putArray(a *Array) { + // Proper usage of a sync.Pool requires each entry to have approximately + // the same memory cost. To obtain this property when the stored type + // contains a variably-sized buffer, we add a hard limit on the maximum buffer + // to place back in the pool. + // + // See https://golang.org/issue/23199 + const maxSize = 1 << 16 // 64KiB + if cap(a.buf) > maxSize { + return + } + arrayPool.Put(a) +} + +// Arr creates an array to be added to an Event or Context. +func Arr() *Array { + a := arrayPool.Get().(*Array) + a.buf = a.buf[:0] + return a +} + +// MarshalZerologArray method here is no-op - since data is +// already in the needed format. +func (*Array) MarshalZerologArray(*Array) { +} + +func (a *Array) write(dst []byte) []byte { + dst = enc.AppendArrayStart(dst) + if len(a.buf) > 0 { + dst = append(dst, a.buf...) + } + dst = enc.AppendArrayEnd(dst) + putArray(a) + return dst +} + +// Object marshals an object that implement the LogObjectMarshaler +// interface and appends it to the array. +func (a *Array) Object(obj LogObjectMarshaler) *Array { + e := Dict() + obj.MarshalZerologObject(e) + e.buf = enc.AppendEndMarker(e.buf) + a.buf = append(enc.AppendArrayDelim(a.buf), e.buf...) + putEvent(e) + return a +} + +// Str appends the val as a string to the array. +func (a *Array) Str(val string) *Array { + a.buf = enc.AppendString(enc.AppendArrayDelim(a.buf), val) + return a +} + +// Bytes appends the val as a string to the array. +func (a *Array) Bytes(val []byte) *Array { + a.buf = enc.AppendBytes(enc.AppendArrayDelim(a.buf), val) + return a +} + +// Hex appends the val as a hex string to the array. +func (a *Array) Hex(val []byte) *Array { + a.buf = enc.AppendHex(enc.AppendArrayDelim(a.buf), val) + return a +} + +// RawJSON adds already encoded JSON to the array. +func (a *Array) RawJSON(val []byte) *Array { + a.buf = appendJSON(enc.AppendArrayDelim(a.buf), val) + return a +} + +// Err serializes and appends the err to the array. +func (a *Array) Err(err error) *Array { + switch m := ErrorMarshalFunc(err).(type) { + case LogObjectMarshaler: + e := newEvent(nil, 0) + e.buf = e.buf[:0] + e.appendObject(m) + a.buf = append(enc.AppendArrayDelim(a.buf), e.buf...) 
+ putEvent(e) + case error: + if m == nil || isNilValue(m) { + a.buf = enc.AppendNil(enc.AppendArrayDelim(a.buf)) + } else { + a.buf = enc.AppendString(enc.AppendArrayDelim(a.buf), m.Error()) + } + case string: + a.buf = enc.AppendString(enc.AppendArrayDelim(a.buf), m) + default: + a.buf = enc.AppendInterface(enc.AppendArrayDelim(a.buf), m) + } + + return a +} + +// Bool appends the val as a bool to the array. +func (a *Array) Bool(b bool) *Array { + a.buf = enc.AppendBool(enc.AppendArrayDelim(a.buf), b) + return a +} + +// Int appends i as a int to the array. +func (a *Array) Int(i int) *Array { + a.buf = enc.AppendInt(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Int8 appends i as a int8 to the array. +func (a *Array) Int8(i int8) *Array { + a.buf = enc.AppendInt8(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Int16 appends i as a int16 to the array. +func (a *Array) Int16(i int16) *Array { + a.buf = enc.AppendInt16(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Int32 appends i as a int32 to the array. +func (a *Array) Int32(i int32) *Array { + a.buf = enc.AppendInt32(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Int64 appends i as a int64 to the array. +func (a *Array) Int64(i int64) *Array { + a.buf = enc.AppendInt64(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Uint appends i as a uint to the array. +func (a *Array) Uint(i uint) *Array { + a.buf = enc.AppendUint(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Uint8 appends i as a uint8 to the array. +func (a *Array) Uint8(i uint8) *Array { + a.buf = enc.AppendUint8(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Uint16 appends i as a uint16 to the array. +func (a *Array) Uint16(i uint16) *Array { + a.buf = enc.AppendUint16(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Uint32 appends i as a uint32 to the array. +func (a *Array) Uint32(i uint32) *Array { + a.buf = enc.AppendUint32(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Uint64 appends i as a uint64 to the array. +func (a *Array) Uint64(i uint64) *Array { + a.buf = enc.AppendUint64(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Float32 appends f as a float32 to the array. +func (a *Array) Float32(f float32) *Array { + a.buf = enc.AppendFloat32(enc.AppendArrayDelim(a.buf), f) + return a +} + +// Float64 appends f as a float64 to the array. +func (a *Array) Float64(f float64) *Array { + a.buf = enc.AppendFloat64(enc.AppendArrayDelim(a.buf), f) + return a +} + +// Time appends t formatted as string using zerolog.TimeFieldFormat. +func (a *Array) Time(t time.Time) *Array { + a.buf = enc.AppendTime(enc.AppendArrayDelim(a.buf), t, TimeFieldFormat) + return a +} + +// Dur appends d to the array. +func (a *Array) Dur(d time.Duration) *Array { + a.buf = enc.AppendDuration(enc.AppendArrayDelim(a.buf), d, DurationFieldUnit, DurationFieldInteger) + return a +} + +// Interface appends i marshaled using reflection. 
+func (a *Array) Interface(i interface{}) *Array { + if obj, ok := i.(LogObjectMarshaler); ok { + return a.Object(obj) + } + a.buf = enc.AppendInterface(enc.AppendArrayDelim(a.buf), i) + return a +} + +// IPAddr adds IPv4 or IPv6 address to the array +func (a *Array) IPAddr(ip net.IP) *Array { + a.buf = enc.AppendIPAddr(enc.AppendArrayDelim(a.buf), ip) + return a +} + +// IPPrefix adds IPv4 or IPv6 Prefix (IP + mask) to the array +func (a *Array) IPPrefix(pfx net.IPNet) *Array { + a.buf = enc.AppendIPPrefix(enc.AppendArrayDelim(a.buf), pfx) + return a +} + +// MACAddr adds a MAC (Ethernet) address to the array +func (a *Array) MACAddr(ha net.HardwareAddr) *Array { + a.buf = enc.AppendMACAddr(enc.AppendArrayDelim(a.buf), ha) + return a +} + +// Dict adds the dict Event to the array +func (a *Array) Dict(dict *Event) *Array { + dict.buf = enc.AppendEndMarker(dict.buf) + a.buf = append(enc.AppendArrayDelim(a.buf), dict.buf...) + return a +} diff --git a/hotelReservation/vendor/github.com/rs/zerolog/console.go b/hotelReservation/vendor/github.com/rs/zerolog/console.go new file mode 100644 index 000000000..282798853 --- /dev/null +++ b/hotelReservation/vendor/github.com/rs/zerolog/console.go @@ -0,0 +1,455 @@ +package zerolog + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/mattn/go-colorable" +) + +const ( + colorBlack = iota + 30 + colorRed + colorGreen + colorYellow + colorBlue + colorMagenta + colorCyan + colorWhite + + colorBold = 1 + colorDarkGray = 90 +) + +var ( + consoleBufPool = sync.Pool{ + New: func() interface{} { + return bytes.NewBuffer(make([]byte, 0, 100)) + }, + } +) + +const ( + consoleDefaultTimeFormat = time.Kitchen +) + +// Formatter transforms the input into a formatted string. +type Formatter func(interface{}) string + +// ConsoleWriter parses the JSON input and writes it in an +// (optionally) colorized, human-friendly format to Out. +type ConsoleWriter struct { + // Out is the output destination. + Out io.Writer + + // NoColor disables the colorized output. + NoColor bool + + // TimeFormat specifies the format for timestamp in output. + TimeFormat string + + // PartsOrder defines the order of parts in output. + PartsOrder []string + + // PartsExclude defines parts to not display in output. + PartsExclude []string + + // FieldsExclude defines contextual fields to not display in output. + FieldsExclude []string + + FormatTimestamp Formatter + FormatLevel Formatter + FormatCaller Formatter + FormatMessage Formatter + FormatFieldName Formatter + FormatFieldValue Formatter + FormatErrFieldName Formatter + FormatErrFieldValue Formatter + + FormatExtra func(map[string]interface{}, *bytes.Buffer) error +} + +// NewConsoleWriter creates and initializes a new ConsoleWriter. +func NewConsoleWriter(options ...func(w *ConsoleWriter)) ConsoleWriter { + w := ConsoleWriter{ + Out: os.Stdout, + TimeFormat: consoleDefaultTimeFormat, + PartsOrder: consoleDefaultPartsOrder(), + } + + for _, opt := range options { + opt(&w) + } + + // Fix color on Windows + if w.Out == os.Stdout || w.Out == os.Stderr { + w.Out = colorable.NewColorable(w.Out.(*os.File)) + } + + return w +} + +// Write transforms the JSON input with formatters and appends to w.Out. 
+func (w ConsoleWriter) Write(p []byte) (n int, err error) { + // Fix color on Windows + if w.Out == os.Stdout || w.Out == os.Stderr { + w.Out = colorable.NewColorable(w.Out.(*os.File)) + } + + if w.PartsOrder == nil { + w.PartsOrder = consoleDefaultPartsOrder() + } + + var buf = consoleBufPool.Get().(*bytes.Buffer) + defer func() { + buf.Reset() + consoleBufPool.Put(buf) + }() + + var evt map[string]interface{} + p = decodeIfBinaryToBytes(p) + d := json.NewDecoder(bytes.NewReader(p)) + d.UseNumber() + err = d.Decode(&evt) + if err != nil { + return n, fmt.Errorf("cannot decode event: %s", err) + } + + for _, p := range w.PartsOrder { + w.writePart(buf, evt, p) + } + + w.writeFields(evt, buf) + + if w.FormatExtra != nil { + err = w.FormatExtra(evt, buf) + if err != nil { + return n, err + } + } + + err = buf.WriteByte('\n') + if err != nil { + return n, err + } + + _, err = buf.WriteTo(w.Out) + return len(p), err +} + +// writeFields appends formatted key-value pairs to buf. +func (w ConsoleWriter) writeFields(evt map[string]interface{}, buf *bytes.Buffer) { + var fields = make([]string, 0, len(evt)) + for field := range evt { + var isExcluded bool + for _, excluded := range w.FieldsExclude { + if field == excluded { + isExcluded = true + break + } + } + if isExcluded { + continue + } + + switch field { + case LevelFieldName, TimestampFieldName, MessageFieldName, CallerFieldName: + continue + } + fields = append(fields, field) + } + sort.Strings(fields) + + // Write space only if something has already been written to the buffer, and if there are fields. + if buf.Len() > 0 && len(fields) > 0 { + buf.WriteByte(' ') + } + + // Move the "error" field to the front + ei := sort.Search(len(fields), func(i int) bool { return fields[i] >= ErrorFieldName }) + if ei < len(fields) && fields[ei] == ErrorFieldName { + fields[ei] = "" + fields = append([]string{ErrorFieldName}, fields...) + var xfields = make([]string, 0, len(fields)) + for _, field := range fields { + if field == "" { // Skip empty fields + continue + } + xfields = append(xfields, field) + } + fields = xfields + } + + for i, field := range fields { + var fn Formatter + var fv Formatter + + if field == ErrorFieldName { + if w.FormatErrFieldName == nil { + fn = consoleDefaultFormatErrFieldName(w.NoColor) + } else { + fn = w.FormatErrFieldName + } + + if w.FormatErrFieldValue == nil { + fv = consoleDefaultFormatErrFieldValue(w.NoColor) + } else { + fv = w.FormatErrFieldValue + } + } else { + if w.FormatFieldName == nil { + fn = consoleDefaultFormatFieldName(w.NoColor) + } else { + fn = w.FormatFieldName + } + + if w.FormatFieldValue == nil { + fv = consoleDefaultFormatFieldValue + } else { + fv = w.FormatFieldValue + } + } + + buf.WriteString(fn(field)) + + switch fValue := evt[field].(type) { + case string: + if needsQuote(fValue) { + buf.WriteString(fv(strconv.Quote(fValue))) + } else { + buf.WriteString(fv(fValue)) + } + case json.Number: + buf.WriteString(fv(fValue)) + default: + b, err := InterfaceMarshalFunc(fValue) + if err != nil { + fmt.Fprintf(buf, colorize("[error: %v]", colorRed, w.NoColor), err) + } else { + fmt.Fprint(buf, fv(b)) + } + } + + if i < len(fields)-1 { // Skip space for last field + buf.WriteByte(' ') + } + } +} + +// writePart appends a formatted part to buf. 
+func (w ConsoleWriter) writePart(buf *bytes.Buffer, evt map[string]interface{}, p string) { + var f Formatter + + if w.PartsExclude != nil && len(w.PartsExclude) > 0 { + for _, exclude := range w.PartsExclude { + if exclude == p { + return + } + } + } + + switch p { + case LevelFieldName: + if w.FormatLevel == nil { + f = consoleDefaultFormatLevel(w.NoColor) + } else { + f = w.FormatLevel + } + case TimestampFieldName: + if w.FormatTimestamp == nil { + f = consoleDefaultFormatTimestamp(w.TimeFormat, w.NoColor) + } else { + f = w.FormatTimestamp + } + case MessageFieldName: + if w.FormatMessage == nil { + f = consoleDefaultFormatMessage + } else { + f = w.FormatMessage + } + case CallerFieldName: + if w.FormatCaller == nil { + f = consoleDefaultFormatCaller(w.NoColor) + } else { + f = w.FormatCaller + } + default: + if w.FormatFieldValue == nil { + f = consoleDefaultFormatFieldValue + } else { + f = w.FormatFieldValue + } + } + + var s = f(evt[p]) + + if len(s) > 0 { + if buf.Len() > 0 { + buf.WriteByte(' ') // Write space only if not the first part + } + buf.WriteString(s) + } +} + +// needsQuote returns true when the string s should be quoted in output. +func needsQuote(s string) bool { + for i := range s { + if s[i] < 0x20 || s[i] > 0x7e || s[i] == ' ' || s[i] == '\\' || s[i] == '"' { + return true + } + } + return false +} + +// colorize returns the string s wrapped in ANSI code c, unless disabled is true. +func colorize(s interface{}, c int, disabled bool) string { + e := os.Getenv("NO_COLOR") + if e != "" { + disabled = true + } + + if disabled { + return fmt.Sprintf("%s", s) + } + return fmt.Sprintf("\x1b[%dm%v\x1b[0m", c, s) +} + +// ----- DEFAULT FORMATTERS --------------------------------------------------- + +func consoleDefaultPartsOrder() []string { + return []string{ + TimestampFieldName, + LevelFieldName, + CallerFieldName, + MessageFieldName, + } +} + +func consoleDefaultFormatTimestamp(timeFormat string, noColor bool) Formatter { + if timeFormat == "" { + timeFormat = consoleDefaultTimeFormat + } + return func(i interface{}) string { + t := "" + switch tt := i.(type) { + case string: + ts, err := time.ParseInLocation(TimeFieldFormat, tt, time.Local) + if err != nil { + t = tt + } else { + t = ts.Local().Format(timeFormat) + } + case json.Number: + i, err := tt.Int64() + if err != nil { + t = tt.String() + } else { + var sec, nsec int64 + + switch TimeFieldFormat { + case TimeFormatUnixNano: + sec, nsec = 0, i + case TimeFormatUnixMicro: + sec, nsec = 0, int64(time.Duration(i)*time.Microsecond) + case TimeFormatUnixMs: + sec, nsec = 0, int64(time.Duration(i)*time.Millisecond) + default: + sec, nsec = i, 0 + } + + ts := time.Unix(sec, nsec) + t = ts.Format(timeFormat) + } + } + return colorize(t, colorDarkGray, noColor) + } +} + +func consoleDefaultFormatLevel(noColor bool) Formatter { + return func(i interface{}) string { + var l string + if ll, ok := i.(string); ok { + switch ll { + case LevelTraceValue: + l = colorize("TRC", colorMagenta, noColor) + case LevelDebugValue: + l = colorize("DBG", colorYellow, noColor) + case LevelInfoValue: + l = colorize("INF", colorGreen, noColor) + case LevelWarnValue: + l = colorize("WRN", colorRed, noColor) + case LevelErrorValue: + l = colorize(colorize("ERR", colorRed, noColor), colorBold, noColor) + case LevelFatalValue: + l = colorize(colorize("FTL", colorRed, noColor), colorBold, noColor) + case LevelPanicValue: + l = colorize(colorize("PNC", colorRed, noColor), colorBold, noColor) + default: + l = colorize(ll, colorBold, noColor) + 
} + } else { + if i == nil { + l = colorize("???", colorBold, noColor) + } else { + l = strings.ToUpper(fmt.Sprintf("%s", i))[0:3] + } + } + return l + } +} + +func consoleDefaultFormatCaller(noColor bool) Formatter { + return func(i interface{}) string { + var c string + if cc, ok := i.(string); ok { + c = cc + } + if len(c) > 0 { + if cwd, err := os.Getwd(); err == nil { + if rel, err := filepath.Rel(cwd, c); err == nil { + c = rel + } + } + c = colorize(c, colorBold, noColor) + colorize(" >", colorCyan, noColor) + } + return c + } +} + +func consoleDefaultFormatMessage(i interface{}) string { + if i == nil { + return "" + } + return fmt.Sprintf("%s", i) +} + +func consoleDefaultFormatFieldName(noColor bool) Formatter { + return func(i interface{}) string { + return colorize(fmt.Sprintf("%s=", i), colorCyan, noColor) + } +} + +func consoleDefaultFormatFieldValue(i interface{}) string { + return fmt.Sprintf("%s", i) +} + +func consoleDefaultFormatErrFieldName(noColor bool) Formatter { + return func(i interface{}) string { + return colorize(fmt.Sprintf("%s=", i), colorCyan, noColor) + } +} + +func consoleDefaultFormatErrFieldValue(noColor bool) Formatter { + return func(i interface{}) string { + return colorize(fmt.Sprintf("%s", i), colorRed, noColor) + } +} diff --git a/hotelReservation/vendor/github.com/rs/zerolog/context.go b/hotelReservation/vendor/github.com/rs/zerolog/context.go new file mode 100644 index 000000000..fc62ad9c1 --- /dev/null +++ b/hotelReservation/vendor/github.com/rs/zerolog/context.go @@ -0,0 +1,449 @@ +package zerolog + +import ( + "context" + "fmt" + "io/ioutil" + "math" + "net" + "time" +) + +// Context configures a new sub-logger with contextual fields. +type Context struct { + l Logger +} + +// Logger returns the logger with the context previously set. +func (c Context) Logger() Logger { + return c.l +} + +// Fields is a helper function to use a map or slice to set fields using type assertion. +// Only map[string]interface{} and []interface{} are accepted. []interface{} must +// alternate string keys and arbitrary values, and extraneous ones are ignored. +func (c Context) Fields(fields interface{}) Context { + c.l.context = appendFields(c.l.context, fields) + return c +} + +// Dict adds the field key with the dict to the logger context. +func (c Context) Dict(key string, dict *Event) Context { + dict.buf = enc.AppendEndMarker(dict.buf) + c.l.context = append(enc.AppendKey(c.l.context, key), dict.buf...) + putEvent(dict) + return c +} + +// Array adds the field key with an array to the event context. +// Use zerolog.Arr() to create the array or pass a type that +// implement the LogArrayMarshaler interface. +func (c Context) Array(key string, arr LogArrayMarshaler) Context { + c.l.context = enc.AppendKey(c.l.context, key) + if arr, ok := arr.(*Array); ok { + c.l.context = arr.write(c.l.context) + return c + } + var a *Array + if aa, ok := arr.(*Array); ok { + a = aa + } else { + a = Arr() + arr.MarshalZerologArray(a) + } + c.l.context = a.write(c.l.context) + return c +} + +// Object marshals an object that implement the LogObjectMarshaler interface. +func (c Context) Object(key string, obj LogObjectMarshaler) Context { + e := newEvent(LevelWriterAdapter{ioutil.Discard}, 0) + e.Object(key, obj) + c.l.context = enc.AppendObjectData(c.l.context, e.buf) + putEvent(e) + return c +} + +// EmbedObject marshals and Embeds an object that implement the LogObjectMarshaler interface. 
+func (c Context) EmbedObject(obj LogObjectMarshaler) Context { + e := newEvent(LevelWriterAdapter{ioutil.Discard}, 0) + e.EmbedObject(obj) + c.l.context = enc.AppendObjectData(c.l.context, e.buf) + putEvent(e) + return c +} + +// Str adds the field key with val as a string to the logger context. +func (c Context) Str(key, val string) Context { + c.l.context = enc.AppendString(enc.AppendKey(c.l.context, key), val) + return c +} + +// Strs adds the field key with val as a string to the logger context. +func (c Context) Strs(key string, vals []string) Context { + c.l.context = enc.AppendStrings(enc.AppendKey(c.l.context, key), vals) + return c +} + +// Stringer adds the field key with val.String() (or null if val is nil) to the logger context. +func (c Context) Stringer(key string, val fmt.Stringer) Context { + if val != nil { + c.l.context = enc.AppendString(enc.AppendKey(c.l.context, key), val.String()) + return c + } + + c.l.context = enc.AppendInterface(enc.AppendKey(c.l.context, key), nil) + return c +} + +// Bytes adds the field key with val as a []byte to the logger context. +func (c Context) Bytes(key string, val []byte) Context { + c.l.context = enc.AppendBytes(enc.AppendKey(c.l.context, key), val) + return c +} + +// Hex adds the field key with val as a hex string to the logger context. +func (c Context) Hex(key string, val []byte) Context { + c.l.context = enc.AppendHex(enc.AppendKey(c.l.context, key), val) + return c +} + +// RawJSON adds already encoded JSON to context. +// +// No sanity check is performed on b; it must not contain carriage returns and +// be valid JSON. +func (c Context) RawJSON(key string, b []byte) Context { + c.l.context = appendJSON(enc.AppendKey(c.l.context, key), b) + return c +} + +// AnErr adds the field key with serialized err to the logger context. +func (c Context) AnErr(key string, err error) Context { + switch m := ErrorMarshalFunc(err).(type) { + case nil: + return c + case LogObjectMarshaler: + return c.Object(key, m) + case error: + if m == nil || isNilValue(m) { + return c + } else { + return c.Str(key, m.Error()) + } + case string: + return c.Str(key, m) + default: + return c.Interface(key, m) + } +} + +// Errs adds the field key with errs as an array of serialized errors to the +// logger context. +func (c Context) Errs(key string, errs []error) Context { + arr := Arr() + for _, err := range errs { + switch m := ErrorMarshalFunc(err).(type) { + case LogObjectMarshaler: + arr = arr.Object(m) + case error: + if m == nil || isNilValue(m) { + arr = arr.Interface(nil) + } else { + arr = arr.Str(m.Error()) + } + case string: + arr = arr.Str(m) + default: + arr = arr.Interface(m) + } + } + + return c.Array(key, arr) +} + +// Err adds the field "error" with serialized err to the logger context. +func (c Context) Err(err error) Context { + return c.AnErr(ErrorFieldName, err) +} + +// Ctx adds the context.Context to the logger context. The context.Context is +// not rendered in the error message, but is made available for hooks to use. +// A typical use case is to extract tracing information from the +// context.Context. +func (c Context) Ctx(ctx context.Context) Context { + c.l.ctx = ctx + return c +} + +// Bool adds the field key with val as a bool to the logger context. +func (c Context) Bool(key string, b bool) Context { + c.l.context = enc.AppendBool(enc.AppendKey(c.l.context, key), b) + return c +} + +// Bools adds the field key with val as a []bool to the logger context. 
+func (c Context) Bools(key string, b []bool) Context { + c.l.context = enc.AppendBools(enc.AppendKey(c.l.context, key), b) + return c +} + +// Int adds the field key with i as a int to the logger context. +func (c Context) Int(key string, i int) Context { + c.l.context = enc.AppendInt(enc.AppendKey(c.l.context, key), i) + return c +} + +// Ints adds the field key with i as a []int to the logger context. +func (c Context) Ints(key string, i []int) Context { + c.l.context = enc.AppendInts(enc.AppendKey(c.l.context, key), i) + return c +} + +// Int8 adds the field key with i as a int8 to the logger context. +func (c Context) Int8(key string, i int8) Context { + c.l.context = enc.AppendInt8(enc.AppendKey(c.l.context, key), i) + return c +} + +// Ints8 adds the field key with i as a []int8 to the logger context. +func (c Context) Ints8(key string, i []int8) Context { + c.l.context = enc.AppendInts8(enc.AppendKey(c.l.context, key), i) + return c +} + +// Int16 adds the field key with i as a int16 to the logger context. +func (c Context) Int16(key string, i int16) Context { + c.l.context = enc.AppendInt16(enc.AppendKey(c.l.context, key), i) + return c +} + +// Ints16 adds the field key with i as a []int16 to the logger context. +func (c Context) Ints16(key string, i []int16) Context { + c.l.context = enc.AppendInts16(enc.AppendKey(c.l.context, key), i) + return c +} + +// Int32 adds the field key with i as a int32 to the logger context. +func (c Context) Int32(key string, i int32) Context { + c.l.context = enc.AppendInt32(enc.AppendKey(c.l.context, key), i) + return c +} + +// Ints32 adds the field key with i as a []int32 to the logger context. +func (c Context) Ints32(key string, i []int32) Context { + c.l.context = enc.AppendInts32(enc.AppendKey(c.l.context, key), i) + return c +} + +// Int64 adds the field key with i as a int64 to the logger context. +func (c Context) Int64(key string, i int64) Context { + c.l.context = enc.AppendInt64(enc.AppendKey(c.l.context, key), i) + return c +} + +// Ints64 adds the field key with i as a []int64 to the logger context. +func (c Context) Ints64(key string, i []int64) Context { + c.l.context = enc.AppendInts64(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uint adds the field key with i as a uint to the logger context. +func (c Context) Uint(key string, i uint) Context { + c.l.context = enc.AppendUint(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uints adds the field key with i as a []uint to the logger context. +func (c Context) Uints(key string, i []uint) Context { + c.l.context = enc.AppendUints(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uint8 adds the field key with i as a uint8 to the logger context. +func (c Context) Uint8(key string, i uint8) Context { + c.l.context = enc.AppendUint8(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uints8 adds the field key with i as a []uint8 to the logger context. +func (c Context) Uints8(key string, i []uint8) Context { + c.l.context = enc.AppendUints8(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uint16 adds the field key with i as a uint16 to the logger context. +func (c Context) Uint16(key string, i uint16) Context { + c.l.context = enc.AppendUint16(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uints16 adds the field key with i as a []uint16 to the logger context. 
+func (c Context) Uints16(key string, i []uint16) Context { + c.l.context = enc.AppendUints16(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uint32 adds the field key with i as a uint32 to the logger context. +func (c Context) Uint32(key string, i uint32) Context { + c.l.context = enc.AppendUint32(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uints32 adds the field key with i as a []uint32 to the logger context. +func (c Context) Uints32(key string, i []uint32) Context { + c.l.context = enc.AppendUints32(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uint64 adds the field key with i as a uint64 to the logger context. +func (c Context) Uint64(key string, i uint64) Context { + c.l.context = enc.AppendUint64(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uints64 adds the field key with i as a []uint64 to the logger context. +func (c Context) Uints64(key string, i []uint64) Context { + c.l.context = enc.AppendUints64(enc.AppendKey(c.l.context, key), i) + return c +} + +// Float32 adds the field key with f as a float32 to the logger context. +func (c Context) Float32(key string, f float32) Context { + c.l.context = enc.AppendFloat32(enc.AppendKey(c.l.context, key), f) + return c +} + +// Floats32 adds the field key with f as a []float32 to the logger context. +func (c Context) Floats32(key string, f []float32) Context { + c.l.context = enc.AppendFloats32(enc.AppendKey(c.l.context, key), f) + return c +} + +// Float64 adds the field key with f as a float64 to the logger context. +func (c Context) Float64(key string, f float64) Context { + c.l.context = enc.AppendFloat64(enc.AppendKey(c.l.context, key), f) + return c +} + +// Floats64 adds the field key with f as a []float64 to the logger context. +func (c Context) Floats64(key string, f []float64) Context { + c.l.context = enc.AppendFloats64(enc.AppendKey(c.l.context, key), f) + return c +} + +type timestampHook struct{} + +func (ts timestampHook) Run(e *Event, level Level, msg string) { + e.Timestamp() +} + +var th = timestampHook{} + +// Timestamp adds the current local time to the logger context with the "time" key, formatted using zerolog.TimeFieldFormat. +// To customize the key name, change zerolog.TimestampFieldName. +// To customize the time format, change zerolog.TimeFieldFormat. +// +// NOTE: It won't dedupe the "time" key if the *Context has one already. +func (c Context) Timestamp() Context { + c.l = c.l.Hook(th) + return c +} + +// Time adds the field key with t formated as string using zerolog.TimeFieldFormat. +func (c Context) Time(key string, t time.Time) Context { + c.l.context = enc.AppendTime(enc.AppendKey(c.l.context, key), t, TimeFieldFormat) + return c +} + +// Times adds the field key with t formated as string using zerolog.TimeFieldFormat. +func (c Context) Times(key string, t []time.Time) Context { + c.l.context = enc.AppendTimes(enc.AppendKey(c.l.context, key), t, TimeFieldFormat) + return c +} + +// Dur adds the fields key with d divided by unit and stored as a float. +func (c Context) Dur(key string, d time.Duration) Context { + c.l.context = enc.AppendDuration(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger) + return c +} + +// Durs adds the fields key with d divided by unit and stored as a float. 
+func (c Context) Durs(key string, d []time.Duration) Context { + c.l.context = enc.AppendDurations(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger) + return c +} + +// Interface adds the field key with obj marshaled using reflection. +func (c Context) Interface(key string, i interface{}) Context { + c.l.context = enc.AppendInterface(enc.AppendKey(c.l.context, key), i) + return c +} + +// Any is a wrapper around Context.Interface. +func (c Context) Any(key string, i interface{}) Context { + return c.Interface(key, i) +} + +type callerHook struct { + callerSkipFrameCount int +} + +func newCallerHook(skipFrameCount int) callerHook { + return callerHook{callerSkipFrameCount: skipFrameCount} +} + +func (ch callerHook) Run(e *Event, level Level, msg string) { + switch ch.callerSkipFrameCount { + case useGlobalSkipFrameCount: + // Extra frames to skip (added by hook infra). + e.caller(CallerSkipFrameCount + contextCallerSkipFrameCount) + default: + // Extra frames to skip (added by hook infra). + e.caller(ch.callerSkipFrameCount + contextCallerSkipFrameCount) + } +} + +// useGlobalSkipFrameCount acts as a flag to informat callerHook.Run +// to use the global CallerSkipFrameCount. +const useGlobalSkipFrameCount = math.MinInt32 + +// ch is the default caller hook using the global CallerSkipFrameCount. +var ch = newCallerHook(useGlobalSkipFrameCount) + +// Caller adds the file:line of the caller with the zerolog.CallerFieldName key. +func (c Context) Caller() Context { + c.l = c.l.Hook(ch) + return c +} + +// CallerWithSkipFrameCount adds the file:line of the caller with the zerolog.CallerFieldName key. +// The specified skipFrameCount int will override the global CallerSkipFrameCount for this context's respective logger. +// If set to -1 the global CallerSkipFrameCount will be used. +func (c Context) CallerWithSkipFrameCount(skipFrameCount int) Context { + c.l = c.l.Hook(newCallerHook(skipFrameCount)) + return c +} + +// Stack enables stack trace printing for the error passed to Err(). +func (c Context) Stack() Context { + c.l.stack = true + return c +} + +// IPAddr adds IPv4 or IPv6 Address to the context +func (c Context) IPAddr(key string, ip net.IP) Context { + c.l.context = enc.AppendIPAddr(enc.AppendKey(c.l.context, key), ip) + return c +} + +// IPPrefix adds IPv4 or IPv6 Prefix (address and mask) to the context +func (c Context) IPPrefix(key string, pfx net.IPNet) Context { + c.l.context = enc.AppendIPPrefix(enc.AppendKey(c.l.context, key), pfx) + return c +} + +// MACAddr adds MAC address to the context +func (c Context) MACAddr(key string, ha net.HardwareAddr) Context { + c.l.context = enc.AppendMACAddr(enc.AppendKey(c.l.context, key), ha) + return c +} diff --git a/hotelReservation/vendor/github.com/rs/zerolog/ctx.go b/hotelReservation/vendor/github.com/rs/zerolog/ctx.go new file mode 100644 index 000000000..60432d153 --- /dev/null +++ b/hotelReservation/vendor/github.com/rs/zerolog/ctx.go @@ -0,0 +1,52 @@ +package zerolog + +import ( + "context" +) + +var disabledLogger *Logger + +func init() { + SetGlobalLevel(TraceLevel) + l := Nop() + disabledLogger = &l +} + +type ctxKey struct{} + +// WithContext returns a copy of ctx with the receiver attached. The Logger +// attached to the provided Context (if any) will not be effected. If the +// receiver's log level is Disabled it will only be attached to the returned +// Context if the provided Context has a previously attached Logger. 
If the +// provided Context has no attached Logger, a Disabled Logger will not be +// attached. +// +// Note: to modify the existing Logger attached to a Context (instead of +// replacing it in a new Context), use UpdateContext with the following +// notation: +// +// ctx := r.Context() +// l := zerolog.Ctx(ctx) +// l.UpdateContext(func(c Context) Context { +// return c.Str("bar", "baz") +// }) +// +func (l Logger) WithContext(ctx context.Context) context.Context { + if _, ok := ctx.Value(ctxKey{}).(*Logger); !ok && l.level == Disabled { + // Do not store disabled logger. + return ctx + } + return context.WithValue(ctx, ctxKey{}, &l) +} + +// Ctx returns the Logger associated with the ctx. If no logger +// is associated, DefaultContextLogger is returned, unless DefaultContextLogger +// is nil, in which case a disabled logger is returned. +func Ctx(ctx context.Context) *Logger { + if l, ok := ctx.Value(ctxKey{}).(*Logger); ok { + return l + } else if l = DefaultContextLogger; l != nil { + return l + } + return disabledLogger +} diff --git a/hotelReservation/vendor/github.com/rs/zerolog/encoder.go b/hotelReservation/vendor/github.com/rs/zerolog/encoder.go new file mode 100644 index 000000000..09b24e80c --- /dev/null +++ b/hotelReservation/vendor/github.com/rs/zerolog/encoder.go @@ -0,0 +1,56 @@ +package zerolog + +import ( + "net" + "time" +) + +type encoder interface { + AppendArrayDelim(dst []byte) []byte + AppendArrayEnd(dst []byte) []byte + AppendArrayStart(dst []byte) []byte + AppendBeginMarker(dst []byte) []byte + AppendBool(dst []byte, val bool) []byte + AppendBools(dst []byte, vals []bool) []byte + AppendBytes(dst, s []byte) []byte + AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte + AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte + AppendEndMarker(dst []byte) []byte + AppendFloat32(dst []byte, val float32) []byte + AppendFloat64(dst []byte, val float64) []byte + AppendFloats32(dst []byte, vals []float32) []byte + AppendFloats64(dst []byte, vals []float64) []byte + AppendHex(dst, s []byte) []byte + AppendIPAddr(dst []byte, ip net.IP) []byte + AppendIPPrefix(dst []byte, pfx net.IPNet) []byte + AppendInt(dst []byte, val int) []byte + AppendInt16(dst []byte, val int16) []byte + AppendInt32(dst []byte, val int32) []byte + AppendInt64(dst []byte, val int64) []byte + AppendInt8(dst []byte, val int8) []byte + AppendInterface(dst []byte, i interface{}) []byte + AppendInts(dst []byte, vals []int) []byte + AppendInts16(dst []byte, vals []int16) []byte + AppendInts32(dst []byte, vals []int32) []byte + AppendInts64(dst []byte, vals []int64) []byte + AppendInts8(dst []byte, vals []int8) []byte + AppendKey(dst []byte, key string) []byte + AppendLineBreak(dst []byte) []byte + AppendMACAddr(dst []byte, ha net.HardwareAddr) []byte + AppendNil(dst []byte) []byte + AppendObjectData(dst []byte, o []byte) []byte + AppendString(dst []byte, s string) []byte + AppendStrings(dst []byte, vals []string) []byte + AppendTime(dst []byte, t time.Time, format string) []byte + AppendTimes(dst []byte, vals []time.Time, format string) []byte + AppendUint(dst []byte, val uint) []byte + AppendUint16(dst []byte, val uint16) []byte + AppendUint32(dst []byte, val uint32) []byte + AppendUint64(dst []byte, val uint64) []byte + AppendUint8(dst []byte, val uint8) []byte + AppendUints(dst []byte, vals []uint) []byte + AppendUints16(dst []byte, vals []uint16) []byte + AppendUints32(dst []byte, vals []uint32) []byte + AppendUints64(dst []byte, vals 
[]uint64) []byte + AppendUints8(dst []byte, vals []uint8) []byte +} diff --git a/hotelReservation/vendor/github.com/rs/zerolog/encoder_cbor.go b/hotelReservation/vendor/github.com/rs/zerolog/encoder_cbor.go new file mode 100644 index 000000000..36cb994b8 --- /dev/null +++ b/hotelReservation/vendor/github.com/rs/zerolog/encoder_cbor.go @@ -0,0 +1,45 @@ +// +build binary_log + +package zerolog + +// This file contains bindings to do binary encoding. + +import ( + "github.com/rs/zerolog/internal/cbor" +) + +var ( + _ encoder = (*cbor.Encoder)(nil) + + enc = cbor.Encoder{} +) + +func init() { + // using closure to reflect the changes at runtime. + cbor.JSONMarshalFunc = func(v interface{}) ([]byte, error) { + return InterfaceMarshalFunc(v) + } +} + +func appendJSON(dst []byte, j []byte) []byte { + return cbor.AppendEmbeddedJSON(dst, j) +} +func appendCBOR(dst []byte, c []byte) []byte { + return cbor.AppendEmbeddedCBOR(dst, c) +} + +// decodeIfBinaryToString - converts a binary formatted log msg to a +// JSON formatted String Log message. +func decodeIfBinaryToString(in []byte) string { + return cbor.DecodeIfBinaryToString(in) +} + +func decodeObjectToStr(in []byte) string { + return cbor.DecodeObjectToStr(in) +} + +// decodeIfBinaryToBytes - converts a binary formatted log msg to a +// JSON formatted Bytes Log message. +func decodeIfBinaryToBytes(in []byte) []byte { + return cbor.DecodeIfBinaryToBytes(in) +} diff --git a/hotelReservation/vendor/github.com/rs/zerolog/encoder_json.go b/hotelReservation/vendor/github.com/rs/zerolog/encoder_json.go new file mode 100644 index 000000000..6f96c68ad --- /dev/null +++ b/hotelReservation/vendor/github.com/rs/zerolog/encoder_json.go @@ -0,0 +1,51 @@ +// +build !binary_log + +package zerolog + +// encoder_json.go file contains bindings to generate +// JSON encoded byte stream. + +import ( + "encoding/base64" + "github.com/rs/zerolog/internal/json" +) + +var ( + _ encoder = (*json.Encoder)(nil) + + enc = json.Encoder{} +) + +func init() { + // using closure to reflect the changes at runtime. + json.JSONMarshalFunc = func(v interface{}) ([]byte, error) { + return InterfaceMarshalFunc(v) + } +} + +func appendJSON(dst []byte, j []byte) []byte { + return append(dst, j...) +} +func appendCBOR(dst []byte, cbor []byte) []byte { + dst = append(dst, []byte("\"data:application/cbor;base64,")...) + l := len(dst) + enc := base64.StdEncoding + n := enc.EncodedLen(len(cbor)) + for i := 0; i < n; i++ { + dst = append(dst, '.') + } + enc.Encode(dst[l:], cbor) + return append(dst, '"') +} + +func decodeIfBinaryToString(in []byte) string { + return string(in) +} + +func decodeObjectToStr(in []byte) string { + return string(in) +} + +func decodeIfBinaryToBytes(in []byte) []byte { + return in +} diff --git a/hotelReservation/vendor/github.com/rs/zerolog/event.go b/hotelReservation/vendor/github.com/rs/zerolog/event.go new file mode 100644 index 000000000..2a5d3b087 --- /dev/null +++ b/hotelReservation/vendor/github.com/rs/zerolog/event.go @@ -0,0 +1,830 @@ +package zerolog + +import ( + "context" + "fmt" + "net" + "os" + "runtime" + "sync" + "time" +) + +var eventPool = &sync.Pool{ + New: func() interface{} { + return &Event{ + buf: make([]byte, 0, 500), + } + }, +} + +// Event represents a log event. It is instanced by one of the level method of +// Logger and finalized by the Msg or Msgf method. 
+type Event struct { + buf []byte + w LevelWriter + level Level + done func(msg string) + stack bool // enable error stack trace + ch []Hook // hooks from context + skipFrame int // The number of additional frames to skip when printing the caller. + ctx context.Context // Optional Go context for event +} + +func putEvent(e *Event) { + // Proper usage of a sync.Pool requires each entry to have approximately + // the same memory cost. To obtain this property when the stored type + // contains a variably-sized buffer, we add a hard limit on the maximum buffer + // to place back in the pool. + // + // See https://golang.org/issue/23199 + const maxSize = 1 << 16 // 64KiB + if cap(e.buf) > maxSize { + return + } + eventPool.Put(e) +} + +// LogObjectMarshaler provides a strongly-typed and encoding-agnostic interface +// to be implemented by types used with Event/Context's Object methods. +type LogObjectMarshaler interface { + MarshalZerologObject(e *Event) +} + +// LogArrayMarshaler provides a strongly-typed and encoding-agnostic interface +// to be implemented by types used with Event/Context's Array methods. +type LogArrayMarshaler interface { + MarshalZerologArray(a *Array) +} + +func newEvent(w LevelWriter, level Level) *Event { + e := eventPool.Get().(*Event) + e.buf = e.buf[:0] + e.ch = nil + e.buf = enc.AppendBeginMarker(e.buf) + e.w = w + e.level = level + e.stack = false + e.skipFrame = 0 + return e +} + +func (e *Event) write() (err error) { + if e == nil { + return nil + } + if e.level != Disabled { + e.buf = enc.AppendEndMarker(e.buf) + e.buf = enc.AppendLineBreak(e.buf) + if e.w != nil { + _, err = e.w.WriteLevel(e.level, e.buf) + } + } + putEvent(e) + return +} + +// Enabled return false if the *Event is going to be filtered out by +// log level or sampling. +func (e *Event) Enabled() bool { + return e != nil && e.level != Disabled +} + +// Discard disables the event so Msg(f) won't print it. +func (e *Event) Discard() *Event { + if e == nil { + return e + } + e.level = Disabled + return nil +} + +// Msg sends the *Event with msg added as the message field if not empty. +// +// NOTICE: once this method is called, the *Event should be disposed. +// Calling Msg twice can have unexpected result. +func (e *Event) Msg(msg string) { + if e == nil { + return + } + e.msg(msg) +} + +// Send is equivalent to calling Msg(""). +// +// NOTICE: once this method is called, the *Event should be disposed. +func (e *Event) Send() { + if e == nil { + return + } + e.msg("") +} + +// Msgf sends the event with formatted msg added as the message field if not empty. +// +// NOTICE: once this method is called, the *Event should be disposed. +// Calling Msgf twice can have unexpected result. +func (e *Event) Msgf(format string, v ...interface{}) { + if e == nil { + return + } + e.msg(fmt.Sprintf(format, v...)) +} + +func (e *Event) MsgFunc(createMsg func() string) { + if e == nil { + return + } + e.msg(createMsg()) +} + +func (e *Event) msg(msg string) { + for _, hook := range e.ch { + hook.Run(e, e.level, msg) + } + if msg != "" { + e.buf = enc.AppendString(enc.AppendKey(e.buf, MessageFieldName), msg) + } + if e.done != nil { + defer e.done(msg) + } + if err := e.write(); err != nil { + if ErrorHandler != nil { + ErrorHandler(err) + } else { + fmt.Fprintf(os.Stderr, "zerolog: could not write event: %v\n", err) + } + } +} + +// Fields is a helper function to use a map or slice to set fields using type assertion. +// Only map[string]interface{} and []interface{} are accepted. 
[]interface{} must +// alternate string keys and arbitrary values, and extraneous ones are ignored. +func (e *Event) Fields(fields interface{}) *Event { + if e == nil { + return e + } + e.buf = appendFields(e.buf, fields) + return e +} + +// Dict adds the field key with a dict to the event context. +// Use zerolog.Dict() to create the dictionary. +func (e *Event) Dict(key string, dict *Event) *Event { + if e == nil { + return e + } + dict.buf = enc.AppendEndMarker(dict.buf) + e.buf = append(enc.AppendKey(e.buf, key), dict.buf...) + putEvent(dict) + return e +} + +// Dict creates an Event to be used with the *Event.Dict method. +// Call usual field methods like Str, Int etc to add fields to this +// event and give it as argument the *Event.Dict method. +func Dict() *Event { + return newEvent(nil, 0) +} + +// Array adds the field key with an array to the event context. +// Use zerolog.Arr() to create the array or pass a type that +// implement the LogArrayMarshaler interface. +func (e *Event) Array(key string, arr LogArrayMarshaler) *Event { + if e == nil { + return e + } + e.buf = enc.AppendKey(e.buf, key) + var a *Array + if aa, ok := arr.(*Array); ok { + a = aa + } else { + a = Arr() + arr.MarshalZerologArray(a) + } + e.buf = a.write(e.buf) + return e +} + +func (e *Event) appendObject(obj LogObjectMarshaler) { + e.buf = enc.AppendBeginMarker(e.buf) + obj.MarshalZerologObject(e) + e.buf = enc.AppendEndMarker(e.buf) +} + +// Object marshals an object that implement the LogObjectMarshaler interface. +func (e *Event) Object(key string, obj LogObjectMarshaler) *Event { + if e == nil { + return e + } + e.buf = enc.AppendKey(e.buf, key) + if obj == nil { + e.buf = enc.AppendNil(e.buf) + + return e + } + + e.appendObject(obj) + return e +} + +// Func allows an anonymous func to run only if the event is enabled. +func (e *Event) Func(f func(e *Event)) *Event { + if e != nil && e.Enabled() { + f(e) + } + return e +} + +// EmbedObject marshals an object that implement the LogObjectMarshaler interface. +func (e *Event) EmbedObject(obj LogObjectMarshaler) *Event { + if e == nil { + return e + } + if obj == nil { + return e + } + obj.MarshalZerologObject(e) + return e +} + +// Str adds the field key with val as a string to the *Event context. +func (e *Event) Str(key, val string) *Event { + if e == nil { + return e + } + e.buf = enc.AppendString(enc.AppendKey(e.buf, key), val) + return e +} + +// Strs adds the field key with vals as a []string to the *Event context. +func (e *Event) Strs(key string, vals []string) *Event { + if e == nil { + return e + } + e.buf = enc.AppendStrings(enc.AppendKey(e.buf, key), vals) + return e +} + +// Stringer adds the field key with val.String() (or null if val is nil) +// to the *Event context. +func (e *Event) Stringer(key string, val fmt.Stringer) *Event { + if e == nil { + return e + } + e.buf = enc.AppendStringer(enc.AppendKey(e.buf, key), val) + return e +} + +// Stringers adds the field key with vals where each individual val +// is used as val.String() (or null if val is empty) to the *Event +// context. +func (e *Event) Stringers(key string, vals []fmt.Stringer) *Event { + if e == nil { + return e + } + e.buf = enc.AppendStringers(enc.AppendKey(e.buf, key), vals) + return e +} + +// Bytes adds the field key with val as a string to the *Event context. +// +// Runes outside of normal ASCII ranges will be hex-encoded in the resulting +// JSON. 
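+//
+// For example (editor's note, not upstream text): plain ASCII input such as
+// Bytes("k", []byte("abc")) would come out as the JSON string "k":"abc",
+// while the Hex method defined below always renders hex digits, e.g.
+// Hex("k", []byte{0x61, 0xfe}) giving "k":"61fe".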
+func (e *Event) Bytes(key string, val []byte) *Event { + if e == nil { + return e + } + e.buf = enc.AppendBytes(enc.AppendKey(e.buf, key), val) + return e +} + +// Hex adds the field key with val as a hex string to the *Event context. +func (e *Event) Hex(key string, val []byte) *Event { + if e == nil { + return e + } + e.buf = enc.AppendHex(enc.AppendKey(e.buf, key), val) + return e +} + +// RawJSON adds already encoded JSON to the log line under key. +// +// No sanity check is performed on b; it must not contain carriage returns and +// be valid JSON. +func (e *Event) RawJSON(key string, b []byte) *Event { + if e == nil { + return e + } + e.buf = appendJSON(enc.AppendKey(e.buf, key), b) + return e +} + +// RawCBOR adds already encoded CBOR to the log line under key. +// +// No sanity check is performed on b +// Note: The full featureset of CBOR is supported as data will not be mapped to json but stored as data-url +func (e *Event) RawCBOR(key string, b []byte) *Event { + if e == nil { + return e + } + e.buf = appendCBOR(enc.AppendKey(e.buf, key), b) + return e +} + +// AnErr adds the field key with serialized err to the *Event context. +// If err is nil, no field is added. +func (e *Event) AnErr(key string, err error) *Event { + if e == nil { + return e + } + switch m := ErrorMarshalFunc(err).(type) { + case nil: + return e + case LogObjectMarshaler: + return e.Object(key, m) + case error: + if m == nil || isNilValue(m) { + return e + } else { + return e.Str(key, m.Error()) + } + case string: + return e.Str(key, m) + default: + return e.Interface(key, m) + } +} + +// Errs adds the field key with errs as an array of serialized errors to the +// *Event context. +func (e *Event) Errs(key string, errs []error) *Event { + if e == nil { + return e + } + arr := Arr() + for _, err := range errs { + switch m := ErrorMarshalFunc(err).(type) { + case LogObjectMarshaler: + arr = arr.Object(m) + case error: + arr = arr.Err(m) + case string: + arr = arr.Str(m) + default: + arr = arr.Interface(m) + } + } + + return e.Array(key, arr) +} + +// Err adds the field "error" with serialized err to the *Event context. +// If err is nil, no field is added. +// +// To customize the key name, change zerolog.ErrorFieldName. +// +// If Stack() has been called before and zerolog.ErrorStackMarshaler is defined, +// the err is passed to ErrorStackMarshaler and the result is appended to the +// zerolog.ErrorStackFieldName. +func (e *Event) Err(err error) *Event { + if e == nil { + return e + } + if e.stack && ErrorStackMarshaler != nil { + switch m := ErrorStackMarshaler(err).(type) { + case nil: + case LogObjectMarshaler: + e.Object(ErrorStackFieldName, m) + case error: + if m != nil && !isNilValue(m) { + e.Str(ErrorStackFieldName, m.Error()) + } + case string: + e.Str(ErrorStackFieldName, m) + default: + e.Interface(ErrorStackFieldName, m) + } + } + return e.AnErr(ErrorFieldName, err) +} + +// Stack enables stack trace printing for the error passed to Err(). +// +// ErrorStackMarshaler must be set for this method to do something. +func (e *Event) Stack() *Event { + if e != nil { + e.stack = true + } + return e +} + +// Ctx adds the Go Context to the *Event context. The context is not rendered +// in the output message, but is available to hooks and to Func() calls via the +// GetCtx() accessor. A typical use case is to extract tracing information from +// the Go Ctx. 
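+//
+// Sketch of the intended pattern (editor's illustration):
+//
+//	logger.Info().Ctx(ctx).Msg("handled request")
+//
+// where a registered Hook may call e.GetCtx() to recover span or trace
+// values carried by ctx.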
+func (e *Event) Ctx(ctx context.Context) *Event { + if e != nil { + e.ctx = ctx + } + return e +} + +// GetCtx retrieves the Go context.Context which is optionally stored in the +// Event. This allows Hooks and functions passed to Func() to retrieve values +// which are stored in the context.Context. This can be useful in tracing, +// where span information is commonly propagated in the context.Context. +func (e *Event) GetCtx() context.Context { + if e == nil || e.ctx == nil { + return context.Background() + } + return e.ctx +} + +// Bool adds the field key with val as a bool to the *Event context. +func (e *Event) Bool(key string, b bool) *Event { + if e == nil { + return e + } + e.buf = enc.AppendBool(enc.AppendKey(e.buf, key), b) + return e +} + +// Bools adds the field key with val as a []bool to the *Event context. +func (e *Event) Bools(key string, b []bool) *Event { + if e == nil { + return e + } + e.buf = enc.AppendBools(enc.AppendKey(e.buf, key), b) + return e +} + +// Int adds the field key with i as a int to the *Event context. +func (e *Event) Int(key string, i int) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInt(enc.AppendKey(e.buf, key), i) + return e +} + +// Ints adds the field key with i as a []int to the *Event context. +func (e *Event) Ints(key string, i []int) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInts(enc.AppendKey(e.buf, key), i) + return e +} + +// Int8 adds the field key with i as a int8 to the *Event context. +func (e *Event) Int8(key string, i int8) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInt8(enc.AppendKey(e.buf, key), i) + return e +} + +// Ints8 adds the field key with i as a []int8 to the *Event context. +func (e *Event) Ints8(key string, i []int8) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInts8(enc.AppendKey(e.buf, key), i) + return e +} + +// Int16 adds the field key with i as a int16 to the *Event context. +func (e *Event) Int16(key string, i int16) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInt16(enc.AppendKey(e.buf, key), i) + return e +} + +// Ints16 adds the field key with i as a []int16 to the *Event context. +func (e *Event) Ints16(key string, i []int16) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInts16(enc.AppendKey(e.buf, key), i) + return e +} + +// Int32 adds the field key with i as a int32 to the *Event context. +func (e *Event) Int32(key string, i int32) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInt32(enc.AppendKey(e.buf, key), i) + return e +} + +// Ints32 adds the field key with i as a []int32 to the *Event context. +func (e *Event) Ints32(key string, i []int32) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInts32(enc.AppendKey(e.buf, key), i) + return e +} + +// Int64 adds the field key with i as a int64 to the *Event context. +func (e *Event) Int64(key string, i int64) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInt64(enc.AppendKey(e.buf, key), i) + return e +} + +// Ints64 adds the field key with i as a []int64 to the *Event context. +func (e *Event) Ints64(key string, i []int64) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInts64(enc.AppendKey(e.buf, key), i) + return e +} + +// Uint adds the field key with i as a uint to the *Event context. +func (e *Event) Uint(key string, i uint) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUint(enc.AppendKey(e.buf, key), i) + return e +} + +// Uints adds the field key with i as a []int to the *Event context. 
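+//
+// The unsigned helpers mirror the signed ones (editor's sketch):
+//
+//	logger.Info().
+//		Uint("attempts", 3).
+//		Uints("ports", []uint{8080, 8081}).
+//		Msg("listening")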
+func (e *Event) Uints(key string, i []uint) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUints(enc.AppendKey(e.buf, key), i) + return e +} + +// Uint8 adds the field key with i as a uint8 to the *Event context. +func (e *Event) Uint8(key string, i uint8) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUint8(enc.AppendKey(e.buf, key), i) + return e +} + +// Uints8 adds the field key with i as a []int8 to the *Event context. +func (e *Event) Uints8(key string, i []uint8) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUints8(enc.AppendKey(e.buf, key), i) + return e +} + +// Uint16 adds the field key with i as a uint16 to the *Event context. +func (e *Event) Uint16(key string, i uint16) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUint16(enc.AppendKey(e.buf, key), i) + return e +} + +// Uints16 adds the field key with i as a []int16 to the *Event context. +func (e *Event) Uints16(key string, i []uint16) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUints16(enc.AppendKey(e.buf, key), i) + return e +} + +// Uint32 adds the field key with i as a uint32 to the *Event context. +func (e *Event) Uint32(key string, i uint32) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUint32(enc.AppendKey(e.buf, key), i) + return e +} + +// Uints32 adds the field key with i as a []int32 to the *Event context. +func (e *Event) Uints32(key string, i []uint32) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUints32(enc.AppendKey(e.buf, key), i) + return e +} + +// Uint64 adds the field key with i as a uint64 to the *Event context. +func (e *Event) Uint64(key string, i uint64) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUint64(enc.AppendKey(e.buf, key), i) + return e +} + +// Uints64 adds the field key with i as a []int64 to the *Event context. +func (e *Event) Uints64(key string, i []uint64) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUints64(enc.AppendKey(e.buf, key), i) + return e +} + +// Float32 adds the field key with f as a float32 to the *Event context. +func (e *Event) Float32(key string, f float32) *Event { + if e == nil { + return e + } + e.buf = enc.AppendFloat32(enc.AppendKey(e.buf, key), f) + return e +} + +// Floats32 adds the field key with f as a []float32 to the *Event context. +func (e *Event) Floats32(key string, f []float32) *Event { + if e == nil { + return e + } + e.buf = enc.AppendFloats32(enc.AppendKey(e.buf, key), f) + return e +} + +// Float64 adds the field key with f as a float64 to the *Event context. +func (e *Event) Float64(key string, f float64) *Event { + if e == nil { + return e + } + e.buf = enc.AppendFloat64(enc.AppendKey(e.buf, key), f) + return e +} + +// Floats64 adds the field key with f as a []float64 to the *Event context. +func (e *Event) Floats64(key string, f []float64) *Event { + if e == nil { + return e + } + e.buf = enc.AppendFloats64(enc.AppendKey(e.buf, key), f) + return e +} + +// Timestamp adds the current local time as UNIX timestamp to the *Event context with the "time" key. +// To customize the key name, change zerolog.TimestampFieldName. +// +// NOTE: It won't dedupe the "time" key if the *Event (or *Context) has one +// already. +func (e *Event) Timestamp() *Event { + if e == nil { + return e + } + e.buf = enc.AppendTime(enc.AppendKey(e.buf, TimestampFieldName), TimestampFunc(), TimeFieldFormat) + return e +} + +// Time adds the field key with t formatted as string using zerolog.TimeFieldFormat. 
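+//
+// Combined sketch (editor's illustration, assuming the default
+// time.RFC3339 TimeFieldFormat and millisecond DurationFieldUnit):
+//
+//	start := time.Now()
+//	logger.Info().
+//		Time("deadline", start.Add(5*time.Second)).
+//		Dur("elapsed", time.Since(start)).
+//		Msg("rate lookup")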
+func (e *Event) Time(key string, t time.Time) *Event { + if e == nil { + return e + } + e.buf = enc.AppendTime(enc.AppendKey(e.buf, key), t, TimeFieldFormat) + return e +} + +// Times adds the field key with t formatted as string using zerolog.TimeFieldFormat. +func (e *Event) Times(key string, t []time.Time) *Event { + if e == nil { + return e + } + e.buf = enc.AppendTimes(enc.AppendKey(e.buf, key), t, TimeFieldFormat) + return e +} + +// Dur adds the field key with duration d stored as zerolog.DurationFieldUnit. +// If zerolog.DurationFieldInteger is true, durations are rendered as integer +// instead of float. +func (e *Event) Dur(key string, d time.Duration) *Event { + if e == nil { + return e + } + e.buf = enc.AppendDuration(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger) + return e +} + +// Durs adds the field key with duration d stored as zerolog.DurationFieldUnit. +// If zerolog.DurationFieldInteger is true, durations are rendered as integer +// instead of float. +func (e *Event) Durs(key string, d []time.Duration) *Event { + if e == nil { + return e + } + e.buf = enc.AppendDurations(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger) + return e +} + +// TimeDiff adds the field key with positive duration between time t and start. +// If time t is not greater than start, duration will be 0. +// Duration format follows the same principle as Dur(). +func (e *Event) TimeDiff(key string, t time.Time, start time.Time) *Event { + if e == nil { + return e + } + var d time.Duration + if t.After(start) { + d = t.Sub(start) + } + e.buf = enc.AppendDuration(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger) + return e +} + +// Any is a wrapper around Event.Interface. +func (e *Event) Any(key string, i interface{}) *Event { + return e.Interface(key, i) +} + +// Interface adds the field key with i marshaled using reflection. +func (e *Event) Interface(key string, i interface{}) *Event { + if e == nil { + return e + } + if obj, ok := i.(LogObjectMarshaler); ok { + return e.Object(key, obj) + } + e.buf = enc.AppendInterface(enc.AppendKey(e.buf, key), i) + return e +} + +// Type adds the field key with val's type using reflection. +func (e *Event) Type(key string, val interface{}) *Event { + if e == nil { + return e + } + e.buf = enc.AppendType(enc.AppendKey(e.buf, key), val) + return e +} + +// CallerSkipFrame instructs any future Caller calls to skip the specified number of frames. +// This includes those added via hooks from the context. +func (e *Event) CallerSkipFrame(skip int) *Event { + if e == nil { + return e + } + e.skipFrame += skip + return e +} + +// Caller adds the file:line of the caller with the zerolog.CallerFieldName key. 
+// The argument skip is the number of stack frames to ascend +// Skip If not passed, use the global variable CallerSkipFrameCount +func (e *Event) Caller(skip ...int) *Event { + sk := CallerSkipFrameCount + if len(skip) > 0 { + sk = skip[0] + CallerSkipFrameCount + } + return e.caller(sk) +} + +func (e *Event) caller(skip int) *Event { + if e == nil { + return e + } + pc, file, line, ok := runtime.Caller(skip + e.skipFrame) + if !ok { + return e + } + e.buf = enc.AppendString(enc.AppendKey(e.buf, CallerFieldName), CallerMarshalFunc(pc, file, line)) + return e +} + +// IPAddr adds IPv4 or IPv6 Address to the event +func (e *Event) IPAddr(key string, ip net.IP) *Event { + if e == nil { + return e + } + e.buf = enc.AppendIPAddr(enc.AppendKey(e.buf, key), ip) + return e +} + +// IPPrefix adds IPv4 or IPv6 Prefix (address and mask) to the event +func (e *Event) IPPrefix(key string, pfx net.IPNet) *Event { + if e == nil { + return e + } + e.buf = enc.AppendIPPrefix(enc.AppendKey(e.buf, key), pfx) + return e +} + +// MACAddr adds MAC address to the event +func (e *Event) MACAddr(key string, ha net.HardwareAddr) *Event { + if e == nil { + return e + } + e.buf = enc.AppendMACAddr(enc.AppendKey(e.buf, key), ha) + return e +} diff --git a/hotelReservation/vendor/github.com/rs/zerolog/fields.go b/hotelReservation/vendor/github.com/rs/zerolog/fields.go new file mode 100644 index 000000000..c1eb5ce79 --- /dev/null +++ b/hotelReservation/vendor/github.com/rs/zerolog/fields.go @@ -0,0 +1,277 @@ +package zerolog + +import ( + "encoding/json" + "net" + "sort" + "time" + "unsafe" +) + +func isNilValue(i interface{}) bool { + return (*[2]uintptr)(unsafe.Pointer(&i))[1] == 0 +} + +func appendFields(dst []byte, fields interface{}) []byte { + switch fields := fields.(type) { + case []interface{}: + if n := len(fields); n&0x1 == 1 { // odd number + fields = fields[:n-1] + } + dst = appendFieldList(dst, fields) + case map[string]interface{}: + keys := make([]string, 0, len(fields)) + for key := range fields { + keys = append(keys, key) + } + sort.Strings(keys) + kv := make([]interface{}, 2) + for _, key := range keys { + kv[0], kv[1] = key, fields[key] + dst = appendFieldList(dst, kv) + } + } + return dst +} + +func appendFieldList(dst []byte, kvList []interface{}) []byte { + for i, n := 0, len(kvList); i < n; i += 2 { + key, val := kvList[i], kvList[i+1] + if key, ok := key.(string); ok { + dst = enc.AppendKey(dst, key) + } else { + continue + } + if val, ok := val.(LogObjectMarshaler); ok { + e := newEvent(nil, 0) + e.buf = e.buf[:0] + e.appendObject(val) + dst = append(dst, e.buf...) + putEvent(e) + continue + } + switch val := val.(type) { + case string: + dst = enc.AppendString(dst, val) + case []byte: + dst = enc.AppendBytes(dst, val) + case error: + switch m := ErrorMarshalFunc(val).(type) { + case LogObjectMarshaler: + e := newEvent(nil, 0) + e.buf = e.buf[:0] + e.appendObject(m) + dst = append(dst, e.buf...) + putEvent(e) + case error: + if m == nil || isNilValue(m) { + dst = enc.AppendNil(dst) + } else { + dst = enc.AppendString(dst, m.Error()) + } + case string: + dst = enc.AppendString(dst, m) + default: + dst = enc.AppendInterface(dst, m) + } + case []error: + dst = enc.AppendArrayStart(dst) + for i, err := range val { + switch m := ErrorMarshalFunc(err).(type) { + case LogObjectMarshaler: + e := newEvent(nil, 0) + e.buf = e.buf[:0] + e.appendObject(m) + dst = append(dst, e.buf...) 
+ putEvent(e) + case error: + if m == nil || isNilValue(m) { + dst = enc.AppendNil(dst) + } else { + dst = enc.AppendString(dst, m.Error()) + } + case string: + dst = enc.AppendString(dst, m) + default: + dst = enc.AppendInterface(dst, m) + } + + if i < (len(val) - 1) { + enc.AppendArrayDelim(dst) + } + } + dst = enc.AppendArrayEnd(dst) + case bool: + dst = enc.AppendBool(dst, val) + case int: + dst = enc.AppendInt(dst, val) + case int8: + dst = enc.AppendInt8(dst, val) + case int16: + dst = enc.AppendInt16(dst, val) + case int32: + dst = enc.AppendInt32(dst, val) + case int64: + dst = enc.AppendInt64(dst, val) + case uint: + dst = enc.AppendUint(dst, val) + case uint8: + dst = enc.AppendUint8(dst, val) + case uint16: + dst = enc.AppendUint16(dst, val) + case uint32: + dst = enc.AppendUint32(dst, val) + case uint64: + dst = enc.AppendUint64(dst, val) + case float32: + dst = enc.AppendFloat32(dst, val) + case float64: + dst = enc.AppendFloat64(dst, val) + case time.Time: + dst = enc.AppendTime(dst, val, TimeFieldFormat) + case time.Duration: + dst = enc.AppendDuration(dst, val, DurationFieldUnit, DurationFieldInteger) + case *string: + if val != nil { + dst = enc.AppendString(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *bool: + if val != nil { + dst = enc.AppendBool(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *int: + if val != nil { + dst = enc.AppendInt(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *int8: + if val != nil { + dst = enc.AppendInt8(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *int16: + if val != nil { + dst = enc.AppendInt16(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *int32: + if val != nil { + dst = enc.AppendInt32(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *int64: + if val != nil { + dst = enc.AppendInt64(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *uint: + if val != nil { + dst = enc.AppendUint(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *uint8: + if val != nil { + dst = enc.AppendUint8(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *uint16: + if val != nil { + dst = enc.AppendUint16(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *uint32: + if val != nil { + dst = enc.AppendUint32(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *uint64: + if val != nil { + dst = enc.AppendUint64(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *float32: + if val != nil { + dst = enc.AppendFloat32(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *float64: + if val != nil { + dst = enc.AppendFloat64(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *time.Time: + if val != nil { + dst = enc.AppendTime(dst, *val, TimeFieldFormat) + } else { + dst = enc.AppendNil(dst) + } + case *time.Duration: + if val != nil { + dst = enc.AppendDuration(dst, *val, DurationFieldUnit, DurationFieldInteger) + } else { + dst = enc.AppendNil(dst) + } + case []string: + dst = enc.AppendStrings(dst, val) + case []bool: + dst = enc.AppendBools(dst, val) + case []int: + dst = enc.AppendInts(dst, val) + case []int8: + dst = enc.AppendInts8(dst, val) + case []int16: + dst = enc.AppendInts16(dst, val) + case []int32: + dst = enc.AppendInts32(dst, val) + case []int64: + dst = enc.AppendInts64(dst, val) + case []uint: + dst = enc.AppendUints(dst, val) + // case []uint8: + // dst = enc.AppendUints8(dst, val) + case []uint16: + dst = enc.AppendUints16(dst, val) + case []uint32: + dst = enc.AppendUints32(dst, val) + case []uint64: + 
dst = enc.AppendUints64(dst, val) + case []float32: + dst = enc.AppendFloats32(dst, val) + case []float64: + dst = enc.AppendFloats64(dst, val) + case []time.Time: + dst = enc.AppendTimes(dst, val, TimeFieldFormat) + case []time.Duration: + dst = enc.AppendDurations(dst, val, DurationFieldUnit, DurationFieldInteger) + case nil: + dst = enc.AppendNil(dst) + case net.IP: + dst = enc.AppendIPAddr(dst, val) + case net.IPNet: + dst = enc.AppendIPPrefix(dst, val) + case net.HardwareAddr: + dst = enc.AppendMACAddr(dst, val) + case json.RawMessage: + dst = appendJSON(dst, val) + default: + dst = enc.AppendInterface(dst, val) + } + } + return dst +} diff --git a/hotelReservation/vendor/github.com/rs/zerolog/globals.go b/hotelReservation/vendor/github.com/rs/zerolog/globals.go new file mode 100644 index 000000000..e1067deb6 --- /dev/null +++ b/hotelReservation/vendor/github.com/rs/zerolog/globals.go @@ -0,0 +1,142 @@ +package zerolog + +import ( + "encoding/json" + "strconv" + "sync/atomic" + "time" +) + +const ( + // TimeFormatUnix defines a time format that makes time fields to be + // serialized as Unix timestamp integers. + TimeFormatUnix = "" + + // TimeFormatUnixMs defines a time format that makes time fields to be + // serialized as Unix timestamp integers in milliseconds. + TimeFormatUnixMs = "UNIXMS" + + // TimeFormatUnixMicro defines a time format that makes time fields to be + // serialized as Unix timestamp integers in microseconds. + TimeFormatUnixMicro = "UNIXMICRO" + + // TimeFormatUnixNano defines a time format that makes time fields to be + // serialized as Unix timestamp integers in nanoseconds. + TimeFormatUnixNano = "UNIXNANO" +) + +var ( + // TimestampFieldName is the field name used for the timestamp field. + TimestampFieldName = "time" + + // LevelFieldName is the field name used for the level field. + LevelFieldName = "level" + + // LevelTraceValue is the value used for the trace level field. + LevelTraceValue = "trace" + // LevelDebugValue is the value used for the debug level field. + LevelDebugValue = "debug" + // LevelInfoValue is the value used for the info level field. + LevelInfoValue = "info" + // LevelWarnValue is the value used for the warn level field. + LevelWarnValue = "warn" + // LevelErrorValue is the value used for the error level field. + LevelErrorValue = "error" + // LevelFatalValue is the value used for the fatal level field. + LevelFatalValue = "fatal" + // LevelPanicValue is the value used for the panic level field. + LevelPanicValue = "panic" + + // LevelFieldMarshalFunc allows customization of global level field marshaling. + LevelFieldMarshalFunc = func(l Level) string { + return l.String() + } + + // MessageFieldName is the field name used for the message field. + MessageFieldName = "message" + + // ErrorFieldName is the field name used for error fields. + ErrorFieldName = "error" + + // CallerFieldName is the field name used for caller field. + CallerFieldName = "caller" + + // CallerSkipFrameCount is the number of stack frames to skip to find the caller. + CallerSkipFrameCount = 2 + + // CallerMarshalFunc allows customization of global caller marshaling + CallerMarshalFunc = func(pc uintptr, file string, line int) string { + return file + ":" + strconv.Itoa(line) + } + + // ErrorStackFieldName is the field name used for error stacks. + ErrorStackFieldName = "stack" + + // ErrorStackMarshaler extract the stack from err if any. 
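+	//
+	// Typical wiring (editor's note; pkgerrors is a zerolog subpackage that
+	// this diff does not vendor, so treat the lines below as a sketch):
+	//
+	//	zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack
+	//	logger.Error().Stack().Err(err).Msg("lookup failed")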
+ ErrorStackMarshaler func(err error) interface{} + + // ErrorMarshalFunc allows customization of global error marshaling + ErrorMarshalFunc = func(err error) interface{} { + return err + } + + // InterfaceMarshalFunc allows customization of interface marshaling. + // Default: "encoding/json.Marshal" + InterfaceMarshalFunc = json.Marshal + + // TimeFieldFormat defines the time format of the Time field type. If set to + // TimeFormatUnix, TimeFormatUnixMs, TimeFormatUnixMicro or TimeFormatUnixNano, the time is formatted as a UNIX + // timestamp as integer. + TimeFieldFormat = time.RFC3339 + + // TimestampFunc defines the function called to generate a timestamp. + TimestampFunc = time.Now + + // DurationFieldUnit defines the unit for time.Duration type fields added + // using the Dur method. + DurationFieldUnit = time.Millisecond + + // DurationFieldInteger renders Dur fields as integer instead of float if + // set to true. + DurationFieldInteger = false + + // ErrorHandler is called whenever zerolog fails to write an event on its + // output. If not set, an error is printed on the stderr. This handler must + // be thread safe and non-blocking. + ErrorHandler func(err error) + + // DefaultContextLogger is returned from Ctx() if there is no logger associated + // with the context. + DefaultContextLogger *Logger +) + +var ( + gLevel = new(int32) + disableSampling = new(int32) +) + +// SetGlobalLevel sets the global override for log level. If this +// values is raised, all Loggers will use at least this value. +// +// To globally disable logs, set GlobalLevel to Disabled. +func SetGlobalLevel(l Level) { + atomic.StoreInt32(gLevel, int32(l)) +} + +// GlobalLevel returns the current global log level +func GlobalLevel() Level { + return Level(atomic.LoadInt32(gLevel)) +} + +// DisableSampling will disable sampling in all Loggers if true. +func DisableSampling(v bool) { + var i int32 + if v { + i = 1 + } + atomic.StoreInt32(disableSampling, i) +} + +func samplingDisabled() bool { + return atomic.LoadInt32(disableSampling) == 1 +} diff --git a/hotelReservation/vendor/github.com/rs/zerolog/go112.go b/hotelReservation/vendor/github.com/rs/zerolog/go112.go new file mode 100644 index 000000000..e7b5a1bdc --- /dev/null +++ b/hotelReservation/vendor/github.com/rs/zerolog/go112.go @@ -0,0 +1,7 @@ +// +build go1.12 + +package zerolog + +// Since go 1.12, some auto generated init functions are hidden from +// runtime.Caller. +const contextCallerSkipFrameCount = 2 diff --git a/hotelReservation/vendor/github.com/rs/zerolog/hook.go b/hotelReservation/vendor/github.com/rs/zerolog/hook.go new file mode 100644 index 000000000..ec6effc1a --- /dev/null +++ b/hotelReservation/vendor/github.com/rs/zerolog/hook.go @@ -0,0 +1,64 @@ +package zerolog + +// Hook defines an interface to a log hook. +type Hook interface { + // Run runs the hook with the event. + Run(e *Event, level Level, message string) +} + +// HookFunc is an adaptor to allow the use of an ordinary function +// as a Hook. +type HookFunc func(e *Event, level Level, message string) + +// Run implements the Hook interface. +func (h HookFunc) Run(e *Event, level Level, message string) { + h(e, level, message) +} + +// LevelHook applies a different hook for each level. +type LevelHook struct { + NoLevelHook, TraceHook, DebugHook, InfoHook, WarnHook, ErrorHook, FatalHook, PanicHook Hook +} + +// Run implements the Hook interface. 
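+//
+// Attaching a LevelHook (editor's sketch; errCount is a hypothetical
+// package-level uint64 counter, incremented via sync/atomic):
+//
+//	lh := zerolog.NewLevelHook()
+//	lh.ErrorHook = zerolog.HookFunc(
+//		func(e *zerolog.Event, level zerolog.Level, msg string) {
+//			atomic.AddUint64(&errCount, 1)
+//		})
+//	logger = logger.Hook(lh)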
+func (h LevelHook) Run(e *Event, level Level, message string) { + switch level { + case TraceLevel: + if h.TraceHook != nil { + h.TraceHook.Run(e, level, message) + } + case DebugLevel: + if h.DebugHook != nil { + h.DebugHook.Run(e, level, message) + } + case InfoLevel: + if h.InfoHook != nil { + h.InfoHook.Run(e, level, message) + } + case WarnLevel: + if h.WarnHook != nil { + h.WarnHook.Run(e, level, message) + } + case ErrorLevel: + if h.ErrorHook != nil { + h.ErrorHook.Run(e, level, message) + } + case FatalLevel: + if h.FatalHook != nil { + h.FatalHook.Run(e, level, message) + } + case PanicLevel: + if h.PanicHook != nil { + h.PanicHook.Run(e, level, message) + } + case NoLevel: + if h.NoLevelHook != nil { + h.NoLevelHook.Run(e, level, message) + } + } +} + +// NewLevelHook returns a new LevelHook. +func NewLevelHook() LevelHook { + return LevelHook{} +} diff --git a/hotelReservation/vendor/github.com/rs/zerolog/internal/cbor/README.md b/hotelReservation/vendor/github.com/rs/zerolog/internal/cbor/README.md new file mode 100644 index 000000000..92c2e8c7f --- /dev/null +++ b/hotelReservation/vendor/github.com/rs/zerolog/internal/cbor/README.md @@ -0,0 +1,56 @@ +## Reference: + CBOR Encoding is described in [RFC7049](https://tools.ietf.org/html/rfc7049) + +## Comparison of JSON vs CBOR + +Two main areas of reduction are: + +1. CPU usage to write a log msg +2. Size (in bytes) of log messages. + + +CPU Usage savings are below: +``` +name JSON time/op CBOR time/op delta +Info-32 15.3ns ± 1% 11.7ns ± 3% -23.78% (p=0.000 n=9+10) +ContextFields-32 16.2ns ± 2% 12.3ns ± 3% -23.97% (p=0.000 n=9+9) +ContextAppend-32 6.70ns ± 0% 6.20ns ± 0% -7.44% (p=0.000 n=9+9) +LogFields-32 66.4ns ± 0% 24.6ns ± 2% -62.89% (p=0.000 n=10+9) +LogArrayObject-32 911ns ±11% 768ns ± 6% -15.64% (p=0.000 n=10+10) +LogFieldType/Floats-32 70.3ns ± 2% 29.5ns ± 1% -57.98% (p=0.000 n=10+10) +LogFieldType/Err-32 14.0ns ± 3% 12.1ns ± 8% -13.20% (p=0.000 n=8+10) +LogFieldType/Dur-32 17.2ns ± 2% 13.1ns ± 1% -24.27% (p=0.000 n=10+9) +LogFieldType/Object-32 54.3ns ±11% 52.3ns ± 7% ~ (p=0.239 n=10+10) +LogFieldType/Ints-32 20.3ns ± 2% 15.1ns ± 2% -25.50% (p=0.000 n=9+10) +LogFieldType/Interfaces-32 642ns ±11% 621ns ± 9% ~ (p=0.118 n=10+10) +LogFieldType/Interface(Objects)-32 635ns ±13% 632ns ± 9% ~ (p=0.592 n=10+10) +LogFieldType/Times-32 294ns ± 0% 27ns ± 1% -90.71% (p=0.000 n=10+9) +LogFieldType/Durs-32 121ns ± 0% 33ns ± 2% -72.44% (p=0.000 n=9+9) +LogFieldType/Interface(Object)-32 56.6ns ± 8% 52.3ns ± 8% -7.54% (p=0.007 n=10+10) +LogFieldType/Errs-32 17.8ns ± 3% 16.1ns ± 2% -9.71% (p=0.000 n=10+9) +LogFieldType/Time-32 40.5ns ± 1% 12.7ns ± 6% -68.66% (p=0.000 n=8+9) +LogFieldType/Bool-32 12.0ns ± 5% 10.2ns ± 2% -15.18% (p=0.000 n=10+8) +LogFieldType/Bools-32 17.2ns ± 2% 12.6ns ± 4% -26.63% (p=0.000 n=10+10) +LogFieldType/Int-32 12.3ns ± 2% 11.2ns ± 4% -9.27% (p=0.000 n=9+10) +LogFieldType/Float-32 16.7ns ± 1% 12.6ns ± 2% -24.42% (p=0.000 n=7+9) +LogFieldType/Str-32 12.7ns ± 7% 11.3ns ± 7% -10.88% (p=0.000 n=10+9) +LogFieldType/Strs-32 20.3ns ± 3% 18.2ns ± 3% -10.25% (p=0.000 n=9+10) +LogFieldType/Interface-32 183ns ±12% 175ns ± 9% ~ (p=0.078 n=10+10) +``` + +Log message size savings is greatly dependent on the number and type of fields in the log message. +Assuming this log message (with an Integer, timestamp and string, in addition to level). 
+
+`{"level":"error","Fault":41650,"time":"2018-04-01T15:18:19-07:00","message":"Some Message"}`
+
+Two measurements were taken of the log file sizes: one without any compression, and a
+second using [compress/zlib](https://golang.org/pkg/compress/zlib/).
+
+Results for 10,000 log messages:
+
+| Log Format | Plain File Size (in KB) | Compressed File Size (in KB) |
+| :--- | :---: | :---: |
+| JSON | 920 | 28 |
+| CBOR | 550 | 28 |
+
+The example used to calculate the above data is available in [Examples](examples).
diff --git a/hotelReservation/vendor/github.com/rs/zerolog/internal/cbor/base.go b/hotelReservation/vendor/github.com/rs/zerolog/internal/cbor/base.go
new file mode 100644
index 000000000..51fe86c9b
--- /dev/null
+++ b/hotelReservation/vendor/github.com/rs/zerolog/internal/cbor/base.go
@@ -0,0 +1,19 @@
+package cbor
+
+// JSONMarshalFunc is used to marshal an interface to a JSON-encoded byte slice.
+// Making it package-level instead of a field on Encoder adds a little
+// extra wiring at import time, but avoids a value copy each time one of
+// the Encoder methods is invoked.
+// Remember to set this variable when importing this package, or you
+// might get a nil pointer dereference panic at runtime.
+var JSONMarshalFunc func(v interface{}) ([]byte, error)
+
+type Encoder struct{}
+
+// AppendKey adds a key (string) to the binary-encoded log message.
+func (e Encoder) AppendKey(dst []byte, key string) []byte {
+	if len(dst) < 1 {
+		dst = e.AppendBeginMarker(dst)
+	}
+	return e.AppendString(dst, key)
+}
diff --git a/hotelReservation/vendor/github.com/rs/zerolog/internal/cbor/cbor.go b/hotelReservation/vendor/github.com/rs/zerolog/internal/cbor/cbor.go
new file mode 100644
index 000000000..1bf144380
--- /dev/null
+++ b/hotelReservation/vendor/github.com/rs/zerolog/internal/cbor/cbor.go
@@ -0,0 +1,102 @@
+// Package cbor provides primitives for storing different data
+// in the CBOR (binary) format. CBOR is defined in RFC7049.
+package cbor
+
+import "time"
+
+const (
+	majorOffset   = 5
+	additionalMax = 23
+
+	// Non Values.
+	additionalTypeBoolFalse byte = 20
+	additionalTypeBoolTrue  byte = 21
+	additionalTypeNull      byte = 22
+
+	// Integer (+ve and -ve) Sub-types.
+	additionalTypeIntUint8  byte = 24
+	additionalTypeIntUint16 byte = 25
+	additionalTypeIntUint32 byte = 26
+	additionalTypeIntUint64 byte = 27
+
+	// Float Sub-types.
+	additionalTypeFloat16 byte = 25
+	additionalTypeFloat32 byte = 26
+	additionalTypeFloat64 byte = 27
+	additionalTypeBreak   byte = 31
+
+	// Tag Sub-types.
+	additionalTypeTimestamp    byte = 01
+	additionalTypeEmbeddedCBOR byte = 63
+
+	// Extended Tags - from https://www.iana.org/assignments/cbor-tags/cbor-tags.xhtml
+	additionalTypeTagNetworkAddr   uint16 = 260
+	additionalTypeTagNetworkPrefix uint16 = 261
+	additionalTypeEmbeddedJSON     uint16 = 262
+	additionalTypeTagHexString     uint16 = 263
+
+	// Unspecified number of elements.
+ additionalTypeInfiniteCount byte = 31 +) +const ( + majorTypeUnsignedInt byte = iota << majorOffset // Major type 0 + majorTypeNegativeInt // Major type 1 + majorTypeByteString // Major type 2 + majorTypeUtf8String // Major type 3 + majorTypeArray // Major type 4 + majorTypeMap // Major type 5 + majorTypeTags // Major type 6 + majorTypeSimpleAndFloat // Major type 7 +) + +const ( + maskOutAdditionalType byte = (7 << majorOffset) + maskOutMajorType byte = 31 +) + +const ( + float32Nan = "\xfa\x7f\xc0\x00\x00" + float32PosInfinity = "\xfa\x7f\x80\x00\x00" + float32NegInfinity = "\xfa\xff\x80\x00\x00" + float64Nan = "\xfb\x7f\xf8\x00\x00\x00\x00\x00\x00" + float64PosInfinity = "\xfb\x7f\xf0\x00\x00\x00\x00\x00\x00" + float64NegInfinity = "\xfb\xff\xf0\x00\x00\x00\x00\x00\x00" +) + +// IntegerTimeFieldFormat indicates the format of timestamp decoded +// from an integer (time in seconds). +var IntegerTimeFieldFormat = time.RFC3339 + +// NanoTimeFieldFormat indicates the format of timestamp decoded +// from a float value (time in seconds and nanoseconds). +var NanoTimeFieldFormat = time.RFC3339Nano + +func appendCborTypePrefix(dst []byte, major byte, number uint64) []byte { + byteCount := 8 + var minor byte + switch { + case number < 256: + byteCount = 1 + minor = additionalTypeIntUint8 + + case number < 65536: + byteCount = 2 + minor = additionalTypeIntUint16 + + case number < 4294967296: + byteCount = 4 + minor = additionalTypeIntUint32 + + default: + byteCount = 8 + minor = additionalTypeIntUint64 + + } + + dst = append(dst, major|minor) + byteCount-- + for ; byteCount >= 0; byteCount-- { + dst = append(dst, byte(number>>(uint(byteCount)*8))) + } + return dst +} diff --git a/hotelReservation/vendor/github.com/rs/zerolog/internal/cbor/decode_stream.go b/hotelReservation/vendor/github.com/rs/zerolog/internal/cbor/decode_stream.go new file mode 100644 index 000000000..616bed65d --- /dev/null +++ b/hotelReservation/vendor/github.com/rs/zerolog/internal/cbor/decode_stream.go @@ -0,0 +1,654 @@ +package cbor + +// This file contains code to decode a stream of CBOR Data into JSON. + +import ( + "bufio" + "bytes" + "encoding/base64" + "fmt" + "io" + "math" + "net" + "runtime" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +var decodeTimeZone *time.Location + +const hexTable = "0123456789abcdef" + +const isFloat32 = 4 +const isFloat64 = 8 + +func readNBytes(src *bufio.Reader, n int) []byte { + ret := make([]byte, n) + for i := 0; i < n; i++ { + ch, e := src.ReadByte() + if e != nil { + panic(fmt.Errorf("Tried to Read %d Bytes.. But hit end of file", n)) + } + ret[i] = ch + } + return ret +} + +func readByte(src *bufio.Reader) byte { + b, e := src.ReadByte() + if e != nil { + panic(fmt.Errorf("Tried to Read 1 Byte.. 
But hit end of file")) + } + return b +} + +func decodeIntAdditionalType(src *bufio.Reader, minor byte) int64 { + val := int64(0) + if minor <= 23 { + val = int64(minor) + } else { + bytesToRead := 0 + switch minor { + case additionalTypeIntUint8: + bytesToRead = 1 + case additionalTypeIntUint16: + bytesToRead = 2 + case additionalTypeIntUint32: + bytesToRead = 4 + case additionalTypeIntUint64: + bytesToRead = 8 + default: + panic(fmt.Errorf("Invalid Additional Type: %d in decodeInteger (expected <28)", minor)) + } + pb := readNBytes(src, bytesToRead) + for i := 0; i < bytesToRead; i++ { + val = val * 256 + val += int64(pb[i]) + } + } + return val +} + +func decodeInteger(src *bufio.Reader) int64 { + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeUnsignedInt && major != majorTypeNegativeInt { + panic(fmt.Errorf("Major type is: %d in decodeInteger!! (expected 0 or 1)", major)) + } + val := decodeIntAdditionalType(src, minor) + if major == 0 { + return val + } + return (-1 - val) +} + +func decodeFloat(src *bufio.Reader) (float64, int) { + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeSimpleAndFloat { + panic(fmt.Errorf("Incorrect Major type is: %d in decodeFloat", major)) + } + + switch minor { + case additionalTypeFloat16: + panic(fmt.Errorf("float16 is not suppported in decodeFloat")) + + case additionalTypeFloat32: + pb := readNBytes(src, 4) + switch string(pb) { + case float32Nan: + return math.NaN(), isFloat32 + case float32PosInfinity: + return math.Inf(0), isFloat32 + case float32NegInfinity: + return math.Inf(-1), isFloat32 + } + n := uint32(0) + for i := 0; i < 4; i++ { + n = n * 256 + n += uint32(pb[i]) + } + val := math.Float32frombits(n) + return float64(val), isFloat32 + case additionalTypeFloat64: + pb := readNBytes(src, 8) + switch string(pb) { + case float64Nan: + return math.NaN(), isFloat64 + case float64PosInfinity: + return math.Inf(0), isFloat64 + case float64NegInfinity: + return math.Inf(-1), isFloat64 + } + n := uint64(0) + for i := 0; i < 8; i++ { + n = n * 256 + n += uint64(pb[i]) + } + val := math.Float64frombits(n) + return val, isFloat64 + } + panic(fmt.Errorf("Invalid Additional Type: %d in decodeFloat", minor)) +} + +func decodeStringComplex(dst []byte, s string, pos uint) []byte { + i := int(pos) + start := 0 + + for i < len(s) { + b := s[i] + if b >= utf8.RuneSelf { + r, size := utf8.DecodeRuneInString(s[i:]) + if r == utf8.RuneError && size == 1 { + // In case of error, first append previous simple characters to + // the byte slice if any and append a replacement character code + // in place of the invalid sequence. + if start < i { + dst = append(dst, s[start:i]...) + } + dst = append(dst, `\ufffd`...) + i += size + start = i + continue + } + i += size + continue + } + if b >= 0x20 && b <= 0x7e && b != '\\' && b != '"' { + i++ + continue + } + // We encountered a character that needs to be encoded. + // Let's append the previous simple characters to the byte slice + // and switch our operation to read and encode the remainder + // characters byte-by-byte. + if start < i { + dst = append(dst, s[start:i]...) 
+ } + switch b { + case '"', '\\': + dst = append(dst, '\\', b) + case '\b': + dst = append(dst, '\\', 'b') + case '\f': + dst = append(dst, '\\', 'f') + case '\n': + dst = append(dst, '\\', 'n') + case '\r': + dst = append(dst, '\\', 'r') + case '\t': + dst = append(dst, '\\', 't') + default: + dst = append(dst, '\\', 'u', '0', '0', hexTable[b>>4], hexTable[b&0xF]) + } + i++ + start = i + } + if start < len(s) { + dst = append(dst, s[start:]...) + } + return dst +} + +func decodeString(src *bufio.Reader, noQuotes bool) []byte { + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeByteString { + panic(fmt.Errorf("Major type is: %d in decodeString", major)) + } + result := []byte{} + if !noQuotes { + result = append(result, '"') + } + length := decodeIntAdditionalType(src, minor) + len := int(length) + pbs := readNBytes(src, len) + result = append(result, pbs...) + if noQuotes { + return result + } + return append(result, '"') +} +func decodeStringToDataUrl(src *bufio.Reader, mimeType string) []byte { + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeByteString { + panic(fmt.Errorf("Major type is: %d in decodeString", major)) + } + length := decodeIntAdditionalType(src, minor) + l := int(length) + enc := base64.StdEncoding + lEnc := enc.EncodedLen(l) + result := make([]byte, len("\"data:;base64,\"")+len(mimeType)+lEnc) + dest := result + u := copy(dest, "\"data:") + dest = dest[u:] + u = copy(dest, mimeType) + dest = dest[u:] + u = copy(dest, ";base64,") + dest = dest[u:] + pbs := readNBytes(src, l) + enc.Encode(dest, pbs) + dest = dest[lEnc:] + dest[0] = '"' + return result +} + +func decodeUTF8String(src *bufio.Reader) []byte { + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeUtf8String { + panic(fmt.Errorf("Major type is: %d in decodeUTF8String", major)) + } + result := []byte{'"'} + length := decodeIntAdditionalType(src, minor) + len := int(length) + pbs := readNBytes(src, len) + + for i := 0; i < len; i++ { + // Check if the character needs encoding. Control characters, slashes, + // and the double quote need json encoding. Bytes above the ascii + // boundary needs utf8 encoding. + if pbs[i] < 0x20 || pbs[i] > 0x7e || pbs[i] == '\\' || pbs[i] == '"' { + // We encountered a character that needs to be encoded. Switch + // to complex version of the algorithm. + dst := []byte{'"'} + dst = decodeStringComplex(dst, string(pbs), uint(i)) + return append(dst, '"') + } + } + // The string has no need for encoding and therefore is directly + // appended to the byte slice. + result = append(result, pbs...) 
+ return append(result, '"') +} + +func array2Json(src *bufio.Reader, dst io.Writer) { + dst.Write([]byte{'['}) + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeArray { + panic(fmt.Errorf("Major type is: %d in array2Json", major)) + } + len := 0 + unSpecifiedCount := false + if minor == additionalTypeInfiniteCount { + unSpecifiedCount = true + } else { + length := decodeIntAdditionalType(src, minor) + len = int(length) + } + for i := 0; unSpecifiedCount || i < len; i++ { + if unSpecifiedCount { + pb, e := src.Peek(1) + if e != nil { + panic(e) + } + if pb[0] == majorTypeSimpleAndFloat|additionalTypeBreak { + readByte(src) + break + } + } + cbor2JsonOneObject(src, dst) + if unSpecifiedCount { + pb, e := src.Peek(1) + if e != nil { + panic(e) + } + if pb[0] == majorTypeSimpleAndFloat|additionalTypeBreak { + readByte(src) + break + } + dst.Write([]byte{','}) + } else if i+1 < len { + dst.Write([]byte{','}) + } + } + dst.Write([]byte{']'}) +} + +func map2Json(src *bufio.Reader, dst io.Writer) { + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeMap { + panic(fmt.Errorf("Major type is: %d in map2Json", major)) + } + len := 0 + unSpecifiedCount := false + if minor == additionalTypeInfiniteCount { + unSpecifiedCount = true + } else { + length := decodeIntAdditionalType(src, minor) + len = int(length) + } + dst.Write([]byte{'{'}) + for i := 0; unSpecifiedCount || i < len; i++ { + if unSpecifiedCount { + pb, e := src.Peek(1) + if e != nil { + panic(e) + } + if pb[0] == majorTypeSimpleAndFloat|additionalTypeBreak { + readByte(src) + break + } + } + cbor2JsonOneObject(src, dst) + if i%2 == 0 { + // Even position values are keys. + dst.Write([]byte{':'}) + } else { + if unSpecifiedCount { + pb, e := src.Peek(1) + if e != nil { + panic(e) + } + if pb[0] == majorTypeSimpleAndFloat|additionalTypeBreak { + readByte(src) + break + } + dst.Write([]byte{','}) + } else if i+1 < len { + dst.Write([]byte{','}) + } + } + } + dst.Write([]byte{'}'}) +} + +func decodeTagData(src *bufio.Reader) []byte { + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeTags { + panic(fmt.Errorf("Major type is: %d in decodeTagData", major)) + } + switch minor { + case additionalTypeTimestamp: + return decodeTimeStamp(src) + case additionalTypeIntUint8: + val := decodeIntAdditionalType(src, minor) + switch byte(val) { + case additionalTypeEmbeddedCBOR: + pb := readByte(src) + dataMajor := pb & maskOutAdditionalType + if dataMajor != majorTypeByteString { + panic(fmt.Errorf("Unsupported embedded Type: %d in decodeEmbeddedCBOR", dataMajor)) + } + src.UnreadByte() + return decodeStringToDataUrl(src, "application/cbor") + default: + panic(fmt.Errorf("Unsupported Additional Tag Type: %d in decodeTagData", val)) + } + + // Tag value is larger than 256 (so uint16). + case additionalTypeIntUint16: + val := decodeIntAdditionalType(src, minor) + + switch uint16(val) { + case additionalTypeEmbeddedJSON: + pb := readByte(src) + dataMajor := pb & maskOutAdditionalType + if dataMajor != majorTypeByteString { + panic(fmt.Errorf("Unsupported embedded Type: %d in decodeEmbeddedJSON", dataMajor)) + } + src.UnreadByte() + return decodeString(src, true) + + case additionalTypeTagNetworkAddr: + octets := decodeString(src, true) + ss := []byte{'"'} + switch len(octets) { + case 6: // MAC address. 
+ ha := net.HardwareAddr(octets) + ss = append(append(ss, ha.String()...), '"') + case 4: // IPv4 address. + fallthrough + case 16: // IPv6 address. + ip := net.IP(octets) + ss = append(append(ss, ip.String()...), '"') + default: + panic(fmt.Errorf("Unexpected Network Address length: %d (expected 4,6,16)", len(octets))) + } + return ss + + case additionalTypeTagNetworkPrefix: + pb := readByte(src) + if pb != majorTypeMap|0x1 { + panic(fmt.Errorf("IP Prefix is NOT of MAP of 1 elements as expected")) + } + octets := decodeString(src, true) + val := decodeInteger(src) + ip := net.IP(octets) + var mask net.IPMask + pfxLen := int(val) + if len(octets) == 4 { + mask = net.CIDRMask(pfxLen, 32) + } else { + mask = net.CIDRMask(pfxLen, 128) + } + ipPfx := net.IPNet{IP: ip, Mask: mask} + ss := []byte{'"'} + ss = append(append(ss, ipPfx.String()...), '"') + return ss + + case additionalTypeTagHexString: + octets := decodeString(src, true) + ss := []byte{'"'} + for _, v := range octets { + ss = append(ss, hexTable[v>>4], hexTable[v&0x0f]) + } + return append(ss, '"') + + default: + panic(fmt.Errorf("Unsupported Additional Tag Type: %d in decodeTagData", val)) + } + } + panic(fmt.Errorf("Unsupported Additional Type: %d in decodeTagData", minor)) +} + +func decodeTimeStamp(src *bufio.Reader) []byte { + pb := readByte(src) + src.UnreadByte() + tsMajor := pb & maskOutAdditionalType + if tsMajor == majorTypeUnsignedInt || tsMajor == majorTypeNegativeInt { + n := decodeInteger(src) + t := time.Unix(n, 0) + if decodeTimeZone != nil { + t = t.In(decodeTimeZone) + } else { + t = t.In(time.UTC) + } + tsb := []byte{} + tsb = append(tsb, '"') + tsb = t.AppendFormat(tsb, IntegerTimeFieldFormat) + tsb = append(tsb, '"') + return tsb + } else if tsMajor == majorTypeSimpleAndFloat { + n, _ := decodeFloat(src) + secs := int64(n) + n -= float64(secs) + n *= float64(1e9) + t := time.Unix(secs, int64(n)) + if decodeTimeZone != nil { + t = t.In(decodeTimeZone) + } else { + t = t.In(time.UTC) + } + tsb := []byte{} + tsb = append(tsb, '"') + tsb = t.AppendFormat(tsb, NanoTimeFieldFormat) + tsb = append(tsb, '"') + return tsb + } + panic(fmt.Errorf("TS format is neigther int nor float: %d", tsMajor)) +} + +func decodeSimpleFloat(src *bufio.Reader) []byte { + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeSimpleAndFloat { + panic(fmt.Errorf("Major type is: %d in decodeSimpleFloat", major)) + } + switch minor { + case additionalTypeBoolTrue: + return []byte("true") + case additionalTypeBoolFalse: + return []byte("false") + case additionalTypeNull: + return []byte("null") + case additionalTypeFloat16: + fallthrough + case additionalTypeFloat32: + fallthrough + case additionalTypeFloat64: + src.UnreadByte() + v, bc := decodeFloat(src) + ba := []byte{} + switch { + case math.IsNaN(v): + return []byte("\"NaN\"") + case math.IsInf(v, 1): + return []byte("\"+Inf\"") + case math.IsInf(v, -1): + return []byte("\"-Inf\"") + } + if bc == isFloat32 { + ba = strconv.AppendFloat(ba, v, 'f', -1, 32) + } else if bc == isFloat64 { + ba = strconv.AppendFloat(ba, v, 'f', -1, 64) + } else { + panic(fmt.Errorf("Invalid Float precision from decodeFloat: %d", bc)) + } + return ba + default: + panic(fmt.Errorf("Invalid Additional Type: %d in decodeSimpleFloat", minor)) + } +} + +func cbor2JsonOneObject(src *bufio.Reader, dst io.Writer) { + pb, e := src.Peek(1) + if e != nil { + panic(e) + } + major := (pb[0] & maskOutAdditionalType) + + switch major { + case majorTypeUnsignedInt: + 
fallthrough + case majorTypeNegativeInt: + n := decodeInteger(src) + dst.Write([]byte(strconv.Itoa(int(n)))) + + case majorTypeByteString: + s := decodeString(src, false) + dst.Write(s) + + case majorTypeUtf8String: + s := decodeUTF8String(src) + dst.Write(s) + + case majorTypeArray: + array2Json(src, dst) + + case majorTypeMap: + map2Json(src, dst) + + case majorTypeTags: + s := decodeTagData(src) + dst.Write(s) + + case majorTypeSimpleAndFloat: + s := decodeSimpleFloat(src) + dst.Write(s) + } +} + +func moreBytesToRead(src *bufio.Reader) bool { + _, e := src.ReadByte() + if e == nil { + src.UnreadByte() + return true + } + return false +} + +// Cbor2JsonManyObjects decodes all the CBOR Objects read from src +// reader. It keeps on decoding until reader returns EOF (error when reading). +// Decoded string is written to the dst. At the end of every CBOR Object +// newline is written to the output stream. +// +// Returns error (if any) that was encountered during decode. +// The child functions will generate a panic when error is encountered and +// this function will recover non-runtime Errors and return the reason as error. +func Cbor2JsonManyObjects(src io.Reader, dst io.Writer) (err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = r.(error) + } + }() + bufRdr := bufio.NewReader(src) + for moreBytesToRead(bufRdr) { + cbor2JsonOneObject(bufRdr, dst) + dst.Write([]byte("\n")) + } + return nil +} + +// Detect if the bytes to be printed is Binary or not. +func binaryFmt(p []byte) bool { + if len(p) > 0 && p[0] > 0x7F { + return true + } + return false +} + +func getReader(str string) *bufio.Reader { + return bufio.NewReader(strings.NewReader(str)) +} + +// DecodeIfBinaryToString converts a binary formatted log msg to a +// JSON formatted String Log message - suitable for printing to Console/Syslog. +func DecodeIfBinaryToString(in []byte) string { + if binaryFmt(in) { + var b bytes.Buffer + Cbor2JsonManyObjects(strings.NewReader(string(in)), &b) + return b.String() + } + return string(in) +} + +// DecodeObjectToStr checks if the input is a binary format, if so, +// it will decode a single Object and return the decoded string. +func DecodeObjectToStr(in []byte) string { + if binaryFmt(in) { + var b bytes.Buffer + cbor2JsonOneObject(getReader(string(in)), &b) + return b.String() + } + return string(in) +} + +// DecodeIfBinaryToBytes checks if the input is a binary format, if so, +// it will decode all Objects and return the decoded string as byte array. +func DecodeIfBinaryToBytes(in []byte) []byte { + if binaryFmt(in) { + var b bytes.Buffer + Cbor2JsonManyObjects(bytes.NewReader(in), &b) + return b.Bytes() + } + return in +} diff --git a/hotelReservation/vendor/github.com/rs/zerolog/internal/cbor/string.go b/hotelReservation/vendor/github.com/rs/zerolog/internal/cbor/string.go new file mode 100644 index 000000000..9fc9a4f8e --- /dev/null +++ b/hotelReservation/vendor/github.com/rs/zerolog/internal/cbor/string.go @@ -0,0 +1,117 @@ +package cbor + +import "fmt" + +// AppendStrings encodes and adds an array of strings to the dst byte array. 
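+//
+// Worked example (editor's note): []string{"geo", "rate"} encodes as the
+// array header byte 0x82 (majorTypeArray|2, since 2 <= additionalMax),
+// then each element with its own UTF-8 string header: 0x63 for the
+// 3-byte "geo" and 0x64 for the 4-byte "rate", followed by the raw bytes.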
+func (e Encoder) AppendStrings(dst []byte, vals []string) []byte { + major := majorTypeArray + l := len(vals) + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendString(dst, v) + } + return dst +} + +// AppendString encodes and adds a string to the dst byte array. +func (Encoder) AppendString(dst []byte, s string) []byte { + major := majorTypeUtf8String + + l := len(s) + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, majorTypeUtf8String, uint64(l)) + } + return append(dst, s...) +} + +// AppendStringers encodes and adds an array of Stringer values +// to the dst byte array. +func (e Encoder) AppendStringers(dst []byte, vals []fmt.Stringer) []byte { + if len(vals) == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + dst = e.AppendArrayStart(dst) + dst = e.AppendStringer(dst, vals[0]) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = e.AppendStringer(dst, val) + } + } + return e.AppendArrayEnd(dst) +} + +// AppendStringer encodes and adds the Stringer value to the dst +// byte array. +func (e Encoder) AppendStringer(dst []byte, val fmt.Stringer) []byte { + if val == nil { + return e.AppendNil(dst) + } + return e.AppendString(dst, val.String()) +} + +// AppendBytes encodes and adds an array of bytes to the dst byte array. +func (Encoder) AppendBytes(dst, s []byte) []byte { + major := majorTypeByteString + + l := len(s) + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + return append(dst, s...) +} + +// AppendEmbeddedJSON adds a tag and embeds input JSON as such. +func AppendEmbeddedJSON(dst, s []byte) []byte { + major := majorTypeTags + minor := additionalTypeEmbeddedJSON + + // Append the TAG to indicate this is Embedded JSON. + dst = append(dst, major|additionalTypeIntUint16) + dst = append(dst, byte(minor>>8)) + dst = append(dst, byte(minor&0xff)) + + // Append the JSON Object as Byte String. + major = majorTypeByteString + + l := len(s) + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + return append(dst, s...) +} + +// AppendEmbeddedCBOR adds a tag and embeds input CBOR as such. +func AppendEmbeddedCBOR(dst, s []byte) []byte { + major := majorTypeTags + minor := additionalTypeEmbeddedCBOR + + // Append the TAG to indicate this is Embedded JSON. + dst = append(dst, major|additionalTypeIntUint8) + dst = append(dst, minor) + + // Append the CBOR Object as Byte String. + major = majorTypeByteString + + l := len(s) + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + return append(dst, s...) 
+} diff --git a/hotelReservation/vendor/github.com/rs/zerolog/internal/cbor/time.go b/hotelReservation/vendor/github.com/rs/zerolog/internal/cbor/time.go new file mode 100644 index 000000000..d81fb1257 --- /dev/null +++ b/hotelReservation/vendor/github.com/rs/zerolog/internal/cbor/time.go @@ -0,0 +1,93 @@ +package cbor + +import ( + "time" +) + +func appendIntegerTimestamp(dst []byte, t time.Time) []byte { + major := majorTypeTags + minor := additionalTypeTimestamp + dst = append(dst, major|minor) + secs := t.Unix() + var val uint64 + if secs < 0 { + major = majorTypeNegativeInt + val = uint64(-secs - 1) + } else { + major = majorTypeUnsignedInt + val = uint64(secs) + } + dst = appendCborTypePrefix(dst, major, val) + return dst +} + +func (e Encoder) appendFloatTimestamp(dst []byte, t time.Time) []byte { + major := majorTypeTags + minor := additionalTypeTimestamp + dst = append(dst, major|minor) + secs := t.Unix() + nanos := t.Nanosecond() + var val float64 + val = float64(secs)*1.0 + float64(nanos)*1e-9 + return e.AppendFloat64(dst, val) +} + +// AppendTime encodes and adds a timestamp to the dst byte array. +func (e Encoder) AppendTime(dst []byte, t time.Time, unused string) []byte { + utc := t.UTC() + if utc.Nanosecond() == 0 { + return appendIntegerTimestamp(dst, utc) + } + return e.appendFloatTimestamp(dst, utc) +} + +// AppendTimes encodes and adds an array of timestamps to the dst byte array. +func (e Encoder) AppendTimes(dst []byte, vals []time.Time, unused string) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + + for _, t := range vals { + dst = e.AppendTime(dst, t, unused) + } + return dst +} + +// AppendDuration encodes and adds a duration to the dst byte array. +// useInt field indicates whether to store the duration as seconds (integer) or +// as seconds+nanoseconds (float). +func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte { + if useInt { + return e.AppendInt64(dst, int64(d/unit)) + } + return e.AppendFloat64(dst, float64(d)/float64(unit)) +} + +// AppendDurations encodes and adds an array of durations to the dst byte array. +// useInt field indicates whether to store the duration as seconds (integer) or +// as seconds+nanoseconds (float). +func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, d := range vals { + dst = e.AppendDuration(dst, d, unit, useInt) + } + return dst +} diff --git a/hotelReservation/vendor/github.com/rs/zerolog/internal/cbor/types.go b/hotelReservation/vendor/github.com/rs/zerolog/internal/cbor/types.go new file mode 100644 index 000000000..6f5383289 --- /dev/null +++ b/hotelReservation/vendor/github.com/rs/zerolog/internal/cbor/types.go @@ -0,0 +1,486 @@ +package cbor + +import ( + "fmt" + "math" + "net" + "reflect" +) + +// AppendNil inserts a 'Nil' object into the dst byte array. +func (Encoder) AppendNil(dst []byte) []byte { + return append(dst, majorTypeSimpleAndFloat|additionalTypeNull) +} + +// AppendBeginMarker inserts a map start into the dst byte array. 
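AppendTime and its helpers above implement RFC 8949's tag 1 (epoch-based timestamp): a whole-second time becomes a tagged integer, a sub-second time a tagged float64 of seconds plus fraction. A small standalone sketch of that branch, using only the standard library:

```go
package main

import (
	"fmt"
	"time"
)

// timestampShape mirrors AppendTime's decision above: zero nanoseconds selects
// the compact integer form, anything finer the float64 form.
func timestampShape(t time.Time) string {
	utc := t.UTC()
	if utc.Nanosecond() == 0 {
		return fmt.Sprintf("tag(1) integer: %d", utc.Unix())
	}
	return fmt.Sprintf("tag(1) float64: %.9f",
		float64(utc.Unix())+float64(utc.Nanosecond())*1e-9)
}

func main() {
	fmt.Println(timestampShape(time.Unix(1700000000, 0)))         // integer form
	fmt.Println(timestampShape(time.Unix(1700000000, 250000000))) // float form
}
```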
+func (Encoder) AppendBeginMarker(dst []byte) []byte { + return append(dst, majorTypeMap|additionalTypeInfiniteCount) +} + +// AppendEndMarker inserts a map end into the dst byte array. +func (Encoder) AppendEndMarker(dst []byte) []byte { + return append(dst, majorTypeSimpleAndFloat|additionalTypeBreak) +} + +// AppendObjectData takes an object in the form of a byte array and appends it to dst. +func (Encoder) AppendObjectData(dst []byte, o []byte) []byte { + // o starts with its own BeginMarker, which is already + // present in dst and must not be copied again. + return append(dst, o[1:]...) +} + +// AppendArrayStart adds markers to indicate the start of an array. +func (Encoder) AppendArrayStart(dst []byte) []byte { + return append(dst, majorTypeArray|additionalTypeInfiniteCount) +} + +// AppendArrayEnd adds markers to indicate the end of an array. +func (Encoder) AppendArrayEnd(dst []byte) []byte { + return append(dst, majorTypeSimpleAndFloat|additionalTypeBreak) +} + +// AppendArrayDelim adds markers to indicate the end of a particular array element. +func (Encoder) AppendArrayDelim(dst []byte) []byte { + // No delimiters are needed in CBOR. + return dst +} + +// AppendLineBreak is a no-op that keeps API compatibility with the JSON encoder. +func (Encoder) AppendLineBreak(dst []byte) []byte { + // No line breaks needed in binary format. + return dst +} + +// AppendBool encodes and inserts a boolean value into the dst byte array. +func (Encoder) AppendBool(dst []byte, val bool) []byte { + b := additionalTypeBoolFalse + if val { + b = additionalTypeBoolTrue + } + return append(dst, majorTypeSimpleAndFloat|b) +} + +// AppendBools encodes and inserts an array of boolean values into the dst byte array. +func (e Encoder) AppendBools(dst []byte, vals []bool) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendBool(dst, v) + } + return dst +} + +// AppendInt encodes and inserts an integer value into the dst byte array. +func (Encoder) AppendInt(dst []byte, val int) []byte { + major := majorTypeUnsignedInt + contentVal := val + if val < 0 { + major = majorTypeNegativeInt + contentVal = -val - 1 + } + if contentVal <= additionalMax { + lb := byte(contentVal) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(contentVal)) + } + return dst +} + +// AppendInts encodes and inserts an array of integer values into the dst byte array. +func (e Encoder) AppendInts(dst []byte, vals []int) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendInt(dst, v) + } + return dst +} + +// AppendInt8 encodes and inserts an int8 value into the dst byte array. +func (e Encoder) AppendInt8(dst []byte, val int8) []byte { + return e.AppendInt(dst, int(val)) +} + +// AppendInts8 encodes and inserts an array of int8 values into the dst byte array.
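AppendInt above relies on CBOR's convention that a negative integer -n is stored under major type 1 with the unsigned argument n-1, so no sign bit is ever encoded. A standalone illustration (major-type values per RFC 8949, not imported from this package):

```go
package main

import "fmt"

// cborIntArgument mirrors AppendInt above: CBOR stores a negative integer -n
// under major type 1 with argument n-1, so the argument is always unsigned.
// (The minimum int is ignored here for brevity.)
func cborIntArgument(val int) (majorType byte, arg uint64) {
	if val < 0 {
		return 1, uint64(-val - 1) // e.g. -500 -> argument 499
	}
	return 0, uint64(val)
}

func main() {
	mt, arg := cborIntArgument(-500)
	fmt.Println(mt, arg) // 1 499
}
```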
+func (e Encoder) AppendInts8(dst []byte, vals []int8) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendInt(dst, int(v)) + } + return dst +} + +// AppendInt16 encodes and inserts a int16 value into the dst byte array. +func (e Encoder) AppendInt16(dst []byte, val int16) []byte { + return e.AppendInt(dst, int(val)) +} + +// AppendInts16 encodes and inserts an array of int16 values into the dst byte array. +func (e Encoder) AppendInts16(dst []byte, vals []int16) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendInt(dst, int(v)) + } + return dst +} + +// AppendInt32 encodes and inserts a int32 value into the dst byte array. +func (e Encoder) AppendInt32(dst []byte, val int32) []byte { + return e.AppendInt(dst, int(val)) +} + +// AppendInts32 encodes and inserts an array of int32 values into the dst byte array. +func (e Encoder) AppendInts32(dst []byte, vals []int32) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendInt(dst, int(v)) + } + return dst +} + +// AppendInt64 encodes and inserts a int64 value into the dst byte array. +func (Encoder) AppendInt64(dst []byte, val int64) []byte { + major := majorTypeUnsignedInt + contentVal := val + if val < 0 { + major = majorTypeNegativeInt + contentVal = -val - 1 + } + if contentVal <= additionalMax { + lb := byte(contentVal) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(contentVal)) + } + return dst +} + +// AppendInts64 encodes and inserts an array of int64 values into the dst byte array. +func (e Encoder) AppendInts64(dst []byte, vals []int64) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendInt64(dst, v) + } + return dst +} + +// AppendUint encodes and inserts an unsigned integer value into the dst byte array. +func (e Encoder) AppendUint(dst []byte, val uint) []byte { + return e.AppendInt64(dst, int64(val)) +} + +// AppendUints encodes and inserts an array of unsigned integer values into the dst byte array. +func (e Encoder) AppendUints(dst []byte, vals []uint) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendUint(dst, v) + } + return dst +} + +// AppendUint8 encodes and inserts a unsigned int8 value into the dst byte array. 
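Every encoder in this file repeats the same header rule: a length (or value) of 23 or less is packed into the low 5 bits of the type byte, while anything larger makes appendCborTypePrefix write a 1/2/4/8-byte big-endian argument after a marker. A self-contained sketch of the short forms, with constants taken from RFC 8949 rather than this package (lengths above 0xFFFF are omitted for brevity):

```go
package main

import "fmt"

// encodeTextHeader shows the CBOR header rule used throughout this file,
// specialized to text strings (major type 3).
func encodeTextHeader(dst []byte, length int) []byte {
	const majorUTF8 = 0x60 // major type 3 << 5
	switch {
	case length <= 23:
		return append(dst, majorUTF8|byte(length)) // length inline in low 5 bits
	case length <= 0xFF:
		return append(dst, majorUTF8|24, byte(length)) // 1-byte length follows
	default:
		return append(dst, majorUTF8|25, byte(length>>8), byte(length)) // 2-byte length
	}
}

func main() {
	fmt.Printf("% x\n", encodeTextHeader(nil, 5))   // 65 -> short string
	fmt.Printf("% x\n", encodeTextHeader(nil, 100)) // 78 64 -> length in next byte
}
```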
+func (e Encoder) AppendUint8(dst []byte, val uint8) []byte { + return e.AppendUint(dst, uint(val)) +} + +// AppendUints8 encodes and inserts an array of uint8 values into the dst byte array. +func (e Encoder) AppendUints8(dst []byte, vals []uint8) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendUint8(dst, v) + } + return dst +} + +// AppendUint16 encodes and inserts a uint16 value into the dst byte array. +func (e Encoder) AppendUint16(dst []byte, val uint16) []byte { + return e.AppendUint(dst, uint(val)) +} + +// AppendUints16 encodes and inserts an array of uint16 values into the dst byte array. +func (e Encoder) AppendUints16(dst []byte, vals []uint16) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendUint16(dst, v) + } + return dst +} + +// AppendUint32 encodes and inserts a uint32 value into the dst byte array. +func (e Encoder) AppendUint32(dst []byte, val uint32) []byte { + return e.AppendUint(dst, uint(val)) +} + +// AppendUints32 encodes and inserts an array of uint32 values into the dst byte array. +func (e Encoder) AppendUints32(dst []byte, vals []uint32) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendUint32(dst, v) + } + return dst +} + +// AppendUint64 encodes and inserts a uint64 value into the dst byte array. +func (Encoder) AppendUint64(dst []byte, val uint64) []byte { + major := majorTypeUnsignedInt + contentVal := val + if contentVal <= additionalMax { + lb := byte(contentVal) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, contentVal) + } + return dst +} + +// AppendUints64 encodes and inserts an array of uint64 values into the dst byte array. +func (e Encoder) AppendUints64(dst []byte, vals []uint64) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendUint64(dst, v) + } + return dst +} + +// AppendFloat32 encodes and inserts a single precision float value into the dst byte array. +func (Encoder) AppendFloat32(dst []byte, val float32) []byte { + switch { + case math.IsNaN(float64(val)): + return append(dst, "\xfa\x7f\xc0\x00\x00"...) + case math.IsInf(float64(val), 1): + return append(dst, "\xfa\x7f\x80\x00\x00"...) + case math.IsInf(float64(val), -1): + return append(dst, "\xfa\xff\x80\x00\x00"...) 
+ } + major := majorTypeSimpleAndFloat + subType := additionalTypeFloat32 + n := math.Float32bits(val) + var buf [4]byte + for i := uint(0); i < 4; i++ { + buf[i] = byte(n >> ((3 - i) * 8)) + } + return append(append(dst, major|subType), buf[0], buf[1], buf[2], buf[3]) +} + +// AppendFloats32 encodes and inserts an array of single precision float value into the dst byte array. +func (e Encoder) AppendFloats32(dst []byte, vals []float32) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendFloat32(dst, v) + } + return dst +} + +// AppendFloat64 encodes and inserts a double precision float value into the dst byte array. +func (Encoder) AppendFloat64(dst []byte, val float64) []byte { + switch { + case math.IsNaN(val): + return append(dst, "\xfb\x7f\xf8\x00\x00\x00\x00\x00\x00"...) + case math.IsInf(val, 1): + return append(dst, "\xfb\x7f\xf0\x00\x00\x00\x00\x00\x00"...) + case math.IsInf(val, -1): + return append(dst, "\xfb\xff\xf0\x00\x00\x00\x00\x00\x00"...) + } + major := majorTypeSimpleAndFloat + subType := additionalTypeFloat64 + n := math.Float64bits(val) + dst = append(dst, major|subType) + for i := uint(1); i <= 8; i++ { + b := byte(n >> ((8 - i) * 8)) + dst = append(dst, b) + } + return dst +} + +// AppendFloats64 encodes and inserts an array of double precision float values into the dst byte array. +func (e Encoder) AppendFloats64(dst []byte, vals []float64) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendFloat64(dst, v) + } + return dst +} + +// AppendInterface takes an arbitrary object and converts it to JSON and embeds it dst. +func (e Encoder) AppendInterface(dst []byte, i interface{}) []byte { + marshaled, err := JSONMarshalFunc(i) + if err != nil { + return e.AppendString(dst, fmt.Sprintf("marshaling error: %v", err)) + } + return AppendEmbeddedJSON(dst, marshaled) +} + +// AppendType appends the parameter type (as a string) to the input byte slice. +func (e Encoder) AppendType(dst []byte, i interface{}) []byte { + if i == nil { + return e.AppendString(dst, "") + } + return e.AppendString(dst, reflect.TypeOf(i).String()) +} + +// AppendIPAddr encodes and inserts an IP Address (IPv4 or IPv6). +func (e Encoder) AppendIPAddr(dst []byte, ip net.IP) []byte { + dst = append(dst, majorTypeTags|additionalTypeIntUint16) + dst = append(dst, byte(additionalTypeTagNetworkAddr>>8)) + dst = append(dst, byte(additionalTypeTagNetworkAddr&0xff)) + return e.AppendBytes(dst, ip) +} + +// AppendIPPrefix encodes and inserts an IP Address Prefix (Address + Mask Length). +func (e Encoder) AppendIPPrefix(dst []byte, pfx net.IPNet) []byte { + dst = append(dst, majorTypeTags|additionalTypeIntUint16) + dst = append(dst, byte(additionalTypeTagNetworkPrefix>>8)) + dst = append(dst, byte(additionalTypeTagNetworkPrefix&0xff)) + + // Prefix is a tuple (aka MAP of 1 pair of elements) - + // first element is prefix, second is mask length. 
+ dst = append(dst, majorTypeMap|0x1) + dst = e.AppendBytes(dst, pfx.IP) + maskLen, _ := pfx.Mask.Size() + return e.AppendUint8(dst, uint8(maskLen)) +} + +// AppendMACAddr encodes and inserts a Hardware (MAC) address. +func (e Encoder) AppendMACAddr(dst []byte, ha net.HardwareAddr) []byte { + dst = append(dst, majorTypeTags|additionalTypeIntUint16) + dst = append(dst, byte(additionalTypeTagNetworkAddr>>8)) + dst = append(dst, byte(additionalTypeTagNetworkAddr&0xff)) + return e.AppendBytes(dst, ha) +} + +// AppendHex adds a TAG and inserts a hex bytes as a string. +func (e Encoder) AppendHex(dst []byte, val []byte) []byte { + dst = append(dst, majorTypeTags|additionalTypeIntUint16) + dst = append(dst, byte(additionalTypeTagHexString>>8)) + dst = append(dst, byte(additionalTypeTagHexString&0xff)) + return e.AppendBytes(dst, val) +} diff --git a/hotelReservation/vendor/github.com/rs/zerolog/internal/json/base.go b/hotelReservation/vendor/github.com/rs/zerolog/internal/json/base.go new file mode 100644 index 000000000..09ec59f4e --- /dev/null +++ b/hotelReservation/vendor/github.com/rs/zerolog/internal/json/base.go @@ -0,0 +1,19 @@ +package json + +// JSONMarshalFunc is used to marshal interface to JSON encoded byte slice. +// Making it package level instead of embedded in Encoder brings +// some extra efforts at importing, but avoids value copy when the functions +// of Encoder being invoked. +// DO REMEMBER to set this variable at importing, or +// you might get a nil pointer dereference panic at runtime. +var JSONMarshalFunc func(v interface{}) ([]byte, error) + +type Encoder struct{} + +// AppendKey appends a new key to the output JSON. +func (e Encoder) AppendKey(dst []byte, key string) []byte { + if dst[len(dst)-1] != '{' { + dst = append(dst, ',') + } + return append(e.AppendString(dst, key), ':') +} diff --git a/hotelReservation/vendor/github.com/rs/zerolog/internal/json/bytes.go b/hotelReservation/vendor/github.com/rs/zerolog/internal/json/bytes.go new file mode 100644 index 000000000..de64120d1 --- /dev/null +++ b/hotelReservation/vendor/github.com/rs/zerolog/internal/json/bytes.go @@ -0,0 +1,85 @@ +package json + +import "unicode/utf8" + +// AppendBytes is a mirror of appendString with []byte arg +func (Encoder) AppendBytes(dst, s []byte) []byte { + dst = append(dst, '"') + for i := 0; i < len(s); i++ { + if !noEscapeTable[s[i]] { + dst = appendBytesComplex(dst, s, i) + return append(dst, '"') + } + } + dst = append(dst, s...) + return append(dst, '"') +} + +// AppendHex encodes the input bytes to a hex string and appends +// the encoded string to the input byte slice. +// +// The operation loops though each byte and encodes it as hex using +// the hex lookup table. +func (Encoder) AppendHex(dst, s []byte) []byte { + dst = append(dst, '"') + for _, v := range s { + dst = append(dst, hex[v>>4], hex[v&0x0f]) + } + return append(dst, '"') +} + +// appendBytesComplex is a mirror of the appendStringComplex +// with []byte arg +func appendBytesComplex(dst, s []byte, i int) []byte { + start := 0 + for i < len(s) { + b := s[i] + if b >= utf8.RuneSelf { + r, size := utf8.DecodeRune(s[i:]) + if r == utf8.RuneError && size == 1 { + if start < i { + dst = append(dst, s[start:i]...) + } + dst = append(dst, `\ufffd`...) + i += size + start = i + continue + } + i += size + continue + } + if noEscapeTable[b] { + i++ + continue + } + // We encountered a character that needs to be encoded. 
+ // Let's append the previous simple characters to the byte slice + // and switch our operation to read and encode the remainder + // characters byte-by-byte. + if start < i { + dst = append(dst, s[start:i]...) + } + switch b { + case '"', '\\': + dst = append(dst, '\\', b) + case '\b': + dst = append(dst, '\\', 'b') + case '\f': + dst = append(dst, '\\', 'f') + case '\n': + dst = append(dst, '\\', 'n') + case '\r': + dst = append(dst, '\\', 'r') + case '\t': + dst = append(dst, '\\', 't') + default: + dst = append(dst, '\\', 'u', '0', '0', hex[b>>4], hex[b&0xF]) + } + i++ + start = i + } + if start < len(s) { + dst = append(dst, s[start:]...) + } + return dst +} diff --git a/hotelReservation/vendor/github.com/rs/zerolog/internal/json/string.go b/hotelReservation/vendor/github.com/rs/zerolog/internal/json/string.go new file mode 100644 index 000000000..fd7770f2f --- /dev/null +++ b/hotelReservation/vendor/github.com/rs/zerolog/internal/json/string.go @@ -0,0 +1,149 @@ +package json + +import ( + "fmt" + "unicode/utf8" +) + +const hex = "0123456789abcdef" + +var noEscapeTable = [256]bool{} + +func init() { + for i := 0; i <= 0x7e; i++ { + noEscapeTable[i] = i >= 0x20 && i != '\\' && i != '"' + } +} + +// AppendStrings encodes the input strings to json and +// appends the encoded string list to the input byte slice. +func (e Encoder) AppendStrings(dst []byte, vals []string) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = e.AppendString(dst, vals[0]) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = e.AppendString(append(dst, ','), val) + } + } + dst = append(dst, ']') + return dst +} + +// AppendString encodes the input string to json and appends +// the encoded string to the input byte slice. +// +// The operation loops though each byte in the string looking +// for characters that need json or utf8 encoding. If the string +// does not need encoding, then the string is appended in its +// entirety to the byte slice. +// If we encounter a byte that does need encoding, switch up +// the operation and perform a byte-by-byte read-encode-append. +func (Encoder) AppendString(dst []byte, s string) []byte { + // Start with a double quote. + dst = append(dst, '"') + // Loop through each character in the string. + for i := 0; i < len(s); i++ { + // Check if the character needs encoding. Control characters, slashes, + // and the double quote need json encoding. Bytes above the ascii + // boundary needs utf8 encoding. + if !noEscapeTable[s[i]] { + // We encountered a character that needs to be encoded. Switch + // to complex version of the algorithm. + dst = appendStringComplex(dst, s, i) + return append(dst, '"') + } + } + // The string has no need for encoding and therefore is directly + // appended to the byte slice. + dst = append(dst, s...) + // End with a double quote + return append(dst, '"') +} + +// AppendStringers encodes the provided Stringer list to json and +// appends the encoded Stringer list to the input byte slice. +func (e Encoder) AppendStringers(dst []byte, vals []fmt.Stringer) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = e.AppendStringer(dst, vals[0]) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = e.AppendStringer(append(dst, ','), val) + } + } + return append(dst, ']') +} + +// AppendStringer encodes the input Stringer to json and appends the +// encoded Stringer value to the input byte slice. 
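The noEscapeTable built above drives a two-phase strategy: AppendString scans for the first byte that needs attention and, if none is found, copies the whole string in one append; only strings containing control bytes, quotes, backslashes, or non-ASCII fall back to the byte-by-byte escape loop. A tiny sketch of the same classification:

```go
package main

import "fmt"

// needsEscape mirrors the noEscapeTable built above: bytes 0x20..0x7E are safe
// except for '"' and '\\'; everything else (control bytes, non-ASCII) forces
// the slow escaping path.
func needsEscape(b byte) bool {
	return b < 0x20 || b > 0x7e || b == '"' || b == '\\'
}

func main() {
	fmt.Println(needsEscape('a'))  // false -> fast path: copy wholesale
	fmt.Println(needsEscape('\n')) // true  -> slow path: emit \n escape
}
```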
+func (e Encoder) AppendStringer(dst []byte, val fmt.Stringer) []byte { + if val == nil { + return e.AppendInterface(dst, nil) + } + return e.AppendString(dst, val.String()) +} + +//// appendStringComplex is used by appendString to take over an in +// progress JSON string encoding that encountered a character that needs +// to be encoded. +func appendStringComplex(dst []byte, s string, i int) []byte { + start := 0 + for i < len(s) { + b := s[i] + if b >= utf8.RuneSelf { + r, size := utf8.DecodeRuneInString(s[i:]) + if r == utf8.RuneError && size == 1 { + // In case of error, first append previous simple characters to + // the byte slice if any and append a replacement character code + // in place of the invalid sequence. + if start < i { + dst = append(dst, s[start:i]...) + } + dst = append(dst, `\ufffd`...) + i += size + start = i + continue + } + i += size + continue + } + if noEscapeTable[b] { + i++ + continue + } + // We encountered a character that needs to be encoded. + // Let's append the previous simple characters to the byte slice + // and switch our operation to read and encode the remainder + // characters byte-by-byte. + if start < i { + dst = append(dst, s[start:i]...) + } + switch b { + case '"', '\\': + dst = append(dst, '\\', b) + case '\b': + dst = append(dst, '\\', 'b') + case '\f': + dst = append(dst, '\\', 'f') + case '\n': + dst = append(dst, '\\', 'n') + case '\r': + dst = append(dst, '\\', 'r') + case '\t': + dst = append(dst, '\\', 't') + default: + dst = append(dst, '\\', 'u', '0', '0', hex[b>>4], hex[b&0xF]) + } + i++ + start = i + } + if start < len(s) { + dst = append(dst, s[start:]...) + } + return dst +} diff --git a/hotelReservation/vendor/github.com/rs/zerolog/internal/json/time.go b/hotelReservation/vendor/github.com/rs/zerolog/internal/json/time.go new file mode 100644 index 000000000..6a8dc912d --- /dev/null +++ b/hotelReservation/vendor/github.com/rs/zerolog/internal/json/time.go @@ -0,0 +1,113 @@ +package json + +import ( + "strconv" + "time" +) + +const ( + // Import from zerolog/global.go + timeFormatUnix = "" + timeFormatUnixMs = "UNIXMS" + timeFormatUnixMicro = "UNIXMICRO" + timeFormatUnixNano = "UNIXNANO" +) + +// AppendTime formats the input time with the given format +// and appends the encoded string to the input byte slice. +func (e Encoder) AppendTime(dst []byte, t time.Time, format string) []byte { + switch format { + case timeFormatUnix: + return e.AppendInt64(dst, t.Unix()) + case timeFormatUnixMs: + return e.AppendInt64(dst, t.UnixNano()/1000000) + case timeFormatUnixMicro: + return e.AppendInt64(dst, t.UnixNano()/1000) + case timeFormatUnixNano: + return e.AppendInt64(dst, t.UnixNano()) + } + return append(t.AppendFormat(append(dst, '"'), format), '"') +} + +// AppendTimes converts the input times with the given format +// and appends the encoded string list to the input byte slice. 
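The sentinel constants above give AppendTime its dual behavior: the empty format and the UNIX* markers produce bare integers at different resolutions, while any other string is treated as a time layout and the result is quoted as a JSON string. A rough standalone illustration of that dispatch (values are examples):

```go
package main

import (
	"fmt"
	"time"
)

// renderTime mirrors AppendTime above: sentinel formats select scaled integer
// timestamps; anything else is a layout string wrapped in quotes.
func renderTime(t time.Time, format string) string {
	switch format {
	case "": // timeFormatUnix
		return fmt.Sprintf("%d", t.Unix())
	case "UNIXMS":
		return fmt.Sprintf("%d", t.UnixNano()/1_000_000)
	default:
		return `"` + t.Format(format) + `"`
	}
}

func main() {
	t := time.Unix(1700000000, 0).UTC()
	fmt.Println(renderTime(t, ""))           // 1700000000
	fmt.Println(renderTime(t, "UNIXMS"))     // 1700000000000
	fmt.Println(renderTime(t, time.RFC3339)) // "2023-11-14T22:13:20Z"
}
```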
+func (Encoder) AppendTimes(dst []byte, vals []time.Time, format string) []byte { + switch format { + case timeFormatUnix: + return appendUnixTimes(dst, vals) + case timeFormatUnixMs: + return appendUnixNanoTimes(dst, vals, 1000000) + case timeFormatUnixMicro: + return appendUnixNanoTimes(dst, vals, 1000) + case timeFormatUnixNano: + return appendUnixNanoTimes(dst, vals, 1) + } + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = append(vals[0].AppendFormat(append(dst, '"'), format), '"') + if len(vals) > 1 { + for _, t := range vals[1:] { + dst = append(t.AppendFormat(append(dst, ',', '"'), format), '"') + } + } + dst = append(dst, ']') + return dst +} + +func appendUnixTimes(dst []byte, vals []time.Time) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendInt(dst, vals[0].Unix(), 10) + if len(vals) > 1 { + for _, t := range vals[1:] { + dst = strconv.AppendInt(append(dst, ','), t.Unix(), 10) + } + } + dst = append(dst, ']') + return dst +} + +func appendUnixNanoTimes(dst []byte, vals []time.Time, div int64) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendInt(dst, vals[0].UnixNano()/div, 10) + if len(vals) > 1 { + for _, t := range vals[1:] { + dst = strconv.AppendInt(append(dst, ','), t.UnixNano()/div, 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendDuration formats the input duration with the given unit & format +// and appends the encoded string to the input byte slice. +func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte { + if useInt { + return strconv.AppendInt(dst, int64(d/unit), 10) + } + return e.AppendFloat64(dst, float64(d)/float64(unit)) +} + +// AppendDurations formats the input durations with the given unit & format +// and appends the encoded string list to the input byte slice. +func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = e.AppendDuration(dst, vals[0], unit, useInt) + if len(vals) > 1 { + for _, d := range vals[1:] { + dst = e.AppendDuration(append(dst, ','), d, unit, useInt) + } + } + dst = append(dst, ']') + return dst +} diff --git a/hotelReservation/vendor/github.com/rs/zerolog/internal/json/types.go b/hotelReservation/vendor/github.com/rs/zerolog/internal/json/types.go new file mode 100644 index 000000000..ef3a2a7a3 --- /dev/null +++ b/hotelReservation/vendor/github.com/rs/zerolog/internal/json/types.go @@ -0,0 +1,414 @@ +package json + +import ( + "fmt" + "math" + "net" + "reflect" + "strconv" +) + +// AppendNil inserts a 'Nil' object into the dst byte array. +func (Encoder) AppendNil(dst []byte) []byte { + return append(dst, "null"...) +} + +// AppendBeginMarker inserts a map start into the dst byte array. +func (Encoder) AppendBeginMarker(dst []byte) []byte { + return append(dst, '{') +} + +// AppendEndMarker inserts a map end into the dst byte array. +func (Encoder) AppendEndMarker(dst []byte) []byte { + return append(dst, '}') +} + +// AppendLineBreak appends a line break. +func (Encoder) AppendLineBreak(dst []byte) []byte { + return append(dst, '\n') +} + +// AppendArrayStart adds markers to indicate the start of an array. +func (Encoder) AppendArrayStart(dst []byte) []byte { + return append(dst, '[') +} + +// AppendArrayEnd adds markers to indicate the end of an array. 
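AppendDuration above encodes a duration either as whole units (useInt true, truncating) or as a fractional float in those units, the same rule as the CBOR version earlier in this diff. A quick illustration with assumed example values:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	d := 1500 * time.Millisecond
	unit := time.Second
	fmt.Println(int64(d / unit))            // 1   -> useInt=true: truncated whole units
	fmt.Println(float64(d) / float64(unit)) // 1.5 -> useInt=false: fractional units
}
```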
+func (Encoder) AppendArrayEnd(dst []byte) []byte { + return append(dst, ']') +} + +// AppendArrayDelim adds markers to indicate end of a particular array element. +func (Encoder) AppendArrayDelim(dst []byte) []byte { + if len(dst) > 0 { + return append(dst, ',') + } + return dst +} + +// AppendBool converts the input bool to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendBool(dst []byte, val bool) []byte { + return strconv.AppendBool(dst, val) +} + +// AppendBools encodes the input bools to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendBools(dst []byte, vals []bool) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendBool(dst, vals[0]) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendBool(append(dst, ','), val) + } + } + dst = append(dst, ']') + return dst +} + +// AppendInt converts the input int to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendInt(dst []byte, val int) []byte { + return strconv.AppendInt(dst, int64(val), 10) +} + +// AppendInts encodes the input ints to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendInts(dst []byte, vals []int) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendInt(dst, int64(vals[0]), 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendInt(append(dst, ','), int64(val), 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendInt8 converts the input []int8 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendInt8(dst []byte, val int8) []byte { + return strconv.AppendInt(dst, int64(val), 10) +} + +// AppendInts8 encodes the input int8s to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendInts8(dst []byte, vals []int8) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendInt(dst, int64(vals[0]), 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendInt(append(dst, ','), int64(val), 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendInt16 converts the input int16 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendInt16(dst []byte, val int16) []byte { + return strconv.AppendInt(dst, int64(val), 10) +} + +// AppendInts16 encodes the input int16s to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendInts16(dst []byte, vals []int16) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendInt(dst, int64(vals[0]), 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendInt(append(dst, ','), int64(val), 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendInt32 converts the input int32 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendInt32(dst []byte, val int32) []byte { + return strconv.AppendInt(dst, int64(val), 10) +} + +// AppendInts32 encodes the input int32s to json and +// appends the encoded string list to the input byte slice. 
+func (Encoder) AppendInts32(dst []byte, vals []int32) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendInt(dst, int64(vals[0]), 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendInt(append(dst, ','), int64(val), 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendInt64 converts the input int64 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendInt64(dst []byte, val int64) []byte { + return strconv.AppendInt(dst, val, 10) +} + +// AppendInts64 encodes the input int64s to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendInts64(dst []byte, vals []int64) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendInt(dst, vals[0], 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendInt(append(dst, ','), val, 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendUint converts the input uint to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendUint(dst []byte, val uint) []byte { + return strconv.AppendUint(dst, uint64(val), 10) +} + +// AppendUints encodes the input uints to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendUints(dst []byte, vals []uint) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendUint(dst, uint64(vals[0]), 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendUint(append(dst, ','), uint64(val), 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendUint8 converts the input uint8 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendUint8(dst []byte, val uint8) []byte { + return strconv.AppendUint(dst, uint64(val), 10) +} + +// AppendUints8 encodes the input uint8s to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendUints8(dst []byte, vals []uint8) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendUint(dst, uint64(vals[0]), 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendUint(append(dst, ','), uint64(val), 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendUint16 converts the input uint16 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendUint16(dst []byte, val uint16) []byte { + return strconv.AppendUint(dst, uint64(val), 10) +} + +// AppendUints16 encodes the input uint16s to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendUints16(dst []byte, vals []uint16) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendUint(dst, uint64(vals[0]), 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendUint(append(dst, ','), uint64(val), 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendUint32 converts the input uint32 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendUint32(dst []byte, val uint32) []byte { + return strconv.AppendUint(dst, uint64(val), 10) +} + +// AppendUints32 encodes the input uint32s to json and +// appends the encoded string list to the input byte slice. 
+func (Encoder) AppendUints32(dst []byte, vals []uint32) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendUint(dst, uint64(vals[0]), 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendUint(append(dst, ','), uint64(val), 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendUint64 converts the input uint64 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendUint64(dst []byte, val uint64) []byte { + return strconv.AppendUint(dst, val, 10) +} + +// AppendUints64 encodes the input uint64s to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendUints64(dst []byte, vals []uint64) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendUint(dst, vals[0], 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendUint(append(dst, ','), val, 10) + } + } + dst = append(dst, ']') + return dst +} + +func appendFloat(dst []byte, val float64, bitSize int) []byte { + // JSON does not permit NaN or Infinity. A typical JSON encoder would fail + // with an error, but a logging library wants the data to get through so we + // make a tradeoff and store those types as string. + switch { + case math.IsNaN(val): + return append(dst, `"NaN"`...) + case math.IsInf(val, 1): + return append(dst, `"+Inf"`...) + case math.IsInf(val, -1): + return append(dst, `"-Inf"`...) + } + return strconv.AppendFloat(dst, val, 'f', -1, bitSize) +} + +// AppendFloat32 converts the input float32 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendFloat32(dst []byte, val float32) []byte { + return appendFloat(dst, float64(val), 32) +} + +// AppendFloats32 encodes the input float32s to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendFloats32(dst []byte, vals []float32) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = appendFloat(dst, float64(vals[0]), 32) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = appendFloat(append(dst, ','), float64(val), 32) + } + } + dst = append(dst, ']') + return dst +} + +// AppendFloat64 converts the input float64 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendFloat64(dst []byte, val float64) []byte { + return appendFloat(dst, val, 64) +} + +// AppendFloats64 encodes the input float64s to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendFloats64(dst []byte, vals []float64) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = appendFloat(dst, vals[0], 64) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = appendFloat(append(dst, ','), val, 64) + } + } + dst = append(dst, ']') + return dst +} + +// AppendInterface marshals the input interface to a string and +// appends the encoded string to the input byte slice. +func (e Encoder) AppendInterface(dst []byte, i interface{}) []byte { + marshaled, err := JSONMarshalFunc(i) + if err != nil { + return e.AppendString(dst, fmt.Sprintf("marshaling error: %v", err)) + } + return append(dst, marshaled...) +} + +// AppendType appends the parameter type (as a string) to the input byte slice. 
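appendFloat above makes a deliberate tradeoff: JSON has no NaN or Infinity literals, and a logger should not fail an entire event over one bad float, so the special values are emitted as quoted strings instead. A quick standalone check of that behavior:

```go
package main

import (
	"fmt"
	"math"
	"strconv"
)

// renderFloat mirrors appendFloat above: special IEEE-754 values become JSON
// strings, everything else a plain number via strconv.
func renderFloat(val float64) string {
	switch {
	case math.IsNaN(val):
		return `"NaN"`
	case math.IsInf(val, 1):
		return `"+Inf"`
	case math.IsInf(val, -1):
		return `"-Inf"`
	}
	return strconv.FormatFloat(val, 'f', -1, 64)
}

func main() {
	fmt.Println(renderFloat(math.NaN()))   // "NaN"
	fmt.Println(renderFloat(math.Inf(-1))) // "-Inf"
	fmt.Println(renderFloat(3.14))         // 3.14
}
```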
+func (e Encoder) AppendType(dst []byte, i interface{}) []byte { + if i == nil { + return e.AppendString(dst, "") + } + return e.AppendString(dst, reflect.TypeOf(i).String()) +} + +// AppendObjectData takes in an object that is already in a byte array +// and adds it to the dst. +func (Encoder) AppendObjectData(dst []byte, o []byte) []byte { + // Two adjustments may apply: + // 1. if o starts with '{', that brace is dropped because dst + // already carries its own opening brace, and + // 2. if dst already holds other fields, a ',' separator is + // inserted before the new content. + if o[0] == '{' { + if len(dst) > 1 { + dst = append(dst, ',') + } + o = o[1:] + } else if len(dst) > 1 { + dst = append(dst, ',') + } + return append(dst, o...) +} + +// AppendIPAddr adds IPv4 or IPv6 address to dst. +func (e Encoder) AppendIPAddr(dst []byte, ip net.IP) []byte { + return e.AppendString(dst, ip.String()) +} + +// AppendIPPrefix adds IPv4 or IPv6 Prefix (address & mask) to dst. +func (e Encoder) AppendIPPrefix(dst []byte, pfx net.IPNet) []byte { + return e.AppendString(dst, pfx.String()) +} + +// AppendMACAddr adds MAC address to dst. +func (e Encoder) AppendMACAddr(dst []byte, ha net.HardwareAddr) []byte { + return e.AppendString(dst, ha.String()) +} diff --git a/hotelReservation/vendor/github.com/rs/zerolog/log.go b/hotelReservation/vendor/github.com/rs/zerolog/log.go new file mode 100644 index 000000000..834c7e604 --- /dev/null +++ b/hotelReservation/vendor/github.com/rs/zerolog/log.go @@ -0,0 +1,498 @@ +// Package zerolog provides a lightweight logging library dedicated to JSON logging. +// +// A global Logger can be used for simple logging: +// +// import "github.com/rs/zerolog/log" +// +// log.Info().Msg("hello world") +// // Output: {"time":1494567715,"level":"info","message":"hello world"} +// +// NOTE: To import the global logger, import the "log" subpackage "github.com/rs/zerolog/log". +// +// Fields can be added to log messages: +// +// log.Info().Str("foo", "bar").Msg("hello world") +// // Output: {"time":1494567715,"level":"info","message":"hello world","foo":"bar"} +// +// Create a logger instance to manage different outputs: +// +// logger := zerolog.New(os.Stderr).With().Timestamp().Logger() +// logger.Info(). +// Str("foo", "bar"). +// Msg("hello world") +// // Output: {"time":1494567715,"level":"info","message":"hello world","foo":"bar"} +// +// Sub-loggers let you chain loggers with additional context: +// +// sublogger := log.With().Str("component", "foo").Logger() +// sublogger.Info().Msg("hello world") +// // Output: {"time":1494567715,"level":"info","message":"hello world","component":"foo"} +// +// # Level logging +// +// zerolog.SetGlobalLevel(zerolog.InfoLevel) +// +// log.Debug().Msg("filtered out message") +// log.Info().Msg("routed message") +// +// if e := log.Debug(); e.Enabled() { +// // Compute log output only if enabled.
+// value := compute() +// e.Str("foo", value).Msg("some debug message") +// } +// // Output: {"level":"info","time":1494567715,"message":"routed message"} +// +// Customize automatic field names: +// +// log.TimestampFieldName = "t" +// log.LevelFieldName = "p" +// log.MessageFieldName = "m" +// +// log.Info().Msg("hello world") +// // Output: {"t":1494567715,"p":"info","m":"hello world"} +// +// Log with no level and no message: +// +// log.Log().Str("foo","bar").Msg("") +// // Output: {"time":1494567715,"foo":"bar"} +// +// Add contextual fields to global Logger: +// +// log.Logger = log.With().Str("foo", "bar").Logger() +// +// Sample logs: +// +// sampled := log.Sample(&zerolog.BasicSampler{N: 10}) +// sampled.Info().Msg("will be logged every 10 messages") +// +// Log with contextual hooks: +// +// // Create the hook: +// type SeverityHook struct{} +// +// func (h SeverityHook) Run(e *zerolog.Event, level zerolog.Level, msg string) { +// if level != zerolog.NoLevel { +// e.Str("severity", level.String()) +// } +// } +// +// // And use it: +// var h SeverityHook +// log := zerolog.New(os.Stdout).Hook(h) +// log.Warn().Msg("") +// // Output: {"level":"warn","severity":"warn"} +// +// # Caveats +// +// Field duplication: +// +// There is no field deduplication out of the box. +// Using the same key multiple times creates a new key in the final JSON each time. +// +// logger := zerolog.New(os.Stderr).With().Timestamp().Logger() +// logger.Info(). +// Timestamp(). +// Msg("dup") +// // Output: {"level":"info","time":1494567715,"time":1494567715,"message":"dup"} +// +// In this case, many consumers will take the last value, +// but this is not guaranteed; check yours if in doubt. +// +// Concurrency safety: +// +// Be careful when calling UpdateContext. It is not concurrency safe. Use the With method to create a child logger: +// +// func handler(w http.ResponseWriter, r *http.Request) { +// // Create a child logger for concurrency safety +// logger := log.Logger.With().Logger() +// +// // Add context fields, for example User-Agent from HTTP headers +// logger.UpdateContext(func(c zerolog.Context) zerolog.Context { +// ... +// }) +// } +package zerolog + +import ( + "context" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "strconv" + "strings" +) + +// Level defines log levels. +type Level int8 + +const ( + // DebugLevel defines debug log level. + DebugLevel Level = iota + // InfoLevel defines info log level. + InfoLevel + // WarnLevel defines warn log level. + WarnLevel + // ErrorLevel defines error log level. + ErrorLevel + // FatalLevel defines fatal log level. + FatalLevel + // PanicLevel defines panic log level. + PanicLevel + // NoLevel defines an absent log level. + NoLevel + // Disabled disables the logger. + Disabled + + // TraceLevel defines trace log level. + TraceLevel Level = -1 + // Values less than TraceLevel are handled as numbers. +) + +func (l Level) String() string { + switch l { + case TraceLevel: + return LevelTraceValue + case DebugLevel: + return LevelDebugValue + case InfoLevel: + return LevelInfoValue + case WarnLevel: + return LevelWarnValue + case ErrorLevel: + return LevelErrorValue + case FatalLevel: + return LevelFatalValue + case PanicLevel: + return LevelPanicValue + case Disabled: + return "disabled" + case NoLevel: + return "" + } + return strconv.Itoa(int(l)) +} + +// ParseLevel converts a level string into a zerolog Level value. +// It returns an error if the input string does not match known values.
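ParseLevel, defined directly below, together with the MarshalText/UnmarshalText methods, lets services take their log level from a flag or config file. A typical wiring through zerolog's public API (the "warn" string here is just an example value):

```go
package main

import (
	"fmt"
	"os"

	"github.com/rs/zerolog"
)

func main() {
	// Typically sourced from a flag or environment variable.
	lvl, err := zerolog.ParseLevel("warn")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	zerolog.SetGlobalLevel(lvl) // debug/info events are now filtered out
	fmt.Println("global level set to:", lvl)
}
```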
+func ParseLevel(levelStr string) (Level, error) { + switch { + case strings.EqualFold(levelStr, LevelFieldMarshalFunc(TraceLevel)): + return TraceLevel, nil + case strings.EqualFold(levelStr, LevelFieldMarshalFunc(DebugLevel)): + return DebugLevel, nil + case strings.EqualFold(levelStr, LevelFieldMarshalFunc(InfoLevel)): + return InfoLevel, nil + case strings.EqualFold(levelStr, LevelFieldMarshalFunc(WarnLevel)): + return WarnLevel, nil + case strings.EqualFold(levelStr, LevelFieldMarshalFunc(ErrorLevel)): + return ErrorLevel, nil + case strings.EqualFold(levelStr, LevelFieldMarshalFunc(FatalLevel)): + return FatalLevel, nil + case strings.EqualFold(levelStr, LevelFieldMarshalFunc(PanicLevel)): + return PanicLevel, nil + case strings.EqualFold(levelStr, LevelFieldMarshalFunc(Disabled)): + return Disabled, nil + case strings.EqualFold(levelStr, LevelFieldMarshalFunc(NoLevel)): + return NoLevel, nil + } + i, err := strconv.Atoi(levelStr) + if err != nil { + return NoLevel, fmt.Errorf("Unknown Level String: '%s', defaulting to NoLevel", levelStr) + } + if i > 127 || i < -128 { + return NoLevel, fmt.Errorf("Out-Of-Bounds Level: '%d', defaulting to NoLevel", i) + } + return Level(i), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler to allow for easy reading from toml/yaml/json formats +func (l *Level) UnmarshalText(text []byte) error { + if l == nil { + return errors.New("can't unmarshal a nil *Level") + } + var err error + *l, err = ParseLevel(string(text)) + return err +} + +// MarshalText implements encoding.TextMarshaler to allow for easy writing into toml/yaml/json formats +func (l Level) MarshalText() ([]byte, error) { + return []byte(LevelFieldMarshalFunc(l)), nil +} + +// A Logger represents an active logging object that generates lines +// of JSON output to an io.Writer. Each logging operation makes a single +// call to the Writer's Write method. There is no guarantee on access +// serialization to the Writer. If your Writer is not thread safe, +// you may consider a sync wrapper. +type Logger struct { + w LevelWriter + level Level + sampler Sampler + context []byte + hooks []Hook + stack bool + ctx context.Context +} + +// New creates a root logger with given output writer. If the output writer implements +// the LevelWriter interface, the WriteLevel method will be called instead of the Write +// one. +// +// Each logging operation makes a single call to the Writer's Write method. There is no +// guarantee on access serialization to the Writer. If your Writer is not thread safe, +// you may consider using sync wrapper. +func New(w io.Writer) Logger { + if w == nil { + w = ioutil.Discard + } + lw, ok := w.(LevelWriter) + if !ok { + lw = LevelWriterAdapter{w} + } + return Logger{w: lw, level: TraceLevel} +} + +// Nop returns a disabled logger for which all operation are no-op. +func Nop() Logger { + return New(nil).Level(Disabled) +} + +// Output duplicates the current logger and sets w as its output. +func (l Logger) Output(w io.Writer) Logger { + l2 := New(w) + l2.level = l.level + l2.sampler = l.sampler + l2.stack = l.stack + if len(l.hooks) > 0 { + l2.hooks = append(l2.hooks, l.hooks...) + } + if l.context != nil { + l2.context = make([]byte, len(l.context), cap(l.context)) + copy(l2.context, l.context) + } + return l2 +} + +// With creates a child logger with the field added to its context. +func (l Logger) With() Context { + context := l.context + l.context = make([]byte, 0, 500) + if context != nil { + l.context = append(l.context, context...) 
+ } else { + // This is needed for AppendKey to not check len of input + // thus making it inlinable + l.context = enc.AppendBeginMarker(l.context) + } + return Context{l} +} + +// UpdateContext updates the internal logger's context. +// +// Caution: This method is not concurrency safe. +// Use the With method to create a child logger before modifying the context from concurrent goroutines. +func (l *Logger) UpdateContext(update func(c Context) Context) { + if l == disabledLogger { + return + } + if cap(l.context) == 0 { + l.context = make([]byte, 0, 500) + } + if len(l.context) == 0 { + l.context = enc.AppendBeginMarker(l.context) + } + c := update(Context{*l}) + l.context = c.l.context +} + +// Level creates a child logger with the minimum accepted level set to level. +func (l Logger) Level(lvl Level) Logger { + l.level = lvl + return l +} + +// GetLevel returns the current Level of l. +func (l Logger) GetLevel() Level { + return l.level +} + +// Sample returns a logger with the s sampler. +func (l Logger) Sample(s Sampler) Logger { + l.sampler = s + return l +} + +// Hook returns a logger with the h Hook. +func (l Logger) Hook(h Hook) Logger { + newHooks := make([]Hook, len(l.hooks), len(l.hooks)+1) + copy(newHooks, l.hooks) + l.hooks = append(newHooks, h) + return l +} + +// Trace starts a new message with trace level. +// +// You must call Msg on the returned event in order to send the event. +func (l *Logger) Trace() *Event { + return l.newEvent(TraceLevel, nil) +} + +// Debug starts a new message with debug level. +// +// You must call Msg on the returned event in order to send the event. +func (l *Logger) Debug() *Event { + return l.newEvent(DebugLevel, nil) +} + +// Info starts a new message with info level. +// +// You must call Msg on the returned event in order to send the event. +func (l *Logger) Info() *Event { + return l.newEvent(InfoLevel, nil) +} + +// Warn starts a new message with warn level. +// +// You must call Msg on the returned event in order to send the event. +func (l *Logger) Warn() *Event { + return l.newEvent(WarnLevel, nil) +} + +// Error starts a new message with error level. +// +// You must call Msg on the returned event in order to send the event. +func (l *Logger) Error() *Event { + return l.newEvent(ErrorLevel, nil) +} + +// Err starts a new message with error level with err as a field if not nil or +// with info level if err is nil. +// +// You must call Msg on the returned event in order to send the event. +func (l *Logger) Err(err error) *Event { + if err != nil { + return l.Error().Err(err) + } + + return l.Info() +} + +// Fatal starts a new message with fatal level. The os.Exit(1) function +// is called by the Msg method, which terminates the program immediately. +// +// You must call Msg on the returned event in order to send the event. +func (l *Logger) Fatal() *Event { + return l.newEvent(FatalLevel, func(msg string) { os.Exit(1) }) +} + +// Panic starts a new message with panic level. The panic() function +// is called by the Msg method, which stops the ordinary flow of a goroutine. +// +// You must call Msg on the returned event in order to send the event. +func (l *Logger) Panic() *Event { + return l.newEvent(PanicLevel, func(msg string) { panic(msg) }) +} + +// WithLevel starts a new message with level. Unlike Fatal and Panic +// methods, WithLevel does not terminate the program or stop the ordinary +// flow of a goroutine when used with their respective levels. +// +// You must call Msg on the returned event in order to send the event. 
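The constructor and chaining methods above compose in the usual zerolog style: New builds a root logger, With derives a child with bound fields, and Level filters events. A short example using only the public API defined in this file (the field names are illustrative):

```go
package main

import (
	"errors"
	"os"

	"github.com/rs/zerolog"
)

func main() {
	// Root logger writing JSON lines to stderr, with timestamps.
	logger := zerolog.New(os.Stderr).With().Timestamp().Logger()

	// Child logger: every event carries the bound component field.
	sub := logger.With().Str("component", "search").Logger().Level(zerolog.InfoLevel)

	sub.Info().Int("hits", 42).Msg("query served")
	sub.Err(errors.New("backend timeout")).Msg("query failed") // error level with err field
}
```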
+func (l *Logger) WithLevel(level Level) *Event { + switch level { + case TraceLevel: + return l.Trace() + case DebugLevel: + return l.Debug() + case InfoLevel: + return l.Info() + case WarnLevel: + return l.Warn() + case ErrorLevel: + return l.Error() + case FatalLevel: + return l.newEvent(FatalLevel, nil) + case PanicLevel: + return l.newEvent(PanicLevel, nil) + case NoLevel: + return l.Log() + case Disabled: + return nil + default: + return l.newEvent(level, nil) + } +} + +// Log starts a new message with no level. Setting GlobalLevel to Disabled +// will still disable events produced by this method. +// +// You must call Msg on the returned event in order to send the event. +func (l *Logger) Log() *Event { + return l.newEvent(NoLevel, nil) +} + +// Print sends a log event using debug level and no extra field. +// Arguments are handled in the manner of fmt.Print. +func (l *Logger) Print(v ...interface{}) { + if e := l.Debug(); e.Enabled() { + e.CallerSkipFrame(1).Msg(fmt.Sprint(v...)) + } +} + +// Printf sends a log event using debug level and no extra field. +// Arguments are handled in the manner of fmt.Printf. +func (l *Logger) Printf(format string, v ...interface{}) { + if e := l.Debug(); e.Enabled() { + e.CallerSkipFrame(1).Msg(fmt.Sprintf(format, v...)) + } +} + +// Write implements the io.Writer interface. This is useful to set as a writer +// for the standard library log. +func (l Logger) Write(p []byte) (n int, err error) { + n = len(p) + if n > 0 && p[n-1] == '\n' { + // Trim CR added by stdlog. + p = p[0 : n-1] + } + l.Log().CallerSkipFrame(1).Msg(string(p)) + return +} + +func (l *Logger) newEvent(level Level, done func(string)) *Event { + enabled := l.should(level) + if !enabled { + if done != nil { + done("") + } + return nil + } + e := newEvent(l.w, level) + e.done = done + e.ch = l.hooks + e.ctx = l.ctx + if level != NoLevel && LevelFieldName != "" { + e.Str(LevelFieldName, LevelFieldMarshalFunc(level)) + } + if l.context != nil && len(l.context) > 1 { + e.buf = enc.AppendObjectData(e.buf, l.context) + } + if l.stack { + e.Stack() + } + return e +} + +// should returns true if the log event should be logged. +func (l *Logger) should(lvl Level) bool { + if lvl < l.level || lvl < GlobalLevel() { + return false + } + if l.sampler != nil && !samplingDisabled() { + return l.sampler.Sample(lvl) + } + return true +} diff --git a/hotelReservation/vendor/github.com/rs/zerolog/log/log.go b/hotelReservation/vendor/github.com/rs/zerolog/log/log.go new file mode 100644 index 000000000..a96ec5067 --- /dev/null +++ b/hotelReservation/vendor/github.com/rs/zerolog/log/log.go @@ -0,0 +1,131 @@ +// Package log provides a global logger for zerolog. +package log + +import ( + "context" + "fmt" + "io" + "os" + + "github.com/rs/zerolog" +) + +// Logger is the global logger. +var Logger = zerolog.New(os.Stderr).With().Timestamp().Logger() + +// Output duplicates the global logger and sets w as its output. +func Output(w io.Writer) zerolog.Logger { + return Logger.Output(w) +} + +// With creates a child logger with the field added to its context. +func With() zerolog.Context { + return Logger.With() +} + +// Level creates a child logger with the minimum accepted level set to level. +func Level(level zerolog.Level) zerolog.Logger { + return Logger.Level(level) +} + +// Sample returns a logger with the s sampler. +func Sample(s zerolog.Sampler) zerolog.Logger { + return Logger.Sample(s) +} + +// Hook returns a logger with the h Hook. 
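Because Logger implements io.Writer (the Write method above trims the trailing newline that the standard library's log package appends), a zerolog logger can capture stdlib log output directly:

```go
package main

import (
	stdlog "log"
	"os"

	"github.com/rs/zerolog"
)

func main() {
	logger := zerolog.New(os.Stderr).With().Timestamp().Logger()

	// Route everything the stdlib logger prints through zerolog.
	stdlog.SetFlags(0) // zerolog adds its own timestamp
	stdlog.SetOutput(logger)

	stdlog.Print("hello from the standard library")
	// Emits one JSON line with this text as the message field.
}
```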
+func Hook(h zerolog.Hook) zerolog.Logger { + return Logger.Hook(h) +} + +// Err starts a new message with error level with err as a field if not nil or +// with info level if err is nil. +// +// You must call Msg on the returned event in order to send the event. +func Err(err error) *zerolog.Event { + return Logger.Err(err) +} + +// Trace starts a new message with trace level. +// +// You must call Msg on the returned event in order to send the event. +func Trace() *zerolog.Event { + return Logger.Trace() +} + +// Debug starts a new message with debug level. +// +// You must call Msg on the returned event in order to send the event. +func Debug() *zerolog.Event { + return Logger.Debug() +} + +// Info starts a new message with info level. +// +// You must call Msg on the returned event in order to send the event. +func Info() *zerolog.Event { + return Logger.Info() +} + +// Warn starts a new message with warn level. +// +// You must call Msg on the returned event in order to send the event. +func Warn() *zerolog.Event { + return Logger.Warn() +} + +// Error starts a new message with error level. +// +// You must call Msg on the returned event in order to send the event. +func Error() *zerolog.Event { + return Logger.Error() +} + +// Fatal starts a new message with fatal level. The os.Exit(1) function +// is called by the Msg method. +// +// You must call Msg on the returned event in order to send the event. +func Fatal() *zerolog.Event { + return Logger.Fatal() +} + +// Panic starts a new message with panic level. The message is also sent +// to the panic function. +// +// You must call Msg on the returned event in order to send the event. +func Panic() *zerolog.Event { + return Logger.Panic() +} + +// WithLevel starts a new message with level. +// +// You must call Msg on the returned event in order to send the event. +func WithLevel(level zerolog.Level) *zerolog.Event { + return Logger.WithLevel(level) +} + +// Log starts a new message with no level. Setting zerolog.GlobalLevel to +// zerolog.Disabled will still disable events produced by this method. +// +// You must call Msg on the returned event in order to send the event. +func Log() *zerolog.Event { + return Logger.Log() +} + +// Print sends a log event using debug level and no extra field. +// Arguments are handled in the manner of fmt.Print. +func Print(v ...interface{}) { + Logger.Debug().CallerSkipFrame(1).Msg(fmt.Sprint(v...)) +} + +// Printf sends a log event using debug level and no extra field. +// Arguments are handled in the manner of fmt.Printf. +func Printf(format string, v ...interface{}) { + Logger.Debug().CallerSkipFrame(1).Msgf(format, v...) +} + +// Ctx returns the Logger associated with the ctx. If no logger +// is associated, a disabled logger is returned. 
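+// +// Illustrative sketch (assumes a parent zerolog.Logger named logger): +// +// ctx := logger.WithContext(context.Background()) +// log.Ctx(ctx).Info().Msg("uses the logger stored in ctx")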
+func Ctx(ctx context.Context) *zerolog.Logger { + return zerolog.Ctx(ctx) +} diff --git a/hotelReservation/vendor/github.com/rs/zerolog/not_go112.go b/hotelReservation/vendor/github.com/rs/zerolog/not_go112.go new file mode 100644 index 000000000..4c43c9e76 --- /dev/null +++ b/hotelReservation/vendor/github.com/rs/zerolog/not_go112.go @@ -0,0 +1,5 @@ +// +build !go1.12 + +package zerolog + +const contextCallerSkipFrameCount = 3 diff --git a/hotelReservation/vendor/github.com/rs/zerolog/pretty.png b/hotelReservation/vendor/github.com/rs/zerolog/pretty.png new file mode 100644 index 000000000..242033686 Binary files /dev/null and b/hotelReservation/vendor/github.com/rs/zerolog/pretty.png differ diff --git a/hotelReservation/vendor/github.com/rs/zerolog/sampler.go b/hotelReservation/vendor/github.com/rs/zerolog/sampler.go new file mode 100644 index 000000000..1be98c4f9 --- /dev/null +++ b/hotelReservation/vendor/github.com/rs/zerolog/sampler.go @@ -0,0 +1,134 @@ +package zerolog + +import ( + "math/rand" + "sync/atomic" + "time" +) + +var ( + // Often samples about 1 in 10 events. + Often = RandomSampler(10) + // Sometimes samples about 1 in 100 events. + Sometimes = RandomSampler(100) + // Rarely samples about 1 in 1000 events. + Rarely = RandomSampler(1000) ) + +// Sampler defines an interface to a log sampler. +type Sampler interface { + // Sample returns true if the event should be part of the sample, false if + // the event should be dropped. + Sample(lvl Level) bool +} + +// RandomSampler uses a PRNG to randomly sample an event out of N events, +// regardless of their level. +type RandomSampler uint32 + +// Sample implements the Sampler interface. +func (s RandomSampler) Sample(lvl Level) bool { + if s <= 0 { + return false + } + if rand.Intn(int(s)) != 0 { + return false + } + return true +} + +// BasicSampler is a sampler that will send every Nth event, regardless of +// level. +type BasicSampler struct { + N uint32 + counter uint32 +} + +// Sample implements the Sampler interface. +func (s *BasicSampler) Sample(lvl Level) bool { + n := s.N + if n == 1 { + return true + } + c := atomic.AddUint32(&s.counter, 1) + return c%n == 1 +} + +// BurstSampler lets Burst events pass per Period, then passes the decision to +// NextSampler. If NextSampler is not set, all subsequent events are rejected. +type BurstSampler struct { + // Burst is the maximum number of events per period allowed before calling + // NextSampler. + Burst uint32 + // Period defines the burst period. If 0, NextSampler is always called. + Period time.Duration + // NextSampler is the sampler used after the burst is reached. If nil, + // events are always rejected after the burst. + NextSampler Sampler + + counter uint32 + resetAt int64 +} + +// Sample implements the Sampler interface. +func (s *BurstSampler) Sample(lvl Level) bool { + if s.Burst > 0 && s.Period > 0 { + if s.inc() <= s.Burst { + return true + } + } + if s.NextSampler == nil { + return false + } + return s.NextSampler.Sample(lvl) +} + +func (s *BurstSampler) inc() uint32 { + now := time.Now().UnixNano() + resetAt := atomic.LoadInt64(&s.resetAt) + var c uint32 + if now > resetAt { + c = 1 + atomic.StoreUint32(&s.counter, c) + newResetAt := now + s.Period.Nanoseconds() + reset := atomic.CompareAndSwapInt64(&s.resetAt, resetAt, newResetAt) + if !reset { + // Lost the race with another goroutine trying to reset.
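+ // Fall back to incrementing the shared counter; the occasional + // extra count during this race is acceptable for sampling purposes.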
+ c = atomic.AddUint32(&s.counter, 1) + } + } else { + c = atomic.AddUint32(&s.counter, 1) + } + return c +} + +// LevelSampler applies a different sampler for each level. +type LevelSampler struct { + TraceSampler, DebugSampler, InfoSampler, WarnSampler, ErrorSampler Sampler +} + +func (s LevelSampler) Sample(lvl Level) bool { + switch lvl { + case TraceLevel: + if s.TraceSampler != nil { + return s.TraceSampler.Sample(lvl) + } + case DebugLevel: + if s.DebugSampler != nil { + return s.DebugSampler.Sample(lvl) + } + case InfoLevel: + if s.InfoSampler != nil { + return s.InfoSampler.Sample(lvl) + } + case WarnLevel: + if s.WarnSampler != nil { + return s.WarnSampler.Sample(lvl) + } + case ErrorLevel: + if s.ErrorSampler != nil { + return s.ErrorSampler.Sample(lvl) + } + } + return true +} diff --git a/hotelReservation/vendor/github.com/rs/zerolog/syslog.go b/hotelReservation/vendor/github.com/rs/zerolog/syslog.go new file mode 100644 index 000000000..c40828307 --- /dev/null +++ b/hotelReservation/vendor/github.com/rs/zerolog/syslog.go @@ -0,0 +1,80 @@ +// +build !windows +// +build !binary_log + +package zerolog + +import ( + "io" +) + +// See http://cee.mitre.org/language/1.0-beta1/clt.html#syslog +// or https://www.rsyslog.com/json-elasticsearch/ +const ceePrefix = "@cee:" + +// SyslogWriter is an interface matching a syslog.Writer struct. +type SyslogWriter interface { + io.Writer + Debug(m string) error + Info(m string) error + Warning(m string) error + Err(m string) error + Emerg(m string) error + Crit(m string) error +} + +type syslogWriter struct { + w SyslogWriter + prefix string +} + +// SyslogLevelWriter wraps a SyslogWriter and calls the right syslog level +// method matching the zerolog level. +func SyslogLevelWriter(w SyslogWriter) LevelWriter { + return syslogWriter{w, ""} +} + +// SyslogCEEWriter wraps a SyslogWriter with a SyslogLevelWriter that adds a +// MITRE CEE prefix for JSON syslog entries, compatible with rsyslog +// and syslog-ng JSON logging support. +// See https://www.rsyslog.com/json-elasticsearch/ +func SyslogCEEWriter(w SyslogWriter) LevelWriter { + return syslogWriter{w, ceePrefix} +} + +func (sw syslogWriter) Write(p []byte) (n int, err error) { + var pn int + if sw.prefix != "" { + pn, err = sw.w.Write([]byte(sw.prefix)) + if err != nil { + return pn, err + } + } + n, err = sw.w.Write(p) + return pn + n, err +} + +// WriteLevel implements the LevelWriter interface.
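+// Note that TraceLevel matches an empty case below, so trace events are +// silently dropped (syslog defines no severity below debug).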
+func (sw syslogWriter) WriteLevel(level Level, p []byte) (n int, err error) { + switch level { + case TraceLevel: + case DebugLevel: + err = sw.w.Debug(sw.prefix + string(p)) + case InfoLevel: + err = sw.w.Info(sw.prefix + string(p)) + case WarnLevel: + err = sw.w.Warning(sw.prefix + string(p)) + case ErrorLevel: + err = sw.w.Err(sw.prefix + string(p)) + case FatalLevel: + err = sw.w.Emerg(sw.prefix + string(p)) + case PanicLevel: + err = sw.w.Crit(sw.prefix + string(p)) + case NoLevel: + err = sw.w.Info(sw.prefix + string(p)) + default: + panic("invalid level") + } + // Any CEE prefix is not part of the message, so we don't include its length + n = len(p) + return +} diff --git a/hotelReservation/vendor/github.com/rs/zerolog/writer.go b/hotelReservation/vendor/github.com/rs/zerolog/writer.go new file mode 100644 index 000000000..9b9ef88e8 --- /dev/null +++ b/hotelReservation/vendor/github.com/rs/zerolog/writer.go @@ -0,0 +1,182 @@ +package zerolog + +import ( + "bytes" + "io" + "path" + "runtime" + "strconv" + "strings" + "sync" +) + +// LevelWriter defines an interface that a writer may implement in order +// to receive level information along with the payload. +type LevelWriter interface { + io.Writer + WriteLevel(level Level, p []byte) (n int, err error) +} + +// LevelWriterAdapter adapts an io.Writer to support the LevelWriter interface. +type LevelWriterAdapter struct { + io.Writer +} + +// WriteLevel simply writes everything to the adapted writer, ignoring the level. +func (lw LevelWriterAdapter) WriteLevel(l Level, p []byte) (n int, err error) { + return lw.Write(p) +} + +type syncWriter struct { + mu sync.Mutex + lw LevelWriter +} + +// SyncWriter wraps w so that each call to Write is synchronized with a mutex. +// This syncer can be used to wrap the call to a writer's Write method if it is +// not thread safe. Note that you do not need this wrapper for os.File Write +// operations on POSIX and Windows systems as they are already thread-safe. +func SyncWriter(w io.Writer) io.Writer { + if lw, ok := w.(LevelWriter); ok { + return &syncWriter{lw: lw} + } + return &syncWriter{lw: LevelWriterAdapter{w}} +} + +// Write implements the io.Writer interface. +func (s *syncWriter) Write(p []byte) (n int, err error) { + s.mu.Lock() + defer s.mu.Unlock() + return s.lw.Write(p) +} + +// WriteLevel implements the LevelWriter interface. +func (s *syncWriter) WriteLevel(l Level, p []byte) (n int, err error) { + s.mu.Lock() + defer s.mu.Unlock() + return s.lw.WriteLevel(l, p) +} + +type multiLevelWriter struct { + writers []LevelWriter +} + +func (t multiLevelWriter) Write(p []byte) (n int, err error) { + for _, w := range t.writers { + if _n, _err := w.Write(p); err == nil { + n = _n + if _err != nil { + err = _err + } else if _n != len(p) { + err = io.ErrShortWrite + } + } + } + return n, err +} + +func (t multiLevelWriter) WriteLevel(l Level, p []byte) (n int, err error) { + for _, w := range t.writers { + if _n, _err := w.WriteLevel(l, p); err == nil { + n = _n + if _err != nil { + err = _err + } else if _n != len(p) { + err = io.ErrShortWrite + } + } + } + return n, err +} + +// MultiLevelWriter creates a writer that duplicates its writes to all the +// provided writers, similar to the Unix tee(1) command. If some writers +// implement LevelWriter, their WriteLevel method will be used instead of Write.
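+// +// A minimal, illustrative sketch (fileWriter is an assumed io.Writer): +// +// w := zerolog.MultiLevelWriter(os.Stdout, fileWriter) +// logger := zerolog.New(w).With().Timestamp().Logger()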
+func MultiLevelWriter(writers ...io.Writer) LevelWriter { + lwriters := make([]LevelWriter, 0, len(writers)) + for _, w := range writers { + if lw, ok := w.(LevelWriter); ok { + lwriters = append(lwriters, lw) + } else { + lwriters = append(lwriters, LevelWriterAdapter{w}) + } + } + return multiLevelWriter{lwriters} +} + +// TestingLog is the logging interface of testing.TB. +type TestingLog interface { + Log(args ...interface{}) + Logf(format string, args ...interface{}) + Helper() +} + +// TestWriter is a writer that writes to testing.TB. +type TestWriter struct { + T TestingLog + + // Frame skips caller frames to capture the original file and line numbers. + Frame int +} + +// NewTestWriter creates a writer that logs to the testing.TB. +func NewTestWriter(t TestingLog) TestWriter { + return TestWriter{T: t} +} + +// Write to testing.TB. +func (t TestWriter) Write(p []byte) (n int, err error) { + t.T.Helper() + + n = len(p) + + // Strip trailing newline because t.Log always adds one. + p = bytes.TrimRight(p, "\n") + + // Try to correct the log file and line number to the caller. + if t.Frame > 0 { + _, origFile, origLine, _ := runtime.Caller(1) + _, frameFile, frameLine, ok := runtime.Caller(1 + t.Frame) + if ok { + erase := strings.Repeat("\b", len(path.Base(origFile))+len(strconv.Itoa(origLine))+3) + t.T.Logf("%s%s:%d: %s", erase, path.Base(frameFile), frameLine, p) + return n, err + } + } + t.T.Log(string(p)) + + return n, err +} + +// ConsoleTestWriter creates an option that correctly sets the file frame depth for testing.TB log. +func ConsoleTestWriter(t TestingLog) func(w *ConsoleWriter) { + return func(w *ConsoleWriter) { + w.Out = TestWriter{T: t, Frame: 6} + } +} + +// FilteredLevelWriter writes only logs at Level or above to Writer. +// +// It should be used only in combination with MultiLevelWriter when you +// want to write to multiple destinations at different levels. Otherwise +// you should just set the level on the logger and filter events early. +// When using MultiLevelWriter, set the level on the logger to the lowest of +// the levels you use for the writers. +type FilteredLevelWriter struct { + Writer LevelWriter + Level Level +} + +// Write writes to the underlying Writer. +func (w *FilteredLevelWriter) Write(p []byte) (int, error) { + return w.Writer.Write(p) +} + +// WriteLevel calls WriteLevel of the underlying Writer only if the level is equal +// to or above the Level.
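+// +// Illustrative sketch (consoleWriter and fileWriter are assumed io.Writers; the console gets every event, the file only errors and above): +// +// fw := &zerolog.FilteredLevelWriter{Writer: zerolog.LevelWriterAdapter{Writer: fileWriter}, Level: zerolog.ErrorLevel} +// logger := zerolog.New(zerolog.MultiLevelWriter(consoleWriter, fw))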
+func (w *FilteredLevelWriter) WriteLevel(level Level, p []byte) (int, error) { + if level >= w.Level { + return w.Writer.WriteLevel(level, p) + } + return len(p), nil +} diff --git a/hotelReservation/vendor/github.com/segmentio/ksuid/.gitignore b/hotelReservation/vendor/github.com/segmentio/ksuid/.gitignore deleted file mode 100644 index 4b7a3f38b..000000000 --- a/hotelReservation/vendor/github.com/segmentio/ksuid/.gitignore +++ /dev/null @@ -1,31 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof -/ksuid - -# Emacs -*~ - -# govendor -/vendor/*/ diff --git a/hotelReservation/vendor/github.com/segmentio/ksuid/LICENSE.md b/hotelReservation/vendor/github.com/segmentio/ksuid/LICENSE.md deleted file mode 100644 index aefb79318..000000000 --- a/hotelReservation/vendor/github.com/segmentio/ksuid/LICENSE.md +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2017 Segment.io - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/hotelReservation/vendor/github.com/segmentio/ksuid/README.md b/hotelReservation/vendor/github.com/segmentio/ksuid/README.md deleted file mode 100644 index 45dcd5748..000000000 --- a/hotelReservation/vendor/github.com/segmentio/ksuid/README.md +++ /dev/null @@ -1,141 +0,0 @@ -# ksuid [![Go Report Card](https://goreportcard.com/badge/github.com/segmentio/ksuid)](https://goreportcard.com/report/github.com/segmentio/ksuid) [![GoDoc](https://godoc.org/github.com/segmentio/ksuid?status.svg)](https://godoc.org/github.com/segmentio/ksuid) - -ksuid is a Go library that can generate and parse KSUIDs. - -# What is a KSUID? - -KSUID stands for K-Sortable Unique IDentifier. It's a way to generate globally -unique IDs similar to RFC 4122 UUIDs, but they contain a time component so they -can be "roughly" sorted by time of creation. The remainder of the KSUID is -randomly generated bytes. - -# Why use KSUIDs? - -Distributed systems often require unique IDs. There are numerous solutions -out there for doing this, so why KSUID? - -## 1. Sortable by Timestamp - -Unlike the more common choice of UUIDv4, KSUIDs contain a timestamp component -that allows them to be roughly sorted by generation time. This is obviously not -a strong guarantee as it depends on wall clocks, but is still incredibly useful -in practice. - -## 2.
No Coordination Required - -Snowflake IDs[1] and derivatives require coordination, which significantly -increases the complexity of implementation and creates operational overhead. -While RFC 4122 UUIDv1s do have a time component, there aren't enough bytes of -randomness to provide strong protections against duplicate ID generation. - -KSUIDs use 128-bits of pseudorandom data, which provides a 64-times larger -number space than the 122-bits in the well-accepted RFC 4122 UUIDv4 standard. -The additional timestamp component drives down the extremely rare chance of -duplication to the point of near physical infeasibility, even assuming extreme -clock skew (> 24-hours) that would cause other severe anomalies. - -1. https://blog.twitter.com/2010/announcing-snowflake - -## 3. Lexicographically Sortable, Portable Representations - -The binary and string representations are lexicographically sortable, which -allows them to be dropped into systems which do not natively support KSUIDs -and retain their k-sortable characteristics. - -The string representation is base62-encoded, so that KSUIDs can "fit" -anywhere alphanumeric strings are accepted. - -# How do they work? - -KSUIDs are 20 bytes: a 32-bit unsigned integer UTC timestamp and a 128-bit -randomly generated payload. The timestamp uses big-endian encoding, to allow -lexicographic sorting. The timestamp epoch is adjusted to May 13th, 2014, -providing over 100 years of useful life starting at UNIX epoch + 14e8. The -payload uses a cryptographically-strong pseudorandom number generator. - -The string representation is fixed at 27-characters encoded using a base62 -encoding that also sorts lexicographically. - -# Command Line Tool - -This package comes with a simple command-line tool `ksuid`. This tool can -generate KSUIDs as well as inspect the internal components for debugging -purposes.
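- -A rough programmatic equivalent of the tool's inspect output, sketched with the library API (fmt import and error handling omitted): - -```go -id := ksuid.New() -fmt.Printf("String: %s\nTimestamp: %d\nPayload: %X\n", id.String(), id.Timestamp(), id.Payload()) -```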
- -## Usage examples - -### Generate 4 KSUIDs - -```sh -$ ./ksuid -n 4 -0ujsszwN8NRY24YaXiTIE2VWDTS -0ujsswThIGTUYm2K8FjOOfXtY1K -0ujssxh0cECutqzMgbtXSGnjorm -0ujsszgFvbiEr7CDgE3z8MAUPFt -``` - -### Inspect the components of a KSUID - -Using the inspect formatting on just one KSUID: - -```sh -$ ./ksuid -f inspect $(./ksuid) - -REPRESENTATION: - - String: 0ujtsYcgvSTl8PAuAdqWYSMnLOv - Raw: 0669F7EFB5A1CD34B5F99D1154FB6853345C9735 - -COMPONENTS: - - Time: 2017-10-09 21:00:47 -0700 PDT - Timestamp: 107608047 - Payload: B5A1CD34B5F99D1154FB6853345C9735 -``` - -Using the template formatting on 4 KSUIDs: - -```sh -$ ./ksuid -f template -t '{{ .Time }}: {{ .Payload }}' $(./ksuid -n 4) -2017-10-09 21:05:37 -0700 PDT: 304102BC687E087CC3A811F21D113CCF -2017-10-09 21:05:37 -0700 PDT: EAF0B240A9BFA55E079D887120D962F0 -2017-10-09 21:05:37 -0700 PDT: DF0761769909ABB0C7BB9D66F79FC041 -2017-10-09 21:05:37 -0700 PDT: 1A8F0E3D0BDEB84A5FAD702876F46543 -``` - -### Generate detailed versions of new KSUIDs - -Generate a new KSUID with the corresponding time using the time formatting: - -```sh -$ go run cmd/ksuid/main.go -f time -v -0uk0ava2lavfJwMceJOOEFXEDxl: 2017-10-09 21:56:00 -0700 PDT -``` - -Generate 4 new KSUIDs with details using template formatting: - -```sh -$ ./ksuid -f template -t '{ "timestamp": "{{ .Timestamp }}", "payload": "{{ .Payload }}", "ksuid": "{{.String}}"}' -n 4 -{ "timestamp": "107611700", "payload": "9850EEEC191BF4FF26F99315CE43B0C8", "ksuid": "0uk1Hbc9dQ9pxyTqJ93IUrfhdGq"} -{ "timestamp": "107611700", "payload": "CC55072555316F45B8CA2D2979D3ED0A", "ksuid": "0uk1HdCJ6hUZKDgcxhpJwUl5ZEI"} -{ "timestamp": "107611700", "payload": "BA1C205D6177F0992D15EE606AE32238", "ksuid": "0uk1HcdvF0p8C20KtTfdRSB9XIm"} -{ "timestamp": "107611700", "payload": "67517BA309EA62AE7991B27BB6F2FCAC", "ksuid": "0uk1Ha7hGJ1Q9Xbnkt0yZgNwg3g"} -``` - -Display the detailed version of a new KSUID: - -```sh -$ ./ksuid -f inspect - -REPRESENTATION: - - String: 0ujzPyRiIAffKhBux4PvQdDqMHY - Raw: 066A029C73FC1AA3B2446246D6E89FCD909E8FE8 - -COMPONENTS: - - Time: 2017-10-09 21:46:20 -0700 PDT - Timestamp: 107610780 - Payload: 73FC1AA3B2446246D6E89FCD909E8FE8 - -``` diff --git a/hotelReservation/vendor/github.com/segmentio/ksuid/base62.go b/hotelReservation/vendor/github.com/segmentio/ksuid/base62.go deleted file mode 100644 index f92cb0eef..000000000 --- a/hotelReservation/vendor/github.com/segmentio/ksuid/base62.go +++ /dev/null @@ -1,207 +0,0 @@ -package ksuid - -import "errors" - -const ( - // lexicographic ordering (based on Unicode table) is 0-9A-Za-z - base62Characters = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" - zeroString = "000000000000000000000000000" - offsetUppercase = 10 - offsetLowercase = 36 -) - -var ( - errShortBuffer = errors.New("the output buffer is too small to hold the decoded value") -) - -// Converts a base 62 byte into the number value that it represents. -func base62Value(digit byte) byte { - switch { - case digit >= '0' && digit <= '9': - return digit - '0' - case digit >= 'A' && digit <= 'Z': - return offsetUppercase + (digit - 'A') - default: - return offsetLowercase + (digit - 'a') - } -} - -// This function encodes the base 62 representation of the src KSUID in binary -// form into dst. -// -// In order to support a couple of optimizations the function assumes that src -// is 20 bytes long and dst is 27 bytes long. -// -// Any unused bytes in dst will be set to the padding '0' byte.
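-// -// Conceptually, each pass of the outer loop below performs one step of long -// division: the five 32-bit words are divided by 62, the remainder becomes the -// next base62 digit, and dst is filled from its rightmost position.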
-func fastEncodeBase62(dst []byte, src []byte) { - const srcBase = 4294967296 - const dstBase = 62 - - // Split src into 5 4-byte words; this is where most of the efficiency comes - // from because this is an O(N^2) algorithm, and we make N = N / 4 by working - // on 32 bits at a time. - parts := [5]uint32{ - /* - This is an inlined version of: - - binary.BigEndian.Uint32(src[0:4]), - binary.BigEndian.Uint32(src[4:8]), - binary.BigEndian.Uint32(src[8:12]), - binary.BigEndian.Uint32(src[12:16]), - binary.BigEndian.Uint32(src[16:20]), - - For some reason it gave better performance, possibly because of the - bounds check that the Uint32 function does. - */ - uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]), - uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]), - uint32(src[8])<<24 | uint32(src[9])<<16 | uint32(src[10])<<8 | uint32(src[11]), - uint32(src[12])<<24 | uint32(src[13])<<16 | uint32(src[14])<<8 | uint32(src[15]), - uint32(src[16])<<24 | uint32(src[17])<<16 | uint32(src[18])<<8 | uint32(src[19]), - } - - n := len(dst) - bp := parts[:] - bq := [5]uint32{} - - for len(bp) != 0 { - quotient := bq[:0] - remainder := uint64(0) - - for _, c := range bp { - value := uint64(c) + uint64(remainder)*srcBase - digit := value / dstBase - remainder = value % dstBase - - if len(quotient) != 0 || digit != 0 { - quotient = append(quotient, uint32(digit)) - } - } - - // Writes at the end of the destination buffer because we computed the - // lowest bits first. - n-- - dst[n] = base62Characters[remainder] - bp = quotient - } - - // Add padding at the head of the destination buffer for all bytes that were - // not set. - copy(dst[:n], zeroString) -} - -// This function appends the base 62 representation of the KSUID in src to dst, -// and returns the extended byte slice. -// The result is left-padded with '0' bytes to always append 27 bytes to the -// destination buffer. -func fastAppendEncodeBase62(dst []byte, src []byte) []byte { - dst = reserve(dst, stringEncodedLength) - n := len(dst) - fastEncodeBase62(dst[n:n+stringEncodedLength], src) - return dst[:n+stringEncodedLength] -} - -// This function decodes the base 62 representation of the src KSUID into the -// binary form in dst. - -// -// In order to support a couple of optimizations the function assumes that src -// is 27 bytes long and dst is 20 bytes long. -// -// Any unused bytes in dst will be set to zero.
-func fastDecodeBase62(dst []byte, src []byte) error { - const srcBase = 62 - const dstBase = 4294967296 - - parts := [27]byte{ - base62Value(src[0]), - base62Value(src[1]), - base62Value(src[2]), - base62Value(src[3]), - base62Value(src[4]), - base62Value(src[5]), - base62Value(src[6]), - base62Value(src[7]), - base62Value(src[8]), - base62Value(src[9]), - - base62Value(src[10]), - base62Value(src[11]), - base62Value(src[12]), - base62Value(src[13]), - base62Value(src[14]), - base62Value(src[15]), - base62Value(src[16]), - base62Value(src[17]), - base62Value(src[18]), - base62Value(src[19]), - - base62Value(src[20]), - base62Value(src[21]), - base62Value(src[22]), - base62Value(src[23]), - base62Value(src[24]), - base62Value(src[25]), - base62Value(src[26]), - } - - n := len(dst) - bp := parts[:] - bq := [stringEncodedLength]byte{} - - for len(bp) > 0 { - quotient := bq[:0] - remainder := uint64(0) - - for _, c := range bp { - value := uint64(c) + uint64(remainder)*srcBase - digit := value / dstBase - remainder = value % dstBase - - if len(quotient) != 0 || digit != 0 { - quotient = append(quotient, byte(digit)) - } - } - - if n < 4 { - return errShortBuffer - } - - dst[n-4] = byte(remainder >> 24) - dst[n-3] = byte(remainder >> 16) - dst[n-2] = byte(remainder >> 8) - dst[n-1] = byte(remainder) - n -= 4 - bp = quotient - } - - var zero [20]byte - copy(dst[:n], zero[:]) - return nil -} - -// This function appends the base 62 decoded version of src into dst. -func fastAppendDecodeBase62(dst []byte, src []byte) []byte { - dst = reserve(dst, byteLength) - n := len(dst) - fastDecodeBase62(dst[n:n+byteLength], src) - return dst[:n+byteLength] -} - -// Ensures that at least nbytes are available in the remaining capacity of the -// destination slice, if not, a new copy is made and returned by the function. -func reserve(dst []byte, nbytes int) []byte { - c := cap(dst) - n := len(dst) - - if avail := c - n; avail < nbytes { - c *= 2 - if (c - n) < nbytes { - c = n + nbytes - } - b := make([]byte, n, c) - copy(b, dst) - dst = b - } - - return dst -} diff --git a/hotelReservation/vendor/github.com/segmentio/ksuid/ksuid.go b/hotelReservation/vendor/github.com/segmentio/ksuid/ksuid.go deleted file mode 100644 index 3a33483ec..000000000 --- a/hotelReservation/vendor/github.com/segmentio/ksuid/ksuid.go +++ /dev/null @@ -1,352 +0,0 @@ -package ksuid - -import ( - "bytes" - "crypto/rand" - "database/sql/driver" - "encoding/binary" - "fmt" - "io" - "math" - "sync" - "time" -) - -const ( - // KSUID's epoch starts more recently so that the 32-bit number space gives a - // significantly higher useful lifetime of around 136 years from March 2017. - // This number (14e8) was picked to be easy to remember. 
- epochStamp int64 = 1400000000 - - // Timestamp is a uint32 - timestampLengthInBytes = 4 - - // Payload is 16-bytes - payloadLengthInBytes = 16 - - // KSUIDs are 20 bytes when binary encoded - byteLength = timestampLengthInBytes + payloadLengthInBytes - - // The length of a KSUID when string (base62) encoded - stringEncodedLength = 27 - - // A string-encoded minimum value for a KSUID - minStringEncoded = "000000000000000000000000000" - - // A string-encoded maximum value for a KSUID - maxStringEncoded = "aWgEPTl1tmebfsQzFP4bxwgy80V" -) - -// KSUIDs are 20 bytes: -// 00-03 byte: uint32 BE UTC timestamp with custom epoch -// 04-19 byte: random "payload" -type KSUID [byteLength]byte - -var ( - rander = rand.Reader - randMutex = sync.Mutex{} - randBuffer = [payloadLengthInBytes]byte{} - - errSize = fmt.Errorf("Valid KSUIDs are %v bytes", byteLength) - errStrSize = fmt.Errorf("Valid encoded KSUIDs are %v characters", stringEncodedLength) - errStrValue = fmt.Errorf("Valid encoded KSUIDs are bounded by %s and %s", minStringEncoded, maxStringEncoded) - errPayloadSize = fmt.Errorf("Valid KSUID payloads are %v bytes", payloadLengthInBytes) - - // Represents a completely empty (invalid) KSUID - Nil KSUID - // Represents the highest value a KSUID can have - Max = KSUID{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255} -) - -// Append appends the string representation of i to b, returning a slice to a -// potentially larger memory area. -func (i KSUID) Append(b []byte) []byte { - return fastAppendEncodeBase62(b, i[:]) -} - -// The timestamp portion of the ID as a Time object -func (i KSUID) Time() time.Time { - return correctedUTCTimestampToTime(i.Timestamp()) -} - -// The timestamp portion of the ID as a bare integer which is uncorrected -// for KSUID's special epoch. -func (i KSUID) Timestamp() uint32 { - return binary.BigEndian.Uint32(i[:timestampLengthInBytes]) -} - -// The 16-byte random payload without the timestamp -func (i KSUID) Payload() []byte { - return i[timestampLengthInBytes:] -} - -// String-encoded representation that can be passed through Parse() -func (i KSUID) String() string { - return string(i.Append(make([]byte, 0, stringEncodedLength))) -} - -// Raw byte representation of KSUID -func (i KSUID) Bytes() []byte { - // Safe because this is by-value - return i[:] -} - -// Returns true if this is a "nil" KSUID -func (i KSUID) IsNil() bool { - return i == Nil -} - -// Get satisfies the flag.Getter interface, making it possible to use KSUIDs as -// part of the command line options of a program. -func (i KSUID) Get() interface{} { - return i -} - -// Set satisfies the flag.Value interface, making it possible to use KSUIDs as -// part of the command line options of a program. -func (i *KSUID) Set(s string) error { - return i.UnmarshalText([]byte(s)) -} - -func (i KSUID) MarshalText() ([]byte, error) { - return []byte(i.String()), nil -} - -func (i KSUID) MarshalBinary() ([]byte, error) { - return i.Bytes(), nil -} - -func (i *KSUID) UnmarshalText(b []byte) error { - id, err := Parse(string(b)) - if err != nil { - return err - } - *i = id - return nil -} - -func (i *KSUID) UnmarshalBinary(b []byte) error { - id, err := FromBytes(b) - if err != nil { - return err - } - *i = id - return nil -} - -// Value converts the KSUID into a SQL driver value which can be used to -// directly use the KSUID as a parameter to a SQL query.
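-// -// Illustrative sketch (db is an assumed *sql.DB; database/sql invokes Value automatically): -// -// _, err := db.Exec("INSERT INTO events (id) VALUES (?)", id)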
-func (i KSUID) Value() (driver.Value, error) { - if i.IsNil() { - return nil, nil - } - return i.String(), nil -} - -// Scan implements the sql.Scanner interface. It supports converting from -// string, []byte, or nil into a KSUID value. Attempting to convert from -// another type will return an error. -func (i *KSUID) Scan(src interface{}) error { - switch v := src.(type) { - case nil: - return i.scan(nil) - case []byte: - return i.scan(v) - case string: - return i.scan([]byte(v)) - default: - return fmt.Errorf("Scan: unable to scan type %T into KSUID", v) - } -} - -func (i *KSUID) scan(b []byte) error { - switch len(b) { - case 0: - *i = Nil - return nil - case byteLength: - return i.UnmarshalBinary(b) - case stringEncodedLength: - return i.UnmarshalText(b) - default: - return errSize - } -} - -// Decodes a string-encoded representation of a KSUID object -func Parse(s string) (KSUID, error) { - if len(s) != stringEncodedLength { - return Nil, errStrSize - } - - src := [stringEncodedLength]byte{} - dst := [byteLength]byte{} - - copy(src[:], s[:]) - - if err := fastDecodeBase62(dst[:], src[:]); err != nil { - return Nil, errStrValue - } - - return FromBytes(dst[:]) -} - -func timeToCorrectedUTCTimestamp(t time.Time) uint32 { - return uint32(t.Unix() - epochStamp) -} - -func correctedUTCTimestampToTime(ts uint32) time.Time { - return time.Unix(int64(ts)+epochStamp, 0) -} - -// Generates a new KSUID. In the strange case that random bytes -// can't be read, it will panic. -func New() KSUID { - ksuid, err := NewRandom() - if err != nil { - panic(fmt.Sprintf("Couldn't generate KSUID, inconceivable! error: %v", err)) - } - return ksuid -} - -// Generates a new KSUID -func NewRandom() (ksuid KSUID, err error) { - return NewRandomWithTime(time.Now()) -} - -func NewRandomWithTime(t time.Time) (ksuid KSUID, err error) { - // Go's default random number generators are not safe for concurrent use by - // multiple goroutines, so the use of rander and randBuffer is explicitly - // synchronized here. - randMutex.Lock() - - _, err = io.ReadAtLeast(rander, randBuffer[:], len(randBuffer)) - copy(ksuid[timestampLengthInBytes:], randBuffer[:]) - - randMutex.Unlock() - - if err != nil { - ksuid = Nil // don't leak random bytes on error - return - } - - ts := timeToCorrectedUTCTimestamp(t) - binary.BigEndian.PutUint32(ksuid[:timestampLengthInBytes], ts) - return -} - -// Constructs a KSUID from constituent parts -func FromParts(t time.Time, payload []byte) (KSUID, error) { - if len(payload) != payloadLengthInBytes { - return Nil, errPayloadSize - } - - var ksuid KSUID - - ts := timeToCorrectedUTCTimestamp(t) - binary.BigEndian.PutUint32(ksuid[:timestampLengthInBytes], ts) - - copy(ksuid[timestampLengthInBytes:], payload) - - return ksuid, nil -} - -// Constructs a KSUID from a 20-byte binary representation -func FromBytes(b []byte) (KSUID, error) { - var ksuid KSUID - - if len(b) != byteLength { - return Nil, errSize - } - - copy(ksuid[:], b) - return ksuid, nil -} - -// Sets the global source of random bytes for KSUID generation. This -// should probably only be set once globally. While this is technically -// thread-safe as in it won't cause corruption, there's no guarantee -// on ordering.
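-// -// Illustrative sketch, e.g. in tests that prefer speed over cryptographic strength: -// -// ksuid.SetRand(ksuid.FastRander)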
-func SetRand(r io.Reader) { - if r == nil { - rander = rand.Reader - return - } - rander = r -} - -// Implements comparison for KSUID type -func Compare(a, b KSUID) int { - return bytes.Compare(a[:], b[:]) -} - -// Sorts the given slice of KSUIDs -func Sort(ids []KSUID) { - quickSort(ids, 0, len(ids)-1) -} - -// Checks whether a slice of KSUIDs is sorted -func IsSorted(ids []KSUID) bool { - if len(ids) != 0 { - min := ids[0] - for _, id := range ids[1:] { - if bytes.Compare(min[:], id[:]) > 0 { - return false - } - min = id - } - } - return true -} - -func quickSort(a []KSUID, lo int, hi int) { - if lo < hi { - pivot := a[hi] - i := lo - 1 - - for j, n := lo, hi; j != n; j++ { - if bytes.Compare(a[j][:], pivot[:]) < 0 { - i++ - a[i], a[j] = a[j], a[i] - } - } - - i++ - if bytes.Compare(a[hi][:], a[i][:]) < 0 { - a[i], a[hi] = a[hi], a[i] - } - - quickSort(a, lo, i-1) - quickSort(a, i+1, hi) - } -} - -// Next returns the next KSUID after id. -func (id KSUID) Next() KSUID { - zero := makeUint128(0, 0) - - t := id.Timestamp() - u := uint128Payload(id) - v := add128(u, makeUint128(0, 1)) - - if v == zero { // overflow - t++ - } - - return v.ksuid(t) -} - -// Prev returns the previous KSUID before id. -func (id KSUID) Prev() KSUID { - max := makeUint128(math.MaxUint64, math.MaxUint64) - - t := id.Timestamp() - u := uint128Payload(id) - v := sub128(u, makeUint128(0, 1)) - - if v == max { // overflow - t-- - } - - return v.ksuid(t) -} diff --git a/hotelReservation/vendor/github.com/segmentio/ksuid/rand.go b/hotelReservation/vendor/github.com/segmentio/ksuid/rand.go deleted file mode 100644 index 66edbd4d8..000000000 --- a/hotelReservation/vendor/github.com/segmentio/ksuid/rand.go +++ /dev/null @@ -1,55 +0,0 @@ -package ksuid - -import ( - cryptoRand "crypto/rand" - "encoding/binary" - "io" - "math/rand" -) - -// FastRander is an io.Reader that uses math/rand and is optimized for -// generating 16-byte KSUID payloads. It is intended to be used as a -// performance improvement for programs that have no need for -// cryptographically secure KSUIDs and are generating a lot of them. -var FastRander = newRBG() - -func newRBG() io.Reader { - r, err := newRandomBitsGenerator() - if err != nil { - panic(err) - } - return r -} - -func newRandomBitsGenerator() (r io.Reader, err error) { - var seed int64 - - if seed, err = readCryptoRandomSeed(); err != nil { - return - } - - r = &randSourceReader{source: rand.NewSource(seed).(rand.Source64)} - return -} - -func readCryptoRandomSeed() (seed int64, err error) { - var b [8]byte - - if _, err = io.ReadFull(cryptoRand.Reader, b[:]); err != nil { - return - } - - seed = int64(binary.LittleEndian.Uint64(b[:])) - return -} - -type randSourceReader struct { - source rand.Source64 -} - -func (r *randSourceReader) Read(b []byte) (int, error) { - // optimized for generating 16 bytes payloads - binary.LittleEndian.PutUint64(b[:8], r.source.Uint64()) - binary.LittleEndian.PutUint64(b[8:], r.source.Uint64()) - return 16, nil -} diff --git a/hotelReservation/vendor/github.com/segmentio/ksuid/sequence.go b/hotelReservation/vendor/github.com/segmentio/ksuid/sequence.go deleted file mode 100644 index 9f1c33a0c..000000000 --- a/hotelReservation/vendor/github.com/segmentio/ksuid/sequence.go +++ /dev/null @@ -1,55 +0,0 @@ -package ksuid - -import ( - "encoding/binary" - "errors" - "math" -) - -// Sequence is a KSUID generator which produces a sequence of ordered KSUIDs -// from a seed. -// -// Up to 65536 KSUIDs can be generated for a single seed.
-// -// A typical usage of a Sequence looks like this: -// -// seq := ksuid.Sequence{ -// Seed: ksuid.New(), -// } -// id, err := seq.Next() -// -// Sequence values are not safe to use concurrently from multiple goroutines. -type Sequence struct { - // The seed is used as the base for the KSUID generator; all generated KSUIDs - // share the same leading 18 bytes of the seed. - Seed KSUID - count uint32 // uint32 for overflow, only 2 bytes are used -} - -// Next produces the next KSUID in the sequence, or returns an error if the -// sequence has been exhausted. -func (seq *Sequence) Next() (KSUID, error) { - id := seq.Seed // copy - count := seq.count - if count > math.MaxUint16 { - return Nil, errors.New("too many IDs were generated") - } - seq.count++ - return withSequenceNumber(id, uint16(count)), nil -} - -// Bounds returns the inclusive min and max bounds of the KSUIDs that may be -// generated by the sequence. If all ids have been generated already then the -// returned min value is equal to the max. -func (seq *Sequence) Bounds() (min KSUID, max KSUID) { - count := seq.count - if count > math.MaxUint16 { - count = math.MaxUint16 - } - return withSequenceNumber(seq.Seed, uint16(count)), withSequenceNumber(seq.Seed, math.MaxUint16) -} - -func withSequenceNumber(id KSUID, n uint16) KSUID { - binary.BigEndian.PutUint16(id[len(id)-2:], n) - return id -} diff --git a/hotelReservation/vendor/github.com/segmentio/ksuid/set.go b/hotelReservation/vendor/github.com/segmentio/ksuid/set.go deleted file mode 100644 index a6b0e6582..000000000 --- a/hotelReservation/vendor/github.com/segmentio/ksuid/set.go +++ /dev/null @@ -1,343 +0,0 @@ -package ksuid - -import ( - "bytes" - "encoding/binary" -) - -// CompressedSet is an immutable data type which stores a set of KSUIDs. -type CompressedSet []byte - -// Iter returns an iterator that produces all KSUIDs in the set. -func (set CompressedSet) Iter() CompressedSetIter { - return CompressedSetIter{ - content: []byte(set), - } -} - -// String satisfies the fmt.Stringer interface, returns a human-readable string -// representation of the set. -func (set CompressedSet) String() string { - b := bytes.Buffer{} - b.WriteByte('[') - set.writeTo(&b) - b.WriteByte(']') - return b.String() -} - -// GoString satisfies the fmt.GoStringer interface, returns a Go representation of -// the set. -func (set CompressedSet) GoString() string { - b := bytes.Buffer{} - b.WriteString("ksuid.CompressedSet{") - set.writeTo(&b) - b.WriteByte('}') - return b.String() -} - -func (set CompressedSet) writeTo(b *bytes.Buffer) { - a := [27]byte{} - - for i, it := 0, set.Iter(); it.Next(); i++ { - if i != 0 { - b.WriteString(", ") - } - b.WriteByte('"') - it.KSUID.Append(a[:0]) - b.Write(a[:]) - b.WriteByte('"') - } -} - -// Compress creates and returns a compressed set of KSUIDs from the list given -// as arguments. -func Compress(ids ...KSUID) CompressedSet { - c := 1 + byteLength + (len(ids) / 5) - b := make([]byte, 0, c) - return AppendCompressed(b, ids...) -} - -// AppendCompressed uses the given byte slice as pre-allocated storage space to -// build a KSUID set. -// -// Note that the set uses a compression technique to store the KSUIDs, so the -// resulting length is not 20 x len(ids). The rule of thumb here is for the given -// byte slice to reserve the amount of memory that the application would be OK -// to waste.
-func AppendCompressed(set []byte, ids ...KSUID) CompressedSet { - if len(ids) != 0 { - if !IsSorted(ids) { - Sort(ids) - } - one := makeUint128(0, 1) - - // The first KSUID is always written to the set, this is the starting - // point for all deltas. - set = append(set, byte(rawKSUID)) - set = append(set, ids[0][:]...) - - timestamp := ids[0].Timestamp() - lastKSUID := ids[0] - lastValue := uint128Payload(ids[0]) - - for i := 1; i != len(ids); i++ { - id := ids[i] - - if id == lastKSUID { - continue - } - - t := id.Timestamp() - v := uint128Payload(id) - - if t != timestamp { - d := t - timestamp - n := varintLength32(d) - - set = append(set, timeDelta|byte(n)) - set = appendVarint32(set, d, n) - set = append(set, id[timestampLengthInBytes:]...) - - timestamp = t - } else { - d := sub128(v, lastValue) - - if d != one { - n := varintLength128(d) - - set = append(set, payloadDelta|byte(n)) - set = appendVarint128(set, d, n) - } else { - l, c := rangeLength(ids[i+1:], t, id, v) - m := uint64(l + 1) - n := varintLength64(m) - - set = append(set, payloadRange|byte(n)) - set = appendVarint64(set, m, n) - - i += c - id = ids[i] - v = uint128Payload(id) - } - } - - lastKSUID = id - lastValue = v - } - } - return CompressedSet(set) -} - -func rangeLength(ids []KSUID, timestamp uint32, lastKSUID KSUID, lastValue uint128) (length int, count int) { - one := makeUint128(0, 1) - - for i := range ids { - id := ids[i] - - if id == lastKSUID { - continue - } - - if id.Timestamp() != timestamp { - count = i - return - } - - v := uint128Payload(id) - - if sub128(v, lastValue) != one { - count = i - return - } - - lastKSUID = id - lastValue = v - length++ - } - - count = len(ids) - return -} - -func appendVarint128(b []byte, v uint128, n int) []byte { - c := v.bytes() - return append(b, c[len(c)-n:]...) -} - -func appendVarint64(b []byte, v uint64, n int) []byte { - c := [8]byte{} - binary.BigEndian.PutUint64(c[:], v) - return append(b, c[len(c)-n:]...) -} - -func appendVarint32(b []byte, v uint32, n int) []byte { - c := [4]byte{} - binary.BigEndian.PutUint32(c[:], v) - return append(b, c[len(c)-n:]...) -} - -func varint128(b []byte) uint128 { - a := [16]byte{} - copy(a[16-len(b):], b) - return makeUint128FromPayload(a[:]) -} - -func varint64(b []byte) uint64 { - a := [8]byte{} - copy(a[8-len(b):], b) - return binary.BigEndian.Uint64(a[:]) -} - -func varint32(b []byte) uint32 { - a := [4]byte{} - copy(a[4-len(b):], b) - return binary.BigEndian.Uint32(a[:]) -} - -func varintLength128(v uint128) int { - if v[1] != 0 { - return 8 + varintLength64(v[1]) - } - return varintLength64(v[0]) -} - -func varintLength64(v uint64) int { - switch { - case (v & 0xFFFFFFFFFFFFFF00) == 0: - return 1 - case (v & 0xFFFFFFFFFFFF0000) == 0: - return 2 - case (v & 0xFFFFFFFFFF000000) == 0: - return 3 - case (v & 0xFFFFFFFF00000000) == 0: - return 4 - case (v & 0xFFFFFF0000000000) == 0: - return 5 - case (v & 0xFFFF000000000000) == 0: - return 6 - case (v & 0xFF00000000000000) == 0: - return 7 - default: - return 8 - } -} - -func varintLength32(v uint32) int { - switch { - case (v & 0xFFFFFF00) == 0: - return 1 - case (v & 0xFFFF0000) == 0: - return 2 - case (v & 0xFF000000) == 0: - return 3 - default: - return 4 - } -} - -const ( - rawKSUID = 0 - timeDelta = (1 << 6) - payloadDelta = (1 << 7) - payloadRange = (1 << 6) | (1 << 7) -) - -// CompressedSetIter is an iterator type returned by Set.Iter to produce the -// list of KSUIDs stored in a set. 
-// -// Here is how the iterator type is commonly used: -// -// for it := set.Iter(); it.Next(); { -// id := it.KSUID -// // ... -// } -// -// CompressedSetIter values are not safe to use concurrently from multiple -// goroutines. -type CompressedSetIter struct { - // KSUID is modified by calls to the Next method to hold the KSUID loaded - // by the iterator. - KSUID KSUID - - content []byte - offset int - - seqlength uint64 - timestamp uint32 - lastValue uint128 -} - -// Next moves the iterator forward, returning true if a KSUID was found, -// or false if the iterator has reached the end of the set it was created from. -func (it *CompressedSetIter) Next() bool { - if it.seqlength != 0 { - value := incr128(it.lastValue) - it.KSUID = value.ksuid(it.timestamp) - it.seqlength-- - it.lastValue = value - return true - } - - if it.offset == len(it.content) { - return false - } - - b := it.content[it.offset] - it.offset++ - - const mask = rawKSUID | timeDelta | payloadDelta | payloadRange - tag := int(b) & mask - cnt := int(b) & ^mask - - switch tag { - case rawKSUID: - off0 := it.offset - off1 := off0 + byteLength - - copy(it.KSUID[:], it.content[off0:off1]) - - it.offset = off1 - it.timestamp = it.KSUID.Timestamp() - it.lastValue = uint128Payload(it.KSUID) - - case timeDelta: - off0 := it.offset - off1 := off0 + cnt - off2 := off1 + payloadLengthInBytes - - it.timestamp += varint32(it.content[off0:off1]) - - binary.BigEndian.PutUint32(it.KSUID[:timestampLengthInBytes], it.timestamp) - copy(it.KSUID[timestampLengthInBytes:], it.content[off1:off2]) - - it.offset = off2 - it.lastValue = uint128Payload(it.KSUID) - - case payloadDelta: - off0 := it.offset - off1 := off0 + cnt - - delta := varint128(it.content[off0:off1]) - value := add128(it.lastValue, delta) - - it.KSUID = value.ksuid(it.timestamp) - it.offset = off1 - it.lastValue = value - - case payloadRange: - off0 := it.offset - off1 := off0 + cnt - - value := incr128(it.lastValue) - it.KSUID = value.ksuid(it.timestamp) - it.seqlength = varint64(it.content[off0:off1]) - it.offset = off1 - it.seqlength-- - it.lastValue = value - - default: - panic("KSUID set iterator is reading malformed data") - } - - return true -} diff --git a/hotelReservation/vendor/github.com/segmentio/ksuid/uint128.go b/hotelReservation/vendor/github.com/segmentio/ksuid/uint128.go deleted file mode 100644 index 88f46bd86..000000000 --- a/hotelReservation/vendor/github.com/segmentio/ksuid/uint128.go +++ /dev/null @@ -1,136 +0,0 @@ -package ksuid - -import "fmt" - -// uint128 represents an unsigned 128-bit little-endian integer.
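-// -// The two uint64 limbs are stored least significant first: v[0] holds the low -// 64 bits and v[1] the high 64 bits, as constructed by makeUint128 below.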
-type uint128 [2]uint64 - -func uint128Payload(ksuid KSUID) uint128 { - return makeUint128FromPayload(ksuid[timestampLengthInBytes:]) -} - -func makeUint128(high uint64, low uint64) uint128 { - return uint128{low, high} -} - -func makeUint128FromPayload(payload []byte) uint128 { - return uint128{ - // low - uint64(payload[8])<<56 | - uint64(payload[9])<<48 | - uint64(payload[10])<<40 | - uint64(payload[11])<<32 | - uint64(payload[12])<<24 | - uint64(payload[13])<<16 | - uint64(payload[14])<<8 | - uint64(payload[15]), - // high - uint64(payload[0])<<56 | - uint64(payload[1])<<48 | - uint64(payload[2])<<40 | - uint64(payload[3])<<32 | - uint64(payload[4])<<24 | - uint64(payload[5])<<16 | - uint64(payload[6])<<8 | - uint64(payload[7]), - } -} - -func (v uint128) ksuid(timestamp uint32) KSUID { - return KSUID{ - // time - byte(timestamp >> 24), - byte(timestamp >> 16), - byte(timestamp >> 8), - byte(timestamp), - - // high - byte(v[1] >> 56), - byte(v[1] >> 48), - byte(v[1] >> 40), - byte(v[1] >> 32), - byte(v[1] >> 24), - byte(v[1] >> 16), - byte(v[1] >> 8), - byte(v[1]), - - // low - byte(v[0] >> 56), - byte(v[0] >> 48), - byte(v[0] >> 40), - byte(v[0] >> 32), - byte(v[0] >> 24), - byte(v[0] >> 16), - byte(v[0] >> 8), - byte(v[0]), - } -} - -func (v uint128) bytes() [16]byte { - return [16]byte{ - // high - byte(v[1] >> 56), - byte(v[1] >> 48), - byte(v[1] >> 40), - byte(v[1] >> 32), - byte(v[1] >> 24), - byte(v[1] >> 16), - byte(v[1] >> 8), - byte(v[1]), - - // low - byte(v[0] >> 56), - byte(v[0] >> 48), - byte(v[0] >> 40), - byte(v[0] >> 32), - byte(v[0] >> 24), - byte(v[0] >> 16), - byte(v[0] >> 8), - byte(v[0]), - } -} - -func (v uint128) String() string { - return fmt.Sprintf("0x%016X%016X", v[0], v[1]) -} - -const wordBitSize = 64 - -func cmp128(x, y uint128) int { - for i := len(x); i != 0; { - i-- - switch { - case x[i] < y[i]: - return -1 - case x[i] > y[i]: - return +1 - } - } - return 0 -} - -func add128(x, y uint128) (z uint128) { - var c uint64 - for i, xi := range x { - yi := y[i] - zi := xi + yi + c - z[i] = zi - c = (xi&yi | (xi|yi)&^zi) >> (wordBitSize - 1) - } - return -} - -func sub128(x, y uint128) (z uint128) { - var c uint64 - for i, xi := range x { - yi := y[i] - zi := xi - yi - c - z[i] = zi - c = (yi&^xi | (yi|^xi)&zi) >> (wordBitSize - 1) - } - return -} - -func incr128(x uint128) uint128 { - return add128(x, uint128{1, 0}) -} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/.travis.yml b/hotelReservation/vendor/github.com/uber/jaeger-client-go/.travis.yml index d1cf0a524..435aea1d5 100644 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/.travis.yml +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/.travis.yml @@ -7,17 +7,26 @@ dist: trusty matrix: include: - - go: 1.7 + # - go: 1.15.x + # env: + # - TESTS=true + # - USE_DEP=true + # - COVERAGE=true + - go: 1.15.x env: - - TESTS=true - - COVERAGE=true - - go: 1.7 - env: - - CROSSDOCK=true - - go: 1.7 - env: - - TESTS=true - USE_DEP=true + - CROSSDOCK=true + # - go: 1.15.x + # env: + # - TESTS=true + # - USE_DEP=false + # - USE_GLIDE=true + # test with previous version of Go + # - go: 1.14.x + # env: + # - TESTS=true + # - USE_DEP=true + # - CI_SKIP_LINT=true services: - docker @@ -25,7 +34,6 @@ services: env: global: - DOCKER_COMPOSE_VERSION=1.8.0 - - GO15VENDOREXPERIMENT=1 - COMMIT=${TRAVIS_COMMIT::8} # DOCKER_PASS - secure: 
"CnjVyxNvMC/dhr/eR7C+FiWucZ4/O5LfAuz9YU0qlnV6XLR7XXRtzZlfFKIImJT6xHp+OptTqAIXqUbvwK2OXDP1ZsLiWRm+2elb9/isGusWXjs3g817lX8njSUcIFILbfi+vAE7UD2BKjHxpmvWmCZidisU1rcaZ9OQNPqMnNIDxVx0FOTwYx+2hfkdjnN5dikzafBDQ6ZZV/mGbcaTG45GGFU6DHyVLzf9qCPXyXnz2+VDhcoPQsYkzE56XHCmHxvEfXxgfqYefJNUlFPhniAQySVsCNVDJ8QcCV6uHaXoIzxJKx9FdUnWKI1/AtpQsTZPgEm4Ujnt+kGJsXopXy2Xx4MZxmcTCBwAMjZxPMF7KoojbtDeOZgEMtf1tGPN6DTNc3NpVmr0BKZ44lhqk+vnd8HAiC1tHDEoSb1Esl7dMUUf1qZAh3MtT+NYi3mTwyx/ilXUS7KPyy7x0ezB3kGuMoLhvR2hrprqRr5NOV2hrd1au+IXmb+4IanFOsBlceBfs8P0JFMO/aw15r+HimSZpQsJx//IT0LReCZYXLe0/WVsF/8+HDwHKlO99gGpk4iXlNKKvdPWabihMp3I3peMrvL+jnlwh47RqHs/0Q71xsKjVWTn+Svq3FpVP0Pgyxhg+oG4WEByBiLnBQcZwSBhWexkJrNI73GzaZiIldk=" @@ -33,7 +41,7 @@ env: - secure: "bpBSmypHzI4PnteM4cwLiMC2163Sj/4mEl+1dj+6NWl2tr1hREeVXKhsWBpah25n6BDyr2A4yhBZcWLaNKrsCKT3U37csAQTOFVeQ9x5xhPq+ohANd/OsspFsxNZaKwx161LizH/uTDotMxxevZacsyYWGNv/cRFkwcQ8upLkReRR6puJ+jNQC0BFpKWBJY/zpm5J7xFb7FO20LvQVyRgsgzqWmg9oRNVw9uwOfSY3btacftYctDLUbAr8YRNHd2C6dZnMAi8KdDTLXKTqjKmp6WidOmi92Ml7tOjB+bV6TOaVAhrcI5Rdje4rRWG4MucAjPMP0ZBW36KTfcGqFUcDhX7UqISe2WxoI+8ZD6fJ+nNtD3bk4YAUJB4BSs2sQdiYyjpHyGJR6RW50+3uRz2YbXpzVr9wqv2lZSl/xy3wC5Hag55uqzVlSiDw2pK8lctT3dnQveE7PqAI577PjF2NrHlgrBbykOwwUCNbRTmykzqoDnkxclmiZ+rflEeWsSYglePK/d6Gj9+N7wJZM5heprdJJMFTrzMWZ21Ll9ZGY9updCBKmJA8pBYiLHbu0lWOp+9QUGC+621Zq0d1PHhN6L4eXk/f3RNoZTr//cX6WdNmmO7tBbaGpmp/UYiYTY1WO9vP7tCDsT75k285HCfnIrlGRdbCZZbfuYNGPKIQ0=" install: - - make install-ci + - make install-ci USE_DEP=$USE_DEP - if [ "$CROSSDOCK" == true ]; then bash ./travis/install-crossdock-deps.sh ; fi script: diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md b/hotelReservation/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md index d55a37304..964a4049c 100644 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md @@ -1,6 +1,248 @@ Changes by Version ================== +2.30.0 (2021-12-07) +------------------- +- Add deprecation notice -- Yuri Shkuro +- Use public struct for tracer options to document initialization better (#605) -- Yuri Shkuro +- Remove redundant newline in NewReporter init message (#603) -- wwade +- [zipkin] Encode span IDs as full 16-hex strings #601 -- Nathan +- [docs] Replace godoc.org with pkg.go.dev (#591) -- Aaron Jheng +- Remove outdated reference to Zipkin model. -- Yuri Shkuro +- Move thrift compilation to a script (#590) -- Aaron Jheng +- Document JAEGER_TRACEID_128BIT env var -- Yuri Shkuro + +2.29.1 (2021-05-24) +------------------- +- Remove dependency on "testing" in "thrift" (#586) -- @yurishkuro + + +2.29.0 (2021-05-20) +------------------- +- Update vendored thrift to 0.14.1 (#584) -- @nhatthm + + +2.28.0 (2021-04-30) +------------------- +- HTTPSamplingStrategyFetcher: Use http client with 10 second timeout (#578) -- Joe Elliott + + +2.27.0 (2021-04-19) +------------------- +- Don't override HTTP Reporter batch size to 1; default to 100, user can override (#571) -- R. 
Aidan Campbell + + +2.26.0 (2021-04-16) +------------------- +- Delete a baggage item when value is blank (#562) -- evan.kim +- Trim baggage key when parsing (#566) -- sicong.huang +- feat: extend configuration to support custom randomNumber func (#555) -- NemoO_o +- Support JAEGER_TRACEID_128BIT env var (#547) -- Yuri Shkuro +- Additional context protections (#544) -- Joe Elliott +- Lock RemotelyControlledSampler.sampler on callbacks (#543) -- Dima +- Upgrade build to Go 1.15 (#539) -- Yuri Shkuro +- Upgrade to jaeger-lib@2.3.0 to fix broken codahale/hdrhistogram dependency (#537) -- Yuri Shkuro +- Prefix TraceID/SpanID.String() with zeroes (#533) -- Lukas Vogel +- Upgrade to OpenTracing Go 1.2 (#525) -- Yuri Shkuro + + +2.25.0 (2020-07-13) +------------------- +## Breaking changes +- [feat] Periodically re-resolve UDP server address, with opt-out (#520) -- Trevor Foster + + The re-resolving of UDP address is now enabled by default, to make the client more robust in Kubernetes deployments. + The old resolve-once behavior can be restored by setting DisableAttemptReconnecting=true in the Configuration struct, + or via JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED=true environment variable. + +## Bug fixes +- Do not add invalid context to references (#521) -- Yuri Shkuro + + +2.24.0 (2020-06-14) +------------------- +- Mention FromEnv() in the README, docs, and examples (#518) -- Martin Lercher +- Serialize access to RemotelyControlledSampler.sampler (#515) -- Dima +- Override reporter config only when agent host/port is set in env (#513) -- ilylia +- Converge on JAEGER_SAMPLING_ENDPOINT env variable (#511) -- Eundoo Song + + +2.23.1 (2020-04-28) +------------------- +- Fix regression by handling nil logger correctly ([#507](https://github.com/jaegertracing/jaeger-client-go/pull/507)) -- Prithvi Raj + + +2.23.0 (2020-04-22) +------------------- + +- Add the ability to log all span interactions at a new debug log level([#502](https://github.com/jaegertracing/jaeger-client-go/pull/502), [#503](https://github.com/jaegertracing/jaeger-client-go/pull/503), [#504](https://github.com/jaegertracing/jaeger-client-go/pull/504)) -- Prithvi Raj +- Chore (docs): fix typos ([#496](https://github.com/jaegertracing/jaeger-client-go/pull/496), [#498](https://github.com/jaegertracing/jaeger-client-go/pull/498)) -- Febrian Setianto and Ivan Babrou +- Unset highest bit of traceID in probabilistic sampler ([#490](https://github.com/jaegertracing/jaeger-client-go/pull/490)) -- Sokolov Yura + +2.22.1 (2020-01-16) +------------------- + +- Increase UDP batch overhead to account for data loss metrics ([#488](https://github.com/jaegertracing/jaeger-client-go/pull/488)) -- Yuri Shkuro + + +2.22.0 (2020-01-15) +------------------- + +- Report data loss stats to Jaeger backend ([#482](https://github.com/jaegertracing/jaeger-client-go/pull/482)) -- Yuri Shkuro +- Add limit on log records per span ([#483](https://github.com/jaegertracing/jaeger-client-go/pull/483)) -- Sokolov Yura + + +2.21.1 (2019-12-20) +------------------- + +- Update version correctly. 
+ + +2.21.0 (2019-12-20) +------------------- + +- Clarify reporting error logs ([#469](https://github.com/jaegertracing/jaeger-client-go/pull/469)) -- Yuri Shkuro +- Do not strip leading zeros from trace IDs ([#472](https://github.com/jaegertracing/jaeger-client-go/pull/472)) -- Yuri Shkuro +- Chore (docs): fixed a couple of typos ([#475](https://github.com/jaegertracing/jaeger-client-go/pull/475)) -- Marc Bramaud +- Support custom HTTP headers when reporting spans over HTTP ([#479](https://github.com/jaegertracing/jaeger-client-go/pull/479)) -- Albert Teoh + + +2.20.1 (2019-11-08) +------------------- + +Minor patch via https://github.com/jaegertracing/jaeger-client-go/pull/468 + +- Make `AdaptiveSamplerUpdater` usable with default values; Resolves #467 +- Create `OperationNameLateBinding` sampler option and config option +- Make `SamplerOptions` var of public type, so that its functions are discoverable via godoc + + +2.20.0 (2019-11-06) +------------------- + +## New Features + +- Allow all in-process spans of a trace to share sampling state (#443) -- Prithvi Raj + + Sampling state is shared between all spans of the trace that are still in memory. + This allows implementation of delayed sampling decisions (see below). + +- Support delayed sampling decisions (#449) -- Yuri Shkuro + + This is a large structural change to how the samplers work. + It allows some samplers to be executed multiple times on different + span events (like setting a tag) and make a positive sampling decision + later in the span life cycle, or even based on children spans. + See [README](./README.md#delayed-sampling) for more details. + + There is a related minor change in behavior of the adaptive (per-operation) sampler, + which will no longer re-sample the trace when `span.SetOperation()` is called, i.e. the + operation used to make the sampling decision is always the one provided at span creation. + +- Add experimental tag matching sampler (#452) -- Yuri Shkuro + + A sampler that can sample a trace based on a certain tag added to the root + span or one of its local (in-process) children. The sampler can be used with + another experimental `PrioritySampler` that allows multiple samplers to try + to make a sampling decision, in a certain priority order. 
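The `OperationNameLateBinding` flag mentioned under 2.20.1 above is the config-level entry point to this delayed-sampling machinery: it keeps the sampling decision non-final until `SetOperationName` is called, so the per-operation sampler sees the real span name. A hedged sketch of enabling it on the remote sampler, with the service name as a placeholder:

```go
package main

import (
	jaeger "github.com/uber/jaeger-client-go"
	jaegercfg "github.com/uber/jaeger-client-go/config"
)

// Sketch: delay the final sampling decision until the span's operation
// name is bound via span.SetOperationName().
func newLateBindingConfig() jaegercfg.Configuration {
	return jaegercfg.Configuration{
		ServiceName: "example-service", // placeholder
		Sampler: &jaegercfg.SamplerConfig{
			Type:                     jaeger.SamplerTypeRemote,
			Param:                    0.001, // initial rate until the first remote strategy arrives
			OperationNameLateBinding: true,
		},
	}
}
```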
+ +- [log/zap] Report whether a trace was sampled (#445) -- Abhinav Gupta +- Allow config.FromEnv() to enrich an existing config object (#436) -- Vineeth Reddy + +## Minor patches + +- Expose Sampler on Tracer and accept sampler options via Configuration (#460) -- Yuri Shkuro +- Fix github.com/uber-go/atomic import (#464) -- Yuri Shkuro +- Add nodejs to crossdock tests (#441) -- Bhavin Gandhi +- Bump Go compiler version to 1.13 (#453) -- Yuri Shkuro + +2.19.0 (2019-09-23) +------------------- + +- Upgrade jaeger-lib to 2.2 and unpin Prom client (#434) -- Yuri Shkuro + + +2.18.1 (2019-09-16) +------------------- + +- Remove go.mod / go.sum that interfere with `go get` (#432) + + +2.18.0 (2019-09-09) +------------------- + +- Add option "noDebugFlagOnForcedSampling" for tracer initialization [resolves #422] (#423) + + +2.17.0 (2019-08-30) +------------------- + +- Add a flag for firehose mode (#419) +- Default sampling server URL to agent (#414) +- Update default sampling rate when sampling strategy is refreshed (#413) +- Support "Self" Span Reference (#411) +- Don't complain about blank service name if tracing is Disabled (#410) Yuri +- Use IP address from tag if exist (#402) +- Expose span data to custom reporters [fixes #394] (#399) +- Fix the span allocation in the pool (#381) + + +2.16.0 (2019-03-24) +------------------- + +- Add baggage to B3 codec (#319) +- Add support for 128bit trace ids to zipkin thrift spans. (#378) +- Update zipkin propagation logic to support 128bit traceIDs (#373) +- Accept "true" for the x-b3-sampled header (#356) + +- Allow setting of PoolSpans from Config object (#322) +- Make propagators public to allow wrapping (#379) +- Change default metric namespace to use relevant separator for the metric backend (#364) +- Change metrics prefix to jaeger_tracer and add descriptions (#346) +- Bump OpenTracing to ^1.1.x (#383) +- Upgrade jaeger-lib to v2.0.0 (#359) +- Avoid defer when generating random number (#358) +- Use a pool of rand.Source to reduce lock contention when creating span ids (#357) +- Make JAEGER_ENDPOINT take priority over JAEGER_AGENT_XXX (#342) + + +2.15.0 (2018-10-10) +------------------- + +- Fix FollowsFrom spans ignoring baggage/debug header from dummy parent context (#313) +- Make maximum annotation length configurable in tracer options (#318) +- Support more environment variables in configuration (#323) +- Print error on Sampler Query failure (#328) +- Add an HTTPOption to support custom http.RoundTripper (#333) +- Return an error when an HTTP error code is seen in zipkin HTTP transport (#331) + + +2.14.0 (2018-04-30) +------------------- + +- Support throttling for debug traces (#274) +- Remove dependency on Apache Thrift (#303) +- Remove dependency on tchannel (#295) (#294) +- Test with Go 1.9 (#298) + + +2.13.0 (2018-04-15) +------------------- + +- Use value receiver for config.NewTracer() (#283) +- Lock span during jaeger thrift conversion (#273) +- Fix the RemotelyControlledSampler so that it terminates go-routine on Close() (#260) +- Added support for client configuration via env vars (#275) +- Allow overriding sampler in the Config (#270) + + +2.12.0 (2018-03-14) +------------------- + +- Use lock when retrieving span.Context() (#268) +- Add Configuration support for custom Injector and Extractor (#263) + + 2.11.2 (2018-01-12) ------------------- diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/CODEOWNERS b/hotelReservation/vendor/github.com/uber/jaeger-client-go/CODEOWNERS new file mode 100644 index 
000000000..0572efcd4 --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/CODEOWNERS @@ -0,0 +1,2 @@ + +* @jaegertracing/jaeger-maintainers diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md b/hotelReservation/vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md index 7bf077d6f..41e2154cf 100644 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md @@ -19,7 +19,7 @@ file for details. ## Getting Started -This library uses [glide](https://github.com/Masterminds/glide) to manage dependencies. +This library uses [dep](https://golang.github.io/dep/) to manage dependencies. To get started, make sure you clone the Git repository into the correct location `github.com/uber/jaeger-client-go` relative to `$GOPATH`: ``` mkdir -p $GOPATH/src/github.com/uber cd $GOPATH/src/github.com/uber git clone git@github.com:jaegertracing/jaeger-client-go.git jaeger-client-go cd jaeger-client-go +git submodule update --init --recursive ``` Then install dependencies and run the tests: ``` -git submodule update --init --recursive -glide install +make install make test ``` @@ -45,13 +45,13 @@ This project follows the following pattern for grouping imports in Go files: * imports from standard library * imports from other projects * imports from `jaeger-client-go` project - + For example: ```go import ( "fmt" - + "github.com/uber/jaeger-lib/metrics" "go.uber.org/zap" @@ -73,8 +73,14 @@ pull request is most likely to be accepted if it: * Follows the guidelines in [Effective Go](https://golang.org/doc/effective_go.html) and the [Go team's common code review comments](https://github.com/golang/go/wiki/CodeReviewComments). -* Has a [good commit - message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html). +* Has a [good commit message](https://chris.beams.io/posts/git-commit/): + * Separate subject from body with a blank line + * Limit the subject line to 50 characters + * Capitalize the subject line + * Do not end the subject line with a period + * Use the imperative mood in the subject line + * Wrap the body at 72 characters + * Use the body to explain _what_ and _why_ instead of _how_ * Each commit must be signed by the author ([see below](#sign-your-work)).
## License diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/Gopkg.lock b/hotelReservation/vendor/github.com/uber/jaeger-client-go/Gopkg.lock index 2b3b1301b..268289bb4 100644 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/Gopkg.lock +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/Gopkg.lock @@ -2,134 +2,300 @@ [[projects]] - name = "github.com/apache/thrift" - packages = ["lib/go/thrift"] - revision = "b2a4d4ae21c789b689dd162deb819665567f481c" - version = "0.10.0" + digest = "1:4c4c33075b704791d6a7f09dfb55c66769e8a1dc6adf87026292d274fe8ad113" + name = "github.com/HdrHistogram/hdrhistogram-go" + packages = ["."] + pruneopts = "UT" + revision = "3a0bb77429bd3a61596f5e8a3172445844342120" + version = "0.9.0" [[projects]] - branch = "master" + digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d" name = "github.com/beorn7/perks" packages = ["quantile"] - revision = "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9" - -[[projects]] - branch = "master" - name = "github.com/codahale/hdrhistogram" - packages = ["."] - revision = "3a0bb77429bd3a61596f5e8a3172445844342120" + pruneopts = "UT" + revision = "37c8de3658fcb183f997c4e13e8337516ab753e6" + version = "v1.0.1" [[projects]] branch = "master" + digest = "1:a382acd6150713655ded76ab5fbcbc7924a7808dab4312dda5d1f23dd8ce5277" name = "github.com/crossdock/crossdock-go" - packages = [".","assert","require"] + packages = [ + ".", + "assert", + "require", + ] + pruneopts = "UT" revision = "049aabb0122b03bc9bd30cab8f3f91fb60166361" [[projects]] + digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" name = "github.com/davecgh/go-spew" packages = ["spew"] - revision = "346938d642f2ec3594ed81d874461961cd0faa76" - version = "v1.1.0" + pruneopts = "UT" + revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" + version = "v1.1.1" [[projects]] - branch = "master" + digest = "1:7ae311278f7ccaa724de8f2cdec0a507ba3ee6dea8c77237e8157bcf64b0f28b" + name = "github.com/golang/mock" + packages = ["gomock"] + pruneopts = "UT" + revision = "f7b1909c82a8958747e5c87c6a5c3b2eaed8a33d" + version = "v1.4.4" + +[[projects]] + digest = "1:4a32eb57407190eced21a21abee9ce4d4ab6f0bf113ca61cb1cb2d549a65c985" name = "github.com/golang/protobuf" - packages = ["proto"] - revision = "1e59b77b52bf8e4b449a57e6f79f21226d571845" + packages = [ + "proto", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/timestamp", + ] + pruneopts = "UT" + revision = "d04d7b157bb510b1e0c10132224b616ac0e26b17" + version = "v1.4.2" [[projects]] + digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc" name = "github.com/matttproud/golang_protobuf_extensions" packages = ["pbutil"] - revision = "3247c84500bff8d9fb6d579d800f20b3e091582c" - version = "v1.0.0" + pruneopts = "UT" + revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" + version = "v1.0.1" [[projects]] + digest = "1:fe5217d44ae8fb84f711968816fe50077cea9dfa8f44425b8e44e7e3de896d01" name = "github.com/opentracing/opentracing-go" - packages = [".","ext","log"] - revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38" - version = "v1.0.2" + packages = [ + ".", + "ext", + "harness", + "log", + ] + pruneopts = "UT" + revision = "d34af3eaa63c4d08ab54863a4bdd0daa45212e12" + version = "v1.2.0" [[projects]] + digest = "1:9e1d37b58d17113ec3cb5608ac0382313c5b59470b94ed97d0976e69c7022314" + name = "github.com/pkg/errors" + packages = ["."] + pruneopts = "UT" + revision = "614d223910a179a466c1767a985424175c39b465" + version = "v0.9.1" + 
+[[projects]] + digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" name = "github.com/pmezard/go-difflib" packages = ["difflib"] + pruneopts = "UT" revision = "792786c7400a136282c1664665ae0a8db921c6c2" version = "v1.0.0" [[projects]] + digest = "1:7097829edd12fd7211fca0d29496b44f94ef9e6d72f88fb64f3d7b06315818ad" name = "github.com/prometheus/client_golang" - packages = ["prometheus"] - revision = "c5b7fccd204277076155f10851dad72b76a49317" - version = "v0.8.0" + packages = [ + "prometheus", + "prometheus/internal", + ] + pruneopts = "UT" + revision = "170205fb58decfd011f1550d4cfb737230d7ae4f" + version = "v1.1.0" [[projects]] - branch = "master" + digest = "1:0db23933b8052702d980a3f029149b3f175f7c0eea0cff85b175017d0f2722c0" name = "github.com/prometheus/client_model" packages = ["go"] - revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c" + pruneopts = "UT" + revision = "7bc5445566f0fe75b15de23e6b93886e982d7bf9" + version = "v0.2.0" [[projects]] - branch = "master" + digest = "1:4407525bde4e6ad9c1f60113d38cbb255d769e0ea506c8cf877db39da7753b3a" name = "github.com/prometheus/common" - packages = ["expfmt","internal/bitbucket.org/ww/goautoneg","model"] - revision = "2e54d0b93cba2fd133edc32211dcc32c06ef72ca" + packages = [ + "expfmt", + "internal/bitbucket.org/ww/goautoneg", + "model", + ] + pruneopts = "UT" + revision = "317b7b125e8fddda956d0c9574e5f03f438ed5bc" + version = "v0.14.0" [[projects]] - branch = "master" + digest = "1:b2268435af85ee1a0fca0e37de4225f78e2d9d8b0b66acde3a29f127634efa87" name = "github.com/prometheus/procfs" - packages = [".","xfs"] - revision = "b15cd069a83443be3154b719d0cc9fe8117f09fb" + packages = [ + ".", + "internal/fs", + "internal/util", + ] + pruneopts = "UT" + revision = "9dece15c53cd5e9fbfbd72d5108adcf526a3f486" + version = "v0.2.0" [[projects]] - name = "github.com/stretchr/testify" - packages = ["assert","require","suite"] - revision = "b91bfb9ebec76498946beb6af7c0230c7cc7ba6c" - version = "v1.2.0" - -[[projects]] - name = "github.com/uber-go/atomic" + digest = "1:86ff4af7b6bb3d27c2e89b5ef8c139678acff1cad74a3c5235fc5af6b94fcc9e" + name = "github.com/stretchr/objx" packages = ["."] - revision = "8474b86a5a6f79c443ce4b2992817ff32cf208b8" - version = "v1.3.1" + pruneopts = "UT" + revision = "35313a95ee26395aa17d366c71a2ccf788fa69b6" + version = "v0.3.0" [[projects]] - name = "github.com/uber/jaeger-lib" - packages = ["metrics","metrics/prometheus","metrics/testutils"] - revision = "5519f3beabf28707fca8d3f47f9cff87dad48d96" - version = "v1.3.0" + digest = "1:5201127841a78d84d0ca68a2e564c08e3882c0fb9321a75997ce87926e0d63ea" + name = "github.com/stretchr/testify" + packages = [ + "assert", + "mock", + "require", + "suite", + ] + pruneopts = "UT" + revision = "f654a9112bbeac49ca2cd45bfbe11533c4666cf8" + version = "v1.6.1" [[projects]] - name = "github.com/uber/tchannel-go" - packages = [".","internal/argreader","relay","thrift","thrift/gen-go/meta","tnet","tos","trand","typed"] - revision = "7bcb33f4bb908c21374f8bfdc9d4b1ebcbb8dab0" - version = "v1.8.1" + digest = "1:4af46f2faea30e52c96ec9ec32bb654d2729579a80d242b0acfa193ad321eb61" + name = "github.com/uber/jaeger-lib" + packages = [ + "metrics", + "metrics/metricstest", + "metrics/prometheus", + ] + pruneopts = "UT" + revision = "48cc1df63e6be0d63b95677f0d22beb880bce1e4" + version = "v2.3.0" [[projects]] + digest = "1:7a3de4371d6b68c6f37a0df2c09905664d9de59026c91cbe275aae55f4fe760f" name = "go.uber.org/atomic" packages = ["."] - revision = "54f72d32435d760d5604f17a82e2435b28dc4ba5" - 
version = "v1.3.0" + pruneopts = "UT" + revision = "12f27ba2637fa0e13772a4f05fa46a5d18d53182" + version = "v1.7.0" [[projects]] + digest = "1:e9eeeabfd025a5e69b9c8e2857d3517ea67e747ae913bcb0a9e1e7bafdb9c298" name = "go.uber.org/multierr" packages = ["."] - revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a" - version = "v1.1.0" + pruneopts = "UT" + revision = "3114a8b704d2d28dbacda34a872690aaef66aeed" + version = "v1.6.0" [[projects]] + digest = "1:0398f5f0e2e9233f25fad702f3b323241daf9f876cc869ab259238cf1bced236" name = "go.uber.org/zap" - packages = [".","buffer","internal/bufferpool","internal/color","internal/exit","zapcore"] - revision = "35aad584952c3e7020db7b839f6b102de6271f89" - version = "v1.7.1" + packages = [ + ".", + "buffer", + "internal/bufferpool", + "internal/color", + "internal/exit", + "zapcore", + "zaptest/observer", + ] + pruneopts = "UT" + revision = "404189cf44aea95b0cd9bddcb0242dd4cf88c510" + version = "v1.16.0" [[projects]] branch = "master" + digest = "1:f8b491a7c25030a895a0e579742d07136e6958e77ef2d46e769db8eec4e58fcd" name = "golang.org/x/net" - packages = ["bpf","context","context/ctxhttp","internal/iana","internal/socket","ipv4","ipv6"] - revision = "434ec0c7fe3742c984919a691b2018a6e9694425" + packages = [ + "context", + "context/ctxhttp", + ] + pruneopts = "UT" + revision = "328152dc79b1547da63f950cd4cdd9afd50b2774" + +[[projects]] + branch = "master" + digest = "1:1e581fa394685ef0d84008ae04cf3414390c1a700c04846853869cb4ac2fec86" + name = "golang.org/x/sys" + packages = [ + "internal/unsafeheader", + "unix", + "windows", + ] + pruneopts = "UT" + revision = "d9f96fdee20d1e5115ee34ba4016eae6cfb66eb9" + +[[projects]] + digest = "1:fd328c5b52e433ea3ffc891bcc4f94469a82bf478558208db2b386aad8a304a1" + name = "google.golang.org/protobuf" + packages = [ + "encoding/prototext", + "encoding/protowire", + "internal/descfmt", + "internal/descopts", + "internal/detrand", + "internal/encoding/defval", + "internal/encoding/messageset", + "internal/encoding/tag", + "internal/encoding/text", + "internal/errors", + "internal/fieldsort", + "internal/filedesc", + "internal/filetype", + "internal/flags", + "internal/genid", + "internal/impl", + "internal/mapsort", + "internal/pragma", + "internal/set", + "internal/strs", + "internal/version", + "proto", + "reflect/protoreflect", + "reflect/protoregistry", + "runtime/protoiface", + "runtime/protoimpl", + "types/known/anypb", + "types/known/durationpb", + "types/known/timestamppb", + ] + pruneopts = "UT" + revision = "3f7a61f89bb6813f89d981d1870ed68da0b3c3f1" + version = "v1.25.0" + +[[projects]] + branch = "v3" + digest = "1:229cb0f6192914f518cc1241ede6d6f1f458b31debfa18bf3a5c9e4f7b01e24b" + name = "gopkg.in/yaml.v3" + packages = ["."] + pruneopts = "UT" + revision = "eeeca48fe7764f320e4870d231902bf9c1be2c08" [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "7a06a767b8089d38c4676b92b49cab2020b7507e7d5ea2738533c9a9865782ad" + input-imports = [ + "github.com/crossdock/crossdock-go", + "github.com/golang/mock/gomock", + "github.com/opentracing/opentracing-go", + "github.com/opentracing/opentracing-go/ext", + "github.com/opentracing/opentracing-go/harness", + "github.com/opentracing/opentracing-go/log", + "github.com/pkg/errors", + "github.com/prometheus/client_golang/prometheus", + "github.com/stretchr/testify/assert", + "github.com/stretchr/testify/mock", + "github.com/stretchr/testify/require", + "github.com/stretchr/testify/suite", + "github.com/uber/jaeger-lib/metrics", + 
"github.com/uber/jaeger-lib/metrics/metricstest", + "github.com/uber/jaeger-lib/metrics/prometheus", + "go.uber.org/atomic", + "go.uber.org/zap", + "go.uber.org/zap/zapcore", + "go.uber.org/zap/zaptest/observer", + ] solver-name = "gps-cdcl" solver-version = 1 diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/Gopkg.toml b/hotelReservation/vendor/github.com/uber/jaeger-client-go/Gopkg.toml index ac3e10fcb..3aa307a90 100644 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/Gopkg.toml +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/Gopkg.toml @@ -1,35 +1,31 @@ -[[constraint]] - name = "github.com/apache/thrift" - version = ">=0.9.3, <0.11.0" - [[constraint]] name = "github.com/crossdock/crossdock-go" + branch = "master" [[constraint]] name = "github.com/opentracing/opentracing-go" - version = "^1" + version = "^1.2" [[constraint]] name = "github.com/prometheus/client_golang" - version = "0.8.0" + version = "^1" [[constraint]] name = "github.com/stretchr/testify" version = "^1.1.3" [[constraint]] - name = "github.com/uber-go/atomic" + name = "go.uber.org/atomic" + version = "^1" [[constraint]] name = "github.com/uber/jaeger-lib" - version = "^1.3" - -[[constraint]] - name = "github.com/uber/tchannel-go" - version = "^1.1.0" + version = "^2.3" [[constraint]] name = "go.uber.org/zap" + version = "^1" -[[constraint]] - name = "golang.org/x/net" +[prune] + go-tests = true + unused-packages = true diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/Makefile b/hotelReservation/vendor/github.com/uber/jaeger-client-go/Makefile index f1231a8db..ee7b21268 100644 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/Makefile +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/Makefile @@ -1,14 +1,15 @@ PROJECT_ROOT=github.com/uber/jaeger-client-go -PACKAGES := $(shell glide novendor | grep -v ./thrift-gen/...) +export GO111MODULE=off +PACKAGES := . $(shell GO111MODULE=off go list ./... | awk -F/ 'NR>1 {print "./"$$4"/..."}' | grep -v -e ./thrift-gen/... -e ./thrift/... | sort -u) # all .go files that don't exist in hidden directories -ALL_SRC := $(shell find . -name "*.go" | grep -v -e vendor -e thrift-gen \ +ALL_SRC := $(shell find . -name "*.go" | grep -v -e vendor -e thrift-gen -e ./thrift/ \ -e ".*/\..*" \ -e ".*/_.*" \ -e ".*/mocks.*") --include crossdock/rules.mk +USE_DEP := true -export GO15VENDOREXPERIMENT=1 +-include crossdock/rules.mk RACE=-race GOTEST=go test -v $(RACE) @@ -18,11 +19,9 @@ GOFMT=gofmt FMT_LOG=fmt.log LINT_LOG=lint.log -THRIFT_VER=0.9.3 -THRIFT_IMG=thrift:$(THRIFT_VER) -THRIFT=docker run -v "${PWD}:/data" $(THRIFT_IMG) thrift -THRIFT_GO_ARGS=thrift_import="github.com/apache/thrift/lib/go/thrift" -THRIFT_GEN_DIR=thrift-gen +THRIFT_VER=0.14 +THRIFT_IMG=jaegertracing/thrift:$(THRIFT_VER) +THRIFT=docker run -v "${PWD}:/data" -u ${shell id -u}:${shell id -g} $(THRIFT_IMG) thrift PASS=$(shell printf "\033[32mPASS\033[0m") FAIL=$(shell printf "\033[31mFAIL\033[0m") @@ -35,6 +34,9 @@ test-and-lint: test fmt lint .PHONY: test test: +ifeq ($(USE_DEP),true) + dep check +endif bash -c "set -e; set -o pipefail; $(GOTEST) $(PACKAGES) | $(COLORIZE)" .PHONY: fmt @@ -43,31 +45,51 @@ fmt: ./scripts/updateLicenses.sh .PHONY: lint -lint: +lint: vet golint lint-fmt lint-thrift-testing + +.PHONY: vet +vet: $(GOVET) $(PACKAGES) + +.PHONY: golint +golint: @cat /dev/null > $(LINT_LOG) @$(foreach pkg, $(PACKAGES), $(GOLINT) $(pkg) | grep -v crossdock/thrift >> $(LINT_LOG) || true;) @[ ! 
-s "$(LINT_LOG)" ] || (echo "Lint Failures" | cat - $(LINT_LOG) && false) + +.PHONY: lint-fmt +lint-fmt: @$(GOFMT) -e -s -l $(ALL_SRC) > $(FMT_LOG) ./scripts/updateLicenses.sh >> $(FMT_LOG) @[ ! -s "$(FMT_LOG)" ] || (echo "go fmt or license check failures, run 'make fmt'" | cat - $(FMT_LOG) && false) +# make sure thrift/ module does not import "testing" +.PHONY: lint-thrift-testing +lint-thrift-testing: + @cat /dev/null > $(LINT_LOG) + @(grep -rn '"testing"' thrift | grep -v README.md > $(LINT_LOG)) || true + @[ ! -s "$(LINT_LOG)" ] || (echo '"thrift" module must not import "testing", see issue #585' | cat - $(LINT_LOG) && false) .PHONY: install install: - glide --version || go get github.com/Masterminds/glide + @echo install: USE_DEP=$(USE_DEP) USE_GLIDE=$(USE_GLIDE) ifeq ($(USE_DEP),true) - dep ensure -else + dep version || make install-dep + dep ensure -vendor-only -v +endif +ifeq ($(USE_GLIDE),true) + glide --version || go get github.com/Masterminds/glide glide install endif .PHONY: cover cover: - ./scripts/cover.sh $(shell go list $(PACKAGES)) - go tool cover -html=cover.out -o cover.html + $(GOTEST) -cover -coverprofile cover.out $(PACKAGES) +.PHONY: cover-html +cover-html: cover + go tool cover -html=cover.out -o cover.html # This is not part of the regular test target because we don't want to slow it # down. @@ -75,39 +97,39 @@ cover: test-examples: make -C examples +.PHONY: thrift +thrift: idl-submodule thrift-compile + # TODO at the moment we're not generating tchan_*.go files -thrift: idl-submodule thrift-image - $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/agent.thrift - sed -i '' 's|"zipkincore"|"$(PROJECT_ROOT)/thrift-gen/zipkincore"|g' $(THRIFT_GEN_DIR)/agent/*.go - $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/sampling.thrift - $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/jaeger.thrift - $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/zipkincore.thrift - $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/baggage.thrift - rm -rf thrift-gen/*/*-remote - $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/crossdock/thrift/ /data/idl/thrift/crossdock/tracetest.thrift - rm -rf crossdock/thrift/*/*-remote +.PHONY: thrift-compile +thrift-compile: thrift-image + docker run -v "${PWD}:/data" -u ${shell id -u}:${shell id -g} $(THRIFT_IMG) /data/scripts/gen-thrift.sh +.PHONY: idl-submodule idl-submodule: git submodule init git submodule update +.PHONY: thrift-image thrift-image: $(THRIFT) -version -.PHONY: install-dep-ci -install-dep-ci: - - curl -L -s https://github.com/golang/dep/releases/download/v0.3.2/dep-linux-amd64 -o $$GOPATH/bin/dep +.PHONY: install-dep +install-dep: + - curl -L -s https://github.com/golang/dep/releases/download/v0.5.0/dep-linux-amd64 -o $$GOPATH/bin/dep - chmod +x $$GOPATH/bin/dep .PHONY: install-ci -install-ci: install-dep-ci install +install-ci: install go get github.com/wadey/gocovmerge go get github.com/mattn/goveralls go get golang.org/x/tools/cmd/cover - go get github.com/golang/lint/golint + go get golang.org/x/lint/golint .PHONY: test-ci -test-ci: - @./scripts/cover.sh $(shell go list $(PACKAGES)) +test-ci: cover +ifeq ($(CI_SKIP_LINT),true) + echo 'skipping lint' +else make lint - +endif diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/README.md 
b/hotelReservation/vendor/github.com/uber/jaeger-client-go/README.md index 347013b09..e23912b35 100644 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/README.md +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/README.md @@ -1,9 +1,13 @@ [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![OpenTracing 1.0 Enabled][ot-img]][ot-url] +# 🛑 This library is being deprecated! + +We urge all users to migrate to [OpenTelemetry](https://opentelemetry.io/). Please refer to the [notice in the documentation](https://www.jaegertracing.io/docs/latest/client-libraries/#deprecating-jaeger-clients) for details. + # Jaeger Bindings for Go OpenTracing API Instrumentation library that implements an -[OpenTracing](http://opentracing.io) Tracer for Jaeger (http://jaegertracing.io). +[OpenTracing Go](https://github.com/opentracing/opentracing-go) Tracer for Jaeger (https://jaegertracing.io). **IMPORTANT**: The library's import path is based on its original location under `github.com/uber`. Do not try to import it as `github.com/jaegertracing`; it will not compile. We might revisit this in the next major release. * :white_check_mark: `import "github.com/uber/jaeger-client-go"` @@ -15,19 +19,26 @@ Please see [CONTRIBUTING.md](CONTRIBUTING.md). ## Installation -We recommended using a dependency manager like [glide](https://github.com/Masterminds/glide) +### Preferred + +Add `github.com/uber/jaeger-client-go` to `go.mod`. + +### Old way + +We recommend using a dependency manager like [dep](https://golang.github.io/dep/) and [semantic versioning](http://semver.org/) when including this library in an application. For example, Jaeger backend imports this library like this: -```yaml -- package: github.com/uber/jaeger-client-go - version: ^2.7.0 +```toml +[[constraint]] + name = "github.com/uber/jaeger-client-go" + version = "2.17" ``` If you instead want to use the latest version in `master`, you can pull it via `go get`. Note that during `go get` you may see build errors due to incompatible dependencies, which is why we recommend using semantic versions for dependencies. The error may be fixed by running -`make install` (it will install `glide` if you don't have it): +`make install` (it will install `dep` if you don't have it): ```shell go get -u github.com/uber/jaeger-client-go/ @@ -38,9 +49,51 @@ make install ## Initialization -See tracer initialization examples in [godoc](https://godoc.org/github.com/uber/jaeger-client-go/config#pkg-examples) +See tracer initialization examples in [godoc](https://pkg.go.dev/github.com/uber/jaeger-client-go/config#pkg-examples) and [config/example_test.go](./config/example_test.go). +There are two ways to create a tracer: + * Using the [Configuration](https://pkg.go.dev/github.com/uber/jaeger-client-go/config#Configuration) struct, which allows declarative configuration. For example, you can populate that struct from a YAML/JSON config, or ask it to initialize itself using environment variables (see next section). + * Using the [NewTracer()](https://pkg.go.dev/github.com/uber/jaeger-client-go#NewTracer) function, which allows full programmatic control of configuring the tracer using TracerOptions.
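A minimal sketch of both paths, assuming a placeholder service name; the const sampler and null reporter below are chosen only to keep the example self-contained:

```go
package main

import (
	"io"

	"github.com/opentracing/opentracing-go"
	jaeger "github.com/uber/jaeger-client-go"
	jaegercfg "github.com/uber/jaeger-client-go/config"
)

// Declarative path: a Configuration struct (could equally be populated
// from YAML/JSON or via FromEnv) builds the tracer.
func declarative() (opentracing.Tracer, io.Closer, error) {
	cfg := jaegercfg.Configuration{
		ServiceName: "example-service", // placeholder
		Sampler:     &jaegercfg.SamplerConfig{Type: jaeger.SamplerTypeConst, Param: 1},
		Reporter:    &jaegercfg.ReporterConfig{LogSpans: true},
	}
	return cfg.NewTracer()
}

// Programmatic path: build the sampler and reporter directly and hand
// them to jaeger.NewTracer along with any TracerOptions.
func programmatic() (opentracing.Tracer, io.Closer) {
	return jaeger.NewTracer(
		"example-service", // placeholder
		jaeger.NewConstSampler(true),
		jaeger.NewNullReporter(),
	)
}
```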
+ +### Environment variables + +The tracer can be initialized with values coming from environment variables, if it is +[built from a config](https://pkg.go.dev/github.com/uber/jaeger-client-go/config?tab=doc#Configuration.NewTracer) +that was created via [FromEnv()](https://pkg.go.dev/github.com/uber/jaeger-client-go/config?tab=doc#FromEnv). +None of the env vars are required and all of them can be overridden via direct setting +of the property on the configuration object. + +Property | Description +--- | --- +JAEGER_SERVICE_NAME | The service name. +JAEGER_AGENT_HOST | The hostname for communicating with the agent via UDP (default `localhost`). +JAEGER_AGENT_PORT | The port for communicating with the agent via UDP (default `6831`). +JAEGER_ENDPOINT | The HTTP endpoint for sending spans directly to a collector, e.g. http://jaeger-collector:14268/api/traces. If specified, the agent host/port are ignored. +JAEGER_USER | Username to send as part of "Basic" authentication to the collector endpoint. +JAEGER_PASSWORD | Password to send as part of "Basic" authentication to the collector endpoint. +JAEGER_REPORTER_LOG_SPANS | Whether the reporter should also log the spans, `true` or `false` (default `false`). +JAEGER_REPORTER_MAX_QUEUE_SIZE | The reporter's maximum queue size (default `100`). +JAEGER_REPORTER_FLUSH_INTERVAL | The reporter's flush interval, with units, e.g. `500ms` or `2s` ([valid units][timeunits]; default `1s`). +JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED | When true, disables the UDP connection helper that periodically re-resolves the agent's hostname and reconnects if there was a change (default `false`). +JAEGER_REPORTER_ATTEMPT_RECONNECT_INTERVAL | Controls how often the agent client re-resolves the provided hostname in order to detect address changes ([valid units][timeunits]; default `30s`). +JAEGER_SAMPLER_TYPE | The sampler type: `remote`, `const`, `probabilistic`, `ratelimiting` (default `remote`). See also https://www.jaegertracing.io/docs/latest/sampling/. +JAEGER_SAMPLER_PARAM | The sampler parameter (number). +JAEGER_SAMPLER_MANAGER_HOST_PORT | (deprecated) The HTTP endpoint when using the `remote` sampler. +JAEGER_SAMPLING_ENDPOINT | The URL for the sampling configuration server when using sampler type `remote` (default `http://127.0.0.1:5778/sampling`). +JAEGER_SAMPLER_MAX_OPERATIONS | The maximum number of operations that the sampler will keep track of (default `2000`). +JAEGER_SAMPLER_REFRESH_INTERVAL | How often the `remote` sampler should poll the configuration server for the appropriate sampling strategy, e.g. "1m" or "30s" ([valid units][timeunits]; default `1m`). +JAEGER_TAGS | A comma-separated list of `name=value` tracer-level tags, which get added to all reported spans. The value can also refer to an environment variable using the format `${envVarName:defaultValue}`. +JAEGER_TRACEID_128BIT | Whether to enable 128bit trace-id generation, `true` or `false`. If not enabled, the SDK defaults to 64bit trace-ids. +JAEGER_DISABLED | Whether the tracer is disabled or not. If `true`, the `opentracing.NoopTracer` is used (default `false`). +JAEGER_RPC_METRICS | Whether to store RPC metrics, `true` or `false` (default `false`). + +By default, the client sends traces via UDP to the agent at `localhost:6831`. Use `JAEGER_AGENT_HOST` and +`JAEGER_AGENT_PORT` to send UDP traces to a different `host:port`. If `JAEGER_ENDPOINT` is set, the client sends traces
If `JAEGER_ENDPOINT` is +secured, HTTP basic authentication can be performed by setting the `JAEGER_USER` and `JAEGER_PASSWORD` environment +variables. + ### Closing the tracer via `io.Closer` The constructor function for Jaeger Tracer returns the tracer itself and an `io.Closer` instance. @@ -48,14 +101,14 @@ It is recommended to structure your `main()` so that it calls the `Close()` func before exiting, e.g. ```go -tracer, closer, err := cfg.New(...) +tracer, closer, err := cfg.NewTracer(...) defer closer.Close() ``` This is especially useful for command-line tools that enable tracing, as well as for the long-running apps that support graceful shutdown. For example, if your deployment system sends SIGTERM instead of killing the process and you trap that signal to do a graceful -exit, then having `defer closer.Closer()` ensures that all buffered spans are flushed. +exit, then having `defer closer.Close()` ensures that all buffered spans are flushed. ### Metrics & Monitoring @@ -68,7 +121,7 @@ example: `name:jaeger.traces, state:started, sampled:y`. See [metrics.go](./metr file for the full list and descriptions of emitted metrics. The monitoring backend is represented by the `metrics.Factory` interface from package -[`"github.com/uber/jaeger-lib/metrics"`](github.com/uber/jaeger-lib/metrics). An implementation +[`"github.com/uber/jaeger-lib/metrics"`](https://github.com/jaegertracing/jaeger-lib/tree/master/metrics). An implementation of that interface can be passed as an option to either the Configuration object or the Tracer constructor, for example: @@ -79,8 +132,9 @@ import ( ) metricsFactory := prometheus.New() - tracer, closer, err := new(config.Configuration).New( - "your-service-name", + tracer, closer, err := config.Configuration{ + ServiceName: "your-service-name", + }.NewTracer( config.Metrics(metricsFactory), ) ``` @@ -98,7 +152,7 @@ this interface can be set on the `Config` object before calling the Besides the [zap](https://github.com/uber-go/zap) implementation bundled with this package there is also a [go-kit](https://github.com/go-kit/kit) -one in the [jaeger-lib](https://github.com/uber/jaeger-lib) repository. +one in the [jaeger-lib](https://github.com/jaegertracing/jaeger-lib) repository. ## Instrumentation for Tracing @@ -149,6 +203,29 @@ are available: 1. `RateLimitingSampler` can be used to allow only a certain fixed number of traces to be sampled per second. +#### Delayed sampling + +Version 2.20 introduced the ability to delay sampling decisions in the life cycle +of the root span. It involves several features and architectural changes: + * **Shared sampling state**: the sampling state is shared across all local + (i.e. in-process) spans for a given trace. + * **New `SamplerV2` API** allows the sampler to be called at multiple points + in the life cycle of a span: + * on span creation + * on overwriting span operation name + * on setting span tags + * on finishing the span + * **Final/non-final sampling state**: the new `SamplerV2` API allows the sampler + to indicate if the negative sampling decision is final or not (positive sampling + decisions are always final). If the decision is not final, the sampler will be + called again on further span life cycle events, like setting tags. + +These new features are used in the experimental `x.TagMatchingSampler`, which +can sample a trace based on a certain tag added to the root +span or one of its local (in-process) children. 
The sampler can be used with +another experimental `x.PrioritySampler` that allows multiple samplers to try +to make a sampling decision, in a certain priority order. + ### Baggage Injection The OpenTracing spec allows for [baggage][baggage], which are key-value pairs that are added @@ -190,7 +267,7 @@ import ( ) span := opentracing.SpanFromContext(ctx) -ext.SamplingPriority.Set(span, 1) +ext.SamplingPriority.Set(span, 1) ``` #### Via HTTP Headers @@ -221,13 +298,36 @@ by a lot of Zipkin tracers. This means that you can use Jaeger in conjunction wi However it is not the default propagation format; see [here](zipkin/README.md#NewZipkinB3HTTPHeaderPropagator) for how to set it up. +## SelfRef + +Jaeger Tracer supports an additional [span reference][] type called `Self`, which was proposed +to the OpenTracing Specification (https://github.com/opentracing/specification/issues/81) +but not yet accepted. This allows the caller to provide an already created `SpanContext` +when starting a new span. The `Self` reference bypasses trace and span ID generation, +as well as sampling decisions (i.e. the sampling bit in the `SpanContext.flags` must be +set appropriately by the caller). + +The `Self` reference supports the following use cases: + * the ability to provide externally generated trace and span IDs + * appending data to the same span from different processes, such as loading and continuing spans/traces from offline (i.e. log-based) storage + +Usage requires passing in a `SpanContext` and the `jaeger.Self` reference type: +``` +span := tracer.StartSpan( + "continued_span", + jaeger.SelfRef(yourSpanContext), +) +... +defer span.Finish() +``` + ## License [Apache 2.0 License](LICENSE). -[doc-img]: https://godoc.org/github.com/uber/jaeger-client-go?status.svg -[doc]: https://godoc.org/github.com/uber/jaeger-client-go +[doc-img]: https://pkg.go.dev/badge/github.com/uber/jaeger-client-go.svg +[doc]: https://pkg.go.dev/github.com/uber/jaeger-client-go [ci-img]: https://travis-ci.org/jaegertracing/jaeger-client-go.svg?branch=master [ci]: https://travis-ci.org/jaegertracing/jaeger-client-go [cov-img]: https://codecov.io/gh/jaegertracing/jaeger-client-go/branch/master/graph/badge.svg @@ -235,3 +335,5 @@ However it is not the default propagation format, see [here](zipkin/README.md#Ne [ot-img]: https://img.shields.io/badge/OpenTracing--1.0-enabled-blue.svg [ot-url]: http://opentracing.io [baggage]: https://github.com/opentracing/specification/blob/master/specification.md#set-a-baggage-item +[timeunits]: https://golang.org/pkg/time/#ParseDuration +[span reference]: https://github.com/opentracing/specification/blob/1.1/specification.md#references-between-spans diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/RELEASE.md b/hotelReservation/vendor/github.com/uber/jaeger-client-go/RELEASE.md index 115e49ab8..12438d841 100644 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/RELEASE.md +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/RELEASE.md @@ -2,6 +2,7 @@ 1. Create a PR "Preparing for release X.Y.Z" against master branch * Alter CHANGELOG.md from ` (unreleased)` to ` (YYYY-MM-DD)` + * Use `git log --pretty=format:'- %s -- %an'` as the basis for changelog entries * Update `JaegerClientVersion` in constants.go to `Go-X.Y.Z` 2.
Create a release "Release X.Y.Z" on GitHub * Create Tag `vX.Y.Z` diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/config/config.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/config/config.go index 306b92fa2..06676350b 100644 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/config/config.go +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/config/config.go @@ -1,4 +1,4 @@ -// Copyright (c) 2017 Uber Technologies, Inc. +// Copyright (c) 2017-2018 Uber Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -22,27 +22,50 @@ import ( "time" "github.com/opentracing/opentracing-go" + "github.com/uber/jaeger-client-go/utils" "github.com/uber/jaeger-client-go" "github.com/uber/jaeger-client-go/internal/baggage/remote" + throttler "github.com/uber/jaeger-client-go/internal/throttler/remote" "github.com/uber/jaeger-client-go/rpcmetrics" + "github.com/uber/jaeger-client-go/transport" + "github.com/uber/jaeger-lib/metrics" ) const defaultSamplingProbability = 0.001 // Configuration configures and creates Jaeger Tracer type Configuration struct { - Disabled bool `yaml:"disabled"` + // ServiceName specifies the service name to use on the tracer. + // Can be provided by FromEnv() via the environment variable named JAEGER_SERVICE_NAME + ServiceName string `yaml:"serviceName"` + + // Disabled makes the config return opentracing.NoopTracer. + // Value can be provided by FromEnv() via the environment variable named JAEGER_DISABLED. + Disabled bool `yaml:"disabled"` + + // RPCMetrics enables generation of RPC metrics (requires metrics factory to be provided). + // Value can be provided by FromEnv() via the environment variable named JAEGER_RPC_METRICS + RPCMetrics bool `yaml:"rpc_metrics"` + + // Gen128Bit instructs the tracer to generate 128-bit wide trace IDs, compatible with W3C Trace Context. + // Value can be provided by FromEnv() via the environment variable named JAEGER_TRACEID_128BIT. + Gen128Bit bool `yaml:"traceid_128bit"` + + // Tags can be provided by FromEnv() via the environment variable named JAEGER_TAGS + Tags []opentracing.Tag `yaml:"tags"` + Sampler *SamplerConfig `yaml:"sampler"` Reporter *ReporterConfig `yaml:"reporter"` Headers *jaeger.HeadersConfig `yaml:"headers"` - RPCMetrics bool `yaml:"rpc_metrics"` BaggageRestrictions *BaggageRestrictionsConfig `yaml:"baggage_restrictions"` + Throttler *ThrottlerConfig `yaml:"throttler"` } // SamplerConfig allows initializing a non-default sampler. All fields are optional. type SamplerConfig struct { - // Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote + // Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote. + // Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_TYPE Type string `yaml:"type"` // Param is a value passed to the sampler. @@ -52,20 +75,37 @@ type SamplerConfig struct { // - for "rateLimiting" sampler, the number of spans per second // - for "remote" sampler, param is the same as for "probabilistic" // and indicates the initial sampling rate before the actual one - // is received from the mothership + // is received from the mothership.
+ // Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_PARAM Param float64 `yaml:"param"` - // SamplingServerURL is the address of jaeger-agent's HTTP sampling server + // SamplingServerURL is the URL of sampling manager that can provide + // sampling strategy to this service. + // Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLING_ENDPOINT SamplingServerURL string `yaml:"samplingServerURL"` - // MaxOperations is the maximum number of operations that the sampler + // SamplingRefreshInterval controls how often the remotely controlled sampler will poll + // sampling manager for the appropriate sampling strategy. + // Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_REFRESH_INTERVAL + SamplingRefreshInterval time.Duration `yaml:"samplingRefreshInterval"` + + // MaxOperations is the maximum number of operations that the PerOperationSampler // will keep track of. If an operation is not tracked, a default probabilistic // sampler will be used rather than the per operation specific sampler. + // Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_MAX_OPERATIONS. MaxOperations int `yaml:"maxOperations"` - // SamplingRefreshInterval controls how often the remotely controlled sampler will poll - // jaeger-agent for the appropriate sampling strategy. - SamplingRefreshInterval time.Duration `yaml:"samplingRefreshInterval"` + // Opt-in feature for applications that require late binding of span name via explicit + // call to SetOperationName when using PerOperationSampler. When this feature is enabled, + // the sampler will return retryable=true from OnCreateSpan(), thus leaving the sampling + // decision as non-final (and the span as writeable). This may lead to degraded performance + // in applications that always provide the correct span name on trace creation. + // + // For backwards compatibility this option is off by default. + OperationNameLateBinding bool `yaml:"operationNameLateBinding"` + + // Options can be used to programmatically pass additional options to the Remote sampler. + Options []jaeger.SamplerOption } // ReporterConfig configures the reporter. All fields are optional. @@ -73,19 +113,51 @@ type ReporterConfig struct { // QueueSize controls how many spans the reporter can keep in memory before it starts dropping // new spans. The queue is continuously drained by a background go-routine, as fast as spans // can be sent out of process. + // Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_MAX_QUEUE_SIZE QueueSize int `yaml:"queueSize"` // BufferFlushInterval controls how often the buffer is force-flushed, even if it's not full. // It is generally not useful, as it only matters for very low traffic services. + // Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_FLUSH_INTERVAL BufferFlushInterval time.Duration // LogSpans, when true, enables LoggingReporter that runs in parallel with the main reporter // and logs all submitted spans. Main Configuration.Logger must be initialized in the code // for this option to have any effect. + // Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_LOG_SPANS LogSpans bool `yaml:"logSpans"` - // LocalAgentHostPort instructs reporter to send spans to jaeger-agent at this address + // LocalAgentHostPort instructs reporter to send spans to jaeger-agent at this address. 
+ // Can be provided by FromEnv() via the environment variable named JAEGER_AGENT_HOST / JAEGER_AGENT_PORT LocalAgentHostPort string `yaml:"localAgentHostPort"` + + // DisableAttemptReconnecting when true, disables udp connection helper that periodically re-resolves + // the agent's hostname and reconnects if there was a change. This option only + // applies if LocalAgentHostPort is specified. + // Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED + DisableAttemptReconnecting bool `yaml:"disableAttemptReconnecting"` + + // AttemptReconnectInterval controls how often the agent client re-resolves the provided hostname + // in order to detect address changes. This option only applies if DisableAttemptReconnecting is false. + // Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_ATTEMPT_RECONNECT_INTERVAL + AttemptReconnectInterval time.Duration + + // CollectorEndpoint instructs reporter to send spans to jaeger-collector at this URL. + // Can be provided by FromEnv() via the environment variable named JAEGER_ENDPOINT + CollectorEndpoint string `yaml:"collectorEndpoint"` + + // User instructs reporter to include a user for basic http authentication when sending spans to jaeger-collector. + // Can be provided by FromEnv() via the environment variable named JAEGER_USER + User string `yaml:"user"` + + // Password instructs reporter to include a password for basic http authentication when sending spans to + // jaeger-collector. + // Can be provided by FromEnv() via the environment variable named JAEGER_PASSWORD + Password string `yaml:"password"` + + // HTTPHeaders instructs the reporter to add these headers to the http request when reporting spans. + // This field takes effect only when using HTTPTransport by setting the CollectorEndpoint. + HTTPHeaders map[string]string `yaml:"http_headers"` } // BaggageRestrictionsConfig configures the baggage restrictions manager which can be used to whitelist @@ -105,28 +177,60 @@ type BaggageRestrictionsConfig struct { RefreshInterval time.Duration `yaml:"refreshInterval"` } +// ThrottlerConfig configures the throttler which can be used to throttle the +// rate at which the client may send debug requests. +type ThrottlerConfig struct { + // HostPort of jaeger-agent's credit server. + HostPort string `yaml:"hostPort"` + + // RefreshInterval controls how often the throttler will poll jaeger-agent + // for more throttling credits. + RefreshInterval time.Duration `yaml:"refreshInterval"` + + // SynchronousInitialization determines whether or not the throttler should + // synchronously fetch credits from the agent when an operation is seen for + // the first time. This should be set to true if the client will be used by + // a short lived service that needs to ensure that credits are fetched + // upfront such that sampling or throttling occurs. + SynchronousInitialization bool `yaml:"synchronousInitialization"` +} + type nullCloser struct{} func (*nullCloser) Close() error { return nil } // New creates a new Jaeger Tracer, and a closer func that can be used to flush buffers // before shutdown. +// +// Deprecated: use NewTracer() function func (c Configuration) New( serviceName string, options ...Option, ) (opentracing.Tracer, io.Closer, error) { - if serviceName == "" { - return nil, nil, errors.New("no service name provided") + if serviceName != "" { + c.ServiceName = serviceName } + + return c.NewTracer(options...) 
+} + +// NewTracer returns a new tracer based on the current configuration, using the given options, +// and a closer func that can be used to flush buffers before shutdown. +func (c Configuration) NewTracer(options ...Option) (opentracing.Tracer, io.Closer, error) { if c.Disabled { return &opentracing.NoopTracer{}, &nullCloser{}, nil } + + if c.ServiceName == "" { + return nil, nil, errors.New("no service name provided") + } + opts := applyOptions(options...) tracerMetrics := jaeger.NewMetrics(opts.metrics, nil) if c.RPCMetrics { Observer( rpcmetrics.NewObserver( - opts.metrics.Namespace("jaeger-rpc", map[string]string{"component": "jaeger"}), + opts.metrics.Namespace(metrics.NSOptions{Name: "jaeger-rpc", Tags: map[string]string{"component": "jaeger"}}), rpcmetrics.DefaultNameNormalizer, ), )(&opts) // adds to c.observers @@ -141,14 +245,18 @@ func (c Configuration) New( c.Reporter = &ReporterConfig{} } - sampler, err := c.Sampler.NewSampler(serviceName, tracerMetrics) - if err != nil { - return nil, nil, err + sampler := opts.sampler + if sampler == nil { + s, err := c.Sampler.NewSampler(c.ServiceName, tracerMetrics) + if err != nil { + return nil, nil, err + } + sampler = s } reporter := opts.reporter if reporter == nil { - r, err := c.Reporter.NewReporter(serviceName, tracerMetrics, opts.logger) + r, err := c.Reporter.NewReporter(c.ServiceName, tracerMetrics, opts.logger) if err != nil { return nil, nil, err } @@ -159,14 +267,28 @@ func (c Configuration) New( jaeger.TracerOptions.Metrics(tracerMetrics), jaeger.TracerOptions.Logger(opts.logger), jaeger.TracerOptions.CustomHeaderKeys(c.Headers), - jaeger.TracerOptions.Gen128Bit(opts.gen128Bit), + jaeger.TracerOptions.PoolSpans(opts.poolSpans), jaeger.TracerOptions.ZipkinSharedRPCSpan(opts.zipkinSharedRPCSpan), + jaeger.TracerOptions.MaxTagValueLength(opts.maxTagValueLength), + jaeger.TracerOptions.NoDebugFlagOnForcedSampling(opts.noDebugFlagOnForcedSampling), + } + + if c.Gen128Bit || opts.gen128Bit { + tracerOptions = append(tracerOptions, jaeger.TracerOptions.Gen128Bit(true)) + } + + if opts.randomNumber != nil { + tracerOptions = append(tracerOptions, jaeger.TracerOptions.RandomNumber(opts.randomNumber)) } for _, tag := range opts.tags { tracerOptions = append(tracerOptions, jaeger.TracerOptions.Tag(tag.Key, tag.Value)) } + for _, tag := range c.Tags { + tracerOptions = append(tracerOptions, jaeger.TracerOptions.Tag(tag.Key, tag.Value)) + } + for _, obs := range opts.observers { tracerOptions = append(tracerOptions, jaeger.TracerOptions.Observer(obs)) } @@ -175,9 +297,17 @@ func (c Configuration) New( tracerOptions = append(tracerOptions, jaeger.TracerOptions.ContribObserver(cobs)) } + for format, injector := range opts.injectors { + tracerOptions = append(tracerOptions, jaeger.TracerOptions.Injector(format, injector)) + } + + for format, extractor := range opts.extractors { + tracerOptions = append(tracerOptions, jaeger.TracerOptions.Extractor(format, extractor)) + } + if c.BaggageRestrictions != nil { mgr := remote.NewRestrictionManager( - serviceName, + c.ServiceName, remote.Options.Metrics(tracerMetrics), remote.Options.Logger(opts.logger), remote.Options.HostPort(c.BaggageRestrictions.HostPort), @@ -189,11 +319,27 @@ func (c Configuration) New( tracerOptions = append(tracerOptions, jaeger.TracerOptions.BaggageRestrictionManager(mgr)) } + if c.Throttler != nil { + debugThrottler := throttler.NewThrottler( + c.ServiceName, + throttler.Options.Metrics(tracerMetrics), + throttler.Options.Logger(opts.logger), + 
throttler.Options.HostPort(c.Throttler.HostPort), + throttler.Options.RefreshInterval(c.Throttler.RefreshInterval), + throttler.Options.SynchronousInitialization( + c.Throttler.SynchronousInitialization, + ), + ) + + tracerOptions = append(tracerOptions, jaeger.TracerOptions.DebugThrottler(debugThrottler)) + } + tracer, closer := jaeger.NewTracer( - serviceName, + c.ServiceName, sampler, reporter, - tracerOptions...) + tracerOptions..., + ) return tracer, closer, nil } @@ -229,7 +375,7 @@ func (sc *SamplerConfig) NewSampler( return jaeger.NewProbabilisticSampler(sc.Param) } return nil, fmt.Errorf( - "Invalid Param for probabilistic sampler: %v. Expecting value between 0 and 1", + "invalid Param for probabilistic sampler; expecting value between 0 and 1, received %v", sc.Param, ) } @@ -247,25 +393,23 @@ func (sc *SamplerConfig) NewSampler( jaeger.SamplerOptions.Metrics(metrics), jaeger.SamplerOptions.InitialSampler(initSampler), jaeger.SamplerOptions.SamplingServerURL(sc.SamplingServerURL), + jaeger.SamplerOptions.MaxOperations(sc.MaxOperations), + jaeger.SamplerOptions.OperationNameLateBinding(sc.OperationNameLateBinding), + jaeger.SamplerOptions.SamplingRefreshInterval(sc.SamplingRefreshInterval), } - if sc.MaxOperations != 0 { - options = append(options, jaeger.SamplerOptions.MaxOperations(sc.MaxOperations)) - } - if sc.SamplingRefreshInterval != 0 { - options = append(options, jaeger.SamplerOptions.SamplingRefreshInterval(sc.SamplingRefreshInterval)) - } + options = append(options, sc.Options...) return jaeger.NewRemotelyControlledSampler(serviceName, options...), nil } - return nil, fmt.Errorf("Unknown sampler type %v", sc.Type) + return nil, fmt.Errorf("unknown sampler type (%s)", sc.Type) } -// NewReporter instantiates a new reporter that submits spans to tcollector +// NewReporter instantiates a new reporter that submits spans to the collector func (rc *ReporterConfig) NewReporter( serviceName string, metrics *jaeger.Metrics, logger jaeger.Logger, ) (jaeger.Reporter, error) { - sender, err := rc.newTransport() + sender, err := rc.newTransport(logger) if err != nil { return nil, err } @@ -276,12 +420,28 @@ func (rc *ReporterConfig) NewReporter( jaeger.ReporterOptions.Logger(logger), jaeger.ReporterOptions.Metrics(metrics)) if rc.LogSpans && logger != nil { - logger.Infof("Initializing logging reporter\n") + logger.Infof("Initializing logging reporter") reporter = jaeger.NewCompositeReporter(jaeger.NewLoggingReporter(logger), reporter) } return reporter, err } -func (rc *ReporterConfig) newTransport() (jaeger.Transport, error) { - return jaeger.NewUDPTransport(rc.LocalAgentHostPort, 0) +func (rc *ReporterConfig) newTransport(logger jaeger.Logger) (jaeger.Transport, error) { + switch { + case rc.CollectorEndpoint != "": + httpOptions := []transport.HTTPOption{transport.HTTPHeaders(rc.HTTPHeaders)} + if rc.User != "" && rc.Password != "" { + httpOptions = append(httpOptions, transport.HTTPBasicAuth(rc.User, rc.Password)) + } + return transport.NewHTTPTransport(rc.CollectorEndpoint, httpOptions...), nil + default: + return jaeger.NewUDPTransportWithParams(jaeger.UDPTransportParams{ + AgentClientUDPParams: utils.AgentClientUDPParams{ + HostPort: rc.LocalAgentHostPort, + Logger: logger, + DisableAttemptReconnecting: rc.DisableAttemptReconnecting, + AttemptReconnectInterval: rc.AttemptReconnectInterval, + }, + }) + } } diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/config/config_env.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/config/config_env.go new 
file mode 100644 index 000000000..0fc3c53fd --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/config/config_env.go @@ -0,0 +1,268 @@ +// Copyright (c) 2018 The Jaeger Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +import ( + "fmt" + "net/url" + "os" + "strconv" + "strings" + "time" + + "github.com/opentracing/opentracing-go" + "github.com/pkg/errors" + "github.com/uber/jaeger-client-go" +) + +const ( + // environment variable names + envServiceName = "JAEGER_SERVICE_NAME" + envDisabled = "JAEGER_DISABLED" + envRPCMetrics = "JAEGER_RPC_METRICS" + envTags = "JAEGER_TAGS" + envSamplerType = "JAEGER_SAMPLER_TYPE" + envSamplerParam = "JAEGER_SAMPLER_PARAM" + envSamplerManagerHostPort = "JAEGER_SAMPLER_MANAGER_HOST_PORT" // Deprecated by envSamplingEndpoint + envSamplingEndpoint = "JAEGER_SAMPLING_ENDPOINT" + envSamplerMaxOperations = "JAEGER_SAMPLER_MAX_OPERATIONS" + envSamplerRefreshInterval = "JAEGER_SAMPLER_REFRESH_INTERVAL" + envReporterMaxQueueSize = "JAEGER_REPORTER_MAX_QUEUE_SIZE" + envReporterFlushInterval = "JAEGER_REPORTER_FLUSH_INTERVAL" + envReporterLogSpans = "JAEGER_REPORTER_LOG_SPANS" + envReporterAttemptReconnectingDisabled = "JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED" + envReporterAttemptReconnectInterval = "JAEGER_REPORTER_ATTEMPT_RECONNECT_INTERVAL" + envEndpoint = "JAEGER_ENDPOINT" + envUser = "JAEGER_USER" + envPassword = "JAEGER_PASSWORD" + envAgentHost = "JAEGER_AGENT_HOST" + envAgentPort = "JAEGER_AGENT_PORT" + env128bit = "JAEGER_TRACEID_128BIT" +) + +// FromEnv uses environment variables to set the tracer's Configuration +func FromEnv() (*Configuration, error) { + c := &Configuration{} + return c.FromEnv() +} + +// FromEnv uses environment variables and overrides existing tracer's Configuration +func (c *Configuration) FromEnv() (*Configuration, error) { + if e := os.Getenv(envServiceName); e != "" { + c.ServiceName = e + } + + if e := os.Getenv(envRPCMetrics); e != "" { + if value, err := strconv.ParseBool(e); err == nil { + c.RPCMetrics = value + } else { + return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envRPCMetrics, e) + } + } + + if e := os.Getenv(envDisabled); e != "" { + if value, err := strconv.ParseBool(e); err == nil { + c.Disabled = value + } else { + return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envDisabled, e) + } + } + + if e := os.Getenv(envTags); e != "" { + c.Tags = parseTags(e) + } + + if e := os.Getenv(env128bit); e != "" { + if value, err := strconv.ParseBool(e); err == nil { + c.Gen128Bit = value + } else { + return nil, errors.Wrapf(err, "cannot parse env var %s=%s", env128bit, e) + } + } + + if c.Sampler == nil { + c.Sampler = &SamplerConfig{} + } + + if s, err := c.Sampler.samplerConfigFromEnv(); err == nil { + c.Sampler = s + } else { + return nil, errors.Wrap(err, "cannot obtain sampler config from env") + } + + if c.Reporter == nil { + c.Reporter = &ReporterConfig{} + } + + if r, err := c.Reporter.reporterConfigFromEnv(); err == nil { + c.Reporter 
= r + } else { + return nil, errors.Wrap(err, "cannot obtain reporter config from env") + } + + return c, nil +} + +// samplerConfigFromEnv creates a new SamplerConfig based on the environment variables +func (sc *SamplerConfig) samplerConfigFromEnv() (*SamplerConfig, error) { + if e := os.Getenv(envSamplerType); e != "" { + sc.Type = e + } + + if e := os.Getenv(envSamplerParam); e != "" { + if value, err := strconv.ParseFloat(e, 64); err == nil { + sc.Param = value + } else { + return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerParam, e) + } + } + + if e := os.Getenv(envSamplingEndpoint); e != "" { + sc.SamplingServerURL = e + } else if e := os.Getenv(envSamplerManagerHostPort); e != "" { + sc.SamplingServerURL = e + } else if e := os.Getenv(envAgentHost); e != "" { + // Fallback if we know the agent host - try the sampling endpoint there + sc.SamplingServerURL = fmt.Sprintf("http://%s:%d/sampling", e, jaeger.DefaultSamplingServerPort) + } + + if e := os.Getenv(envSamplerMaxOperations); e != "" { + if value, err := strconv.ParseInt(e, 10, 0); err == nil { + sc.MaxOperations = int(value) + } else { + return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerMaxOperations, e) + } + } + + if e := os.Getenv(envSamplerRefreshInterval); e != "" { + if value, err := time.ParseDuration(e); err == nil { + sc.SamplingRefreshInterval = value + } else { + return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerRefreshInterval, e) + } + } + + return sc, nil +} + +// reporterConfigFromEnv creates a new ReporterConfig based on the environment variables +func (rc *ReporterConfig) reporterConfigFromEnv() (*ReporterConfig, error) { + if e := os.Getenv(envReporterMaxQueueSize); e != "" { + if value, err := strconv.ParseInt(e, 10, 0); err == nil { + rc.QueueSize = int(value) + } else { + return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterMaxQueueSize, e) + } + } + + if e := os.Getenv(envReporterFlushInterval); e != "" { + if value, err := time.ParseDuration(e); err == nil { + rc.BufferFlushInterval = value + } else { + return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterFlushInterval, e) + } + } + + if e := os.Getenv(envReporterLogSpans); e != "" { + if value, err := strconv.ParseBool(e); err == nil { + rc.LogSpans = value + } else { + return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterLogSpans, e) + } + } + + if e := os.Getenv(envEndpoint); e != "" { + u, err := url.ParseRequestURI(e) + if err != nil { + return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envEndpoint, e) + } + rc.CollectorEndpoint = u.String() + user := os.Getenv(envUser) + pswd := os.Getenv(envPassword) + if user != "" && pswd == "" || user == "" && pswd != "" { + return nil, errors.Errorf("you must set %s and %s env vars together", envUser, envPassword) + } + rc.User = user + rc.Password = pswd + } else { + useEnv := false + host := jaeger.DefaultUDPSpanServerHost + if e := os.Getenv(envAgentHost); e != "" { + host = e + useEnv = true + } + + port := jaeger.DefaultUDPSpanServerPort + if e := os.Getenv(envAgentPort); e != "" { + if value, err := strconv.ParseInt(e, 10, 0); err == nil { + port = int(value) + useEnv = true + } else { + return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envAgentPort, e) + } + } + if useEnv || rc.LocalAgentHostPort == "" { + rc.LocalAgentHostPort = fmt.Sprintf("%s:%d", host, port) + } + + if e := os.Getenv(envReporterAttemptReconnectingDisabled); e != "" { + if value, err := 
strconv.ParseBool(e); err == nil { + rc.DisableAttemptReconnecting = value + } else { + return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterAttemptReconnectingDisabled, e) + } + } + + if !rc.DisableAttemptReconnecting { + if e := os.Getenv(envReporterAttemptReconnectInterval); e != "" { + if value, err := time.ParseDuration(e); err == nil { + rc.AttemptReconnectInterval = value + } else { + return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterAttemptReconnectInterval, e) + } + } + } + } + + return rc, nil +} + +// parseTags parses the given string into a collection of Tags. +// Spec for this value: +// - comma separated list of key=value +// - value can be specified using the notation ${envVar:defaultValue}, where `envVar` +// is an environment variable and `defaultValue` is the value to use in case the env var is not set +func parseTags(sTags string) []opentracing.Tag { + pairs := strings.Split(sTags, ",") + tags := make([]opentracing.Tag, 0) + for _, p := range pairs { + kv := strings.SplitN(p, "=", 2) + k, v := strings.TrimSpace(kv[0]), strings.TrimSpace(kv[1]) + + if strings.HasPrefix(v, "${") && strings.HasSuffix(v, "}") { + ed := strings.SplitN(v[2:len(v)-1], ":", 2) + e, d := ed[0], ed[1] + v = os.Getenv(e) + if v == "" && d != "" { + v = d + } + } + + tag := opentracing.Tag{Key: k, Value: v} + tags = append(tags, tag) + } + + return tags +} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/config/options.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/config/options.go index 76486440a..a2b9cbc28 100644 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/config/options.go +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/config/options.go @@ -26,14 +26,21 @@ type Option func(c *Options) // Options control behavior of the client. type Options struct { - metrics metrics.Factory - logger jaeger.Logger - reporter jaeger.Reporter - contribObservers []jaeger.ContribObserver - observers []jaeger.Observer - gen128Bit bool - zipkinSharedRPCSpan bool - tags []opentracing.Tag + metrics metrics.Factory + logger jaeger.Logger + reporter jaeger.Reporter + sampler jaeger.Sampler + contribObservers []jaeger.ContribObserver + observers []jaeger.Observer + gen128Bit bool + poolSpans bool + zipkinSharedRPCSpan bool + maxTagValueLength int + noDebugFlagOnForcedSampling bool + tags []opentracing.Tag + injectors map[interface{}]jaeger.Injector + extractors map[interface{}]jaeger.Extractor + randomNumber func() uint64 } // Metrics creates an Option that initializes Metrics in the tracer, @@ -60,6 +67,13 @@ func Reporter(reporter jaeger.Reporter) Option { } } +// Sampler can be provided explicitly to override the configuration. +func Sampler(sampler jaeger.Sampler) Option { + return func(c *Options) { + c.sampler = sampler + } +} + // Observer can be registered with the Tracer to receive notifications about new Spans. func Observer(observer jaeger.Observer) Option { return func(c *Options) { @@ -67,7 +81,7 @@ func Observer(observer jaeger.Observer) Option { } } -// ContribObserver can be registered with the Tracer to recieve notifications +// ContribObserver can be registered with the Tracer to receive notifications // about new spans. 
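The config_env.go file added above drives the entire tracer configuration from JAEGER_* environment variables, with parse failures surfaced as wrapped errors. A minimal usage sketch, not part of this diff; the service name and variable values below are illustrative assumptions (the ${envVar:defaultValue} notation is the one specified in parseTags):

package main

import (
	"log"
	"os"

	"github.com/uber/jaeger-client-go/config"
)

func main() {
	// Hypothetical values; any variable recognized in config_env.go works the same way.
	os.Setenv("JAEGER_SERVICE_NAME", "frontend")
	os.Setenv("JAEGER_SAMPLER_TYPE", "const")
	os.Setenv("JAEGER_SAMPLER_PARAM", "1")
	// parseTags resolves ${DEPLOY_ENV:dev} against $DEPLOY_ENV, falling back to "dev".
	os.Setenv("JAEGER_TAGS", "region=us-east-1,env=${DEPLOY_ENV:dev}")

	cfg, err := config.FromEnv()
	if err != nil {
		log.Fatalf("parsing JAEGER_* env vars: %v", err)
	}
	tracer, closer, err := cfg.NewTracer()
	if err != nil {
		log.Fatalf("initializing tracer: %v", err)
	}
	defer closer.Close()
	_ = tracer // ready to use as an opentracing.Tracer
}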
func ContribObserver(observer jaeger.ContribObserver) Option { return func(c *Options) { @@ -82,6 +96,13 @@ func Gen128Bit(gen128Bit bool) Option { } } +// PoolSpans specifies whether to pool spans +func PoolSpans(poolSpans bool) Option { + return func(c *Options) { + c.poolSpans = poolSpans + } +} + // ZipkinSharedRPCSpan creates an option that enables sharing span ID between client // and server spans a la zipkin. If false, client and server spans will be assigned // different IDs. @@ -91,6 +112,21 @@ func ZipkinSharedRPCSpan(zipkinSharedRPCSpan bool) Option { } } +// MaxTagValueLength can be provided to override the default max tag value length. +func MaxTagValueLength(maxTagValueLength int) Option { + return func(c *Options) { + c.maxTagValueLength = maxTagValueLength + } +} + +// NoDebugFlagOnForcedSampling can be used to decide whether debug flag will be set or not +// when calling span.setSamplingPriority to force sample a span. +func NoDebugFlagOnForcedSampling(noDebugFlagOnForcedSampling bool) Option { + return func(c *Options) { + c.noDebugFlagOnForcedSampling = noDebugFlagOnForcedSampling + } +} + // Tag creates an option that adds a tracer-level tag. func Tag(key string, value interface{}) Option { return func(c *Options) { @@ -98,8 +134,32 @@ func Tag(key string, value interface{}) Option { } } +// Injector registers an Injector with the given format. +func Injector(format interface{}, injector jaeger.Injector) Option { + return func(c *Options) { + c.injectors[format] = injector + } +} + +// Extractor registers an Extractor with the given format. +func Extractor(format interface{}, extractor jaeger.Extractor) Option { + return func(c *Options) { + c.extractors[format] = extractor + } +} + +// WithRandomNumber supplies a random number generator function to the Tracer used to generate trace and span IDs. +func WithRandomNumber(f func() uint64) Option { + return func(c *Options) { + c.randomNumber = f + } +} + func applyOptions(options ...Option) Options { - opts := Options{} + opts := Options{ + injectors: make(map[interface{}]jaeger.Injector), + extractors: make(map[interface{}]jaeger.Extractor), + } for _, option := range options { option(&opts) } diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/constants.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/constants.go index 1f00d1c0b..35710cfef 100644 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/constants.go +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/constants.go @@ -14,9 +14,15 @@ package jaeger +import ( + "fmt" + + "github.com/opentracing/opentracing-go" +) + const ( // JaegerClientVersion is the version of the client library reported as Span tag. - JaegerClientVersion = "Go-2.11.2" + JaegerClientVersion = "Go-2.30.0" // JaegerClientVersionTagKey is the name of the tag used to report client version. JaegerClientVersionTagKey = "jaeger.version" @@ -38,6 +44,9 @@ const ( // TracerIPTagKey used to report ip of the process. TracerIPTagKey = "ip" + // TracerUUIDTagKey used to report UUID of the client process. + TracerUUIDTagKey = "client-uuid" + // SamplerTypeTagKey reports which sampler was used on the root span. SamplerTypeTagKey = "sampler.type" @@ -73,4 +82,25 @@ const ( // SamplerTypeLowerBound is the type of sampler that samples // at least a fixed number of traces per second. 
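The options.go changes above extend the functional-options pattern: every exported Option is a closure over the private Options struct, and applyOptions folds them together (now also pre-allocating the injector and extractor maps). A sketch of how callers combine them, assuming a hypothetical service name; MaxTagValueLength and PoolSpans are the options introduced by this change:

package main

import (
	"log"

	"github.com/uber/jaeger-client-go/config"
	jaegerlog "github.com/uber/jaeger-client-go/log"
)

func main() {
	cfg := &config.Configuration{ServiceName: "frontend"} // hypothetical service name
	// Each argument is an Option closure; applyOptions folds them into Options.
	tracer, closer, err := cfg.NewTracer(
		config.Logger(jaegerlog.StdLogger),
		config.MaxTagValueLength(1024), // new in this vendored version
		config.PoolSpans(true),         // new in this vendored version
	)
	if err != nil {
		log.Fatalf("initializing tracer: %v", err)
	}
	defer closer.Close()
	_ = tracer
}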
SamplerTypeLowerBound = "lowerbound" + + // DefaultUDPSpanServerHost is the default host to send the spans to, via UDP + DefaultUDPSpanServerHost = "localhost" + + // DefaultUDPSpanServerPort is the default port to send the spans to, via UDP + DefaultUDPSpanServerPort = 6831 + + // DefaultSamplingServerPort is the default port to fetch sampling config from, via http + DefaultSamplingServerPort = 5778 + + // DefaultMaxTagValueLength is the default max length of byte array or string allowed in the tag value. + DefaultMaxTagValueLength = 256 + + // SelfRefType is a jaeger specific reference type that supports creating a span + // with an already defined context. + selfRefType opentracing.SpanReferenceType = 99 +) + +var ( + // DefaultSamplingServerURL is the default url to fetch sampling config from, via http + DefaultSamplingServerURL = fmt.Sprintf("http://127.0.0.1:%d/sampling", DefaultSamplingServerPort) ) diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/context.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/context.go deleted file mode 100644 index 8b06173d9..000000000 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/context.go +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import ( - "errors" - "fmt" - "strconv" - "strings" -) - -const ( - flagSampled = byte(1) - flagDebug = byte(2) -) - -var ( - errEmptyTracerStateString = errors.New("Cannot convert empty string to tracer state") - errMalformedTracerStateString = errors.New("String does not match tracer state format") - - emptyContext = SpanContext{} -) - -// TraceID represents unique 128bit identifier of a trace -type TraceID struct { - High, Low uint64 -} - -// SpanID represents unique 64bit identifier of a span -type SpanID uint64 - -// SpanContext represents propagated span identity and state -type SpanContext struct { - // traceID represents globally unique ID of the trace. - // Usually generated as a random number. - traceID TraceID - - // spanID represents span ID that must be unique within its trace, - // but does not have to be globally unique. - spanID SpanID - - // parentID refers to the ID of the parent span. - // Should be 0 if the current span is a root span. - parentID SpanID - - // flags is a bitmap containing such bits as 'sampled' and 'debug'. - flags byte - - // Distributed Context baggage. The is a snapshot in time. - baggage map[string]string - - // debugID can be set to some correlation ID when the context is being - // extracted from a TextMap carrier. 
- // - // See JaegerDebugHeader in constants.go - debugID string -} - -// ForeachBaggageItem implements ForeachBaggageItem() of opentracing.SpanContext -func (c SpanContext) ForeachBaggageItem(handler func(k, v string) bool) { - for k, v := range c.baggage { - if !handler(k, v) { - break - } - } -} - -// IsSampled returns whether this trace was chosen for permanent storage -// by the sampling mechanism of the tracer. -func (c SpanContext) IsSampled() bool { - return (c.flags & flagSampled) == flagSampled -} - -// IsDebug indicates whether sampling was explicitly requested by the service. -func (c SpanContext) IsDebug() bool { - return (c.flags & flagDebug) == flagDebug -} - -// IsValid indicates whether this context actually represents a valid trace. -func (c SpanContext) IsValid() bool { - return c.traceID.IsValid() && c.spanID != 0 -} - -func (c SpanContext) String() string { - if c.traceID.High == 0 { - return fmt.Sprintf("%x:%x:%x:%x", c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.flags) - } - return fmt.Sprintf("%x%016x:%x:%x:%x", c.traceID.High, c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.flags) -} - -// ContextFromString reconstructs the Context encoded in a string -func ContextFromString(value string) (SpanContext, error) { - var context SpanContext - if value == "" { - return emptyContext, errEmptyTracerStateString - } - parts := strings.Split(value, ":") - if len(parts) != 4 { - return emptyContext, errMalformedTracerStateString - } - var err error - if context.traceID, err = TraceIDFromString(parts[0]); err != nil { - return emptyContext, err - } - if context.spanID, err = SpanIDFromString(parts[1]); err != nil { - return emptyContext, err - } - if context.parentID, err = SpanIDFromString(parts[2]); err != nil { - return emptyContext, err - } - flags, err := strconv.ParseUint(parts[3], 10, 8) - if err != nil { - return emptyContext, err - } - context.flags = byte(flags) - return context, nil -} - -// TraceID returns the trace ID of this span context -func (c SpanContext) TraceID() TraceID { - return c.traceID -} - -// SpanID returns the span ID of this span context -func (c SpanContext) SpanID() SpanID { - return c.spanID -} - -// ParentID returns the parent span ID of this span context -func (c SpanContext) ParentID() SpanID { - return c.parentID -} - -// NewSpanContext creates a new instance of SpanContext -func NewSpanContext(traceID TraceID, spanID, parentID SpanID, sampled bool, baggage map[string]string) SpanContext { - flags := byte(0) - if sampled { - flags = flagSampled - } - return SpanContext{ - traceID: traceID, - spanID: spanID, - parentID: parentID, - flags: flags, - baggage: baggage} -} - -// CopyFrom copies data from ctx into this context, including span identity and baggage. -// TODO This is only used by interop.go. Remove once TChannel Go supports OpenTracing. -func (c *SpanContext) CopyFrom(ctx *SpanContext) { - c.traceID = ctx.traceID - c.spanID = ctx.spanID - c.parentID = ctx.parentID - c.flags = ctx.flags - if l := len(ctx.baggage); l > 0 { - c.baggage = make(map[string]string, l) - for k, v := range ctx.baggage { - c.baggage[k] = v - } - } else { - c.baggage = nil - } -} - -// WithBaggageItem creates a new context with an extra baggage item. 
-func (c SpanContext) WithBaggageItem(key, value string) SpanContext { - var newBaggage map[string]string - if c.baggage == nil { - newBaggage = map[string]string{key: value} - } else { - newBaggage = make(map[string]string, len(c.baggage)+1) - for k, v := range c.baggage { - newBaggage[k] = v - } - newBaggage[key] = value - } - // Use positional parameters so the compiler will help catch new fields. - return SpanContext{c.traceID, c.spanID, c.parentID, c.flags, newBaggage, ""} -} - -// isDebugIDContainerOnly returns true when the instance of the context is only -// used to return the debug/correlation ID from extract() method. This happens -// in the situation when "jaeger-debug-id" header is passed in the carrier to -// the extract() method, but the request otherwise has no span context in it. -// Previously this would've returned opentracing.ErrSpanContextNotFound from the -// extract method, but now it returns a dummy context with only debugID filled in. -// -// See JaegerDebugHeader in constants.go -// See textMapPropagator#Extract -func (c *SpanContext) isDebugIDContainerOnly() bool { - return !c.traceID.IsValid() && c.debugID != "" -} - -// ------- TraceID ------- - -func (t TraceID) String() string { - if t.High == 0 { - return fmt.Sprintf("%x", t.Low) - } - return fmt.Sprintf("%x%016x", t.High, t.Low) -} - -// TraceIDFromString creates a TraceID from a hexadecimal string -func TraceIDFromString(s string) (TraceID, error) { - var hi, lo uint64 - var err error - if len(s) > 32 { - return TraceID{}, fmt.Errorf("TraceID cannot be longer than 32 hex characters: %s", s) - } else if len(s) > 16 { - hiLen := len(s) - 16 - if hi, err = strconv.ParseUint(s[0:hiLen], 16, 64); err != nil { - return TraceID{}, err - } - if lo, err = strconv.ParseUint(s[hiLen:], 16, 64); err != nil { - return TraceID{}, err - } - } else { - if lo, err = strconv.ParseUint(s, 16, 64); err != nil { - return TraceID{}, err - } - } - return TraceID{High: hi, Low: lo}, nil -} - -// IsValid checks if the trace ID is valid, i.e. not zero. -func (t TraceID) IsValid() bool { - return t.High != 0 || t.Low != 0 -} - -// ------- SpanID ------- - -func (s SpanID) String() string { - return fmt.Sprintf("%x", uint64(s)) -} - -// SpanIDFromString creates a SpanID from a hexadecimal string -func SpanIDFromString(s string) (SpanID, error) { - if len(s) > 16 { - return SpanID(0), fmt.Errorf("SpanID cannot be longer than 16 hex characters: %s", s) - } - id, err := strconv.ParseUint(s, 16, 64) - if err != nil { - return SpanID(0), err - } - return SpanID(id), nil -} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/doc.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/doc.go index 4f5549033..fac3c09f9 100644 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/doc.go +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/doc.go @@ -14,8 +14,6 @@ /* Package jaeger implements an OpenTracing (http://opentracing.io) Tracer. -It is currently using Zipkin-compatible data model and can be directly -itegrated with Zipkin backend (http://zipkin.io). 
For integration instructions please refer to the README: diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/glide.lock b/hotelReservation/vendor/github.com/uber/jaeger-client-go/glide.lock index 9ad7a140a..c1ec33925 100644 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/glide.lock +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/glide.lock @@ -1,109 +1,105 @@ -hash: af5e193de27f73f5a8cef66ae2f0c013bcb9e48ecd69db4a595221f88ba99a71 -updated: 2017-11-21T19:49:03.248636345-05:00 +hash: 63bec420a22b7e5abac8c602c5cc9b66a33d6a1bfec8918eecc77fd344b759ed +updated: 2020-07-31T13:30:37.242608-04:00 imports: -- name: github.com/apache/thrift - version: b2a4d4ae21c789b689dd162deb819665567f481c - subpackages: - - lib/go/thrift - name: github.com/beorn7/perks - version: 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 + version: 3a771d992973f24aa725d07868b467d1ddfceafb subpackages: - quantile -- name: github.com/codahale/hdrhistogram - version: f8ad88b59a584afeee9d334eff879b104439117b +- name: github.com/HdrHistogram/hdrhistogram-go + version: 3a0bb77429bd3a61596f5e8a3172445844342120 - name: github.com/crossdock/crossdock-go version: 049aabb0122b03bc9bd30cab8f3f91fb60166361 subpackages: - assert - require - name: github.com/davecgh/go-spew - version: 6d212800a42e8ab5c146b8ace3490ee17e5225f9 + version: 8991bc29aa16c548c550c7ff78260e27b9ab7c73 subpackages: - spew +- name: github.com/golang/mock + version: 51421b967af1f557f93a59e0057aaf15ca02e29c + subpackages: + - gomock - name: github.com/golang/protobuf - version: 7cc19b78d562895b13596ddce7aafb59dd789318 + version: b5d812f8a3706043e23a9cd5babf2e5423744d30 subpackages: - proto - name: github.com/matttproud/golang_protobuf_extensions - version: c12348ce28de40eed0136aa2b644d0ee0650e56c + version: c182affec369e30f25d3eb8cd8a478dee585ae7d subpackages: - pbutil - name: github.com/opentracing/opentracing-go - version: 1949ddbfd147afd4d964a9f00b24eb291e0e7c38 + version: d34af3eaa63c4d08ab54863a4bdd0daa45212e12 subpackages: - ext + - harness - log +- name: github.com/pkg/errors + version: ba968bfe8b2f7e042a574c888954fccecfa385b4 - name: github.com/pmezard/go-difflib - version: d8ed2627bdf02c080bf22230dbb337003b7aba2d + version: 5d4384ee4fb2527b0a1256a821ebfc92f91efefc subpackages: - difflib - name: github.com/prometheus/client_golang - version: c5b7fccd204277076155f10851dad72b76a49317 + version: 170205fb58decfd011f1550d4cfb737230d7ae4f subpackages: - prometheus + - prometheus/internal - name: github.com/prometheus/client_model - version: 6f3806018612930941127f2a7c6c453ba2c527d2 + version: fd36f4220a901265f90734c3183c5f0c91daa0b8 subpackages: - go - name: github.com/prometheus/common - version: 49fee292b27bfff7f354ee0f64e1bc4850462edf + version: 1ab4d74fc89940cfbc3c2b3a89821336cdefa119 subpackages: - expfmt - internal/bitbucket.org/ww/goautoneg - model - name: github.com/prometheus/procfs - version: a1dba9ce8baed984a2495b658c82687f8157b98f + version: 8a055596020d692cf491851e47ba3e302d9f90ce subpackages: - - xfs + - internal/fs + - internal/util - name: github.com/stretchr/testify - version: 69483b4bd14f5845b5a1e55bca19e954e827f1d0 + version: f654a9112bbeac49ca2cd45bfbe11533c4666cf8 subpackages: - assert + - mock - require - suite - name: github.com/uber-go/atomic - version: 8474b86a5a6f79c443ce4b2992817ff32cf208b8 - subpackages: - - utils + version: 845920076a298bdb984fb0f1b86052e4ca0a281c - name: github.com/uber/jaeger-lib - version: c48167d9cae5887393dd5e61efd06a4a48b7fbb3 + version: 
48cc1df63e6be0d63b95677f0d22beb880bce1e4 subpackages: - metrics + - metrics/metricstest - metrics/prometheus - - metrics/testutils -- name: github.com/uber/tchannel-go - version: cc230a2942d078a8b01f4a79895dad62e6c572f1 - subpackages: - - atomic - - internal/argreader - - relay - - thrift - - thrift/gen-go/meta - - tnet - - tos - - trace/thrift/gen-go/tcollector - - trand - - typed - name: go.uber.org/atomic - version: 8474b86a5a6f79c443ce4b2992817ff32cf208b8 + version: 845920076a298bdb984fb0f1b86052e4ca0a281c - name: go.uber.org/multierr - version: 3c4937480c32f4c13a875a1829af76c98ca3d40a + version: b587143a48b62b01d337824eab43700af6ffe222 - name: go.uber.org/zap - version: 35aad584952c3e7020db7b839f6b102de6271f89 + version: feeb9a050b31b40eec6f2470e7599eeeadfe5bdd subpackages: - buffer - internal/bufferpool - internal/color - internal/exit - zapcore + - zaptest/observer - name: golang.org/x/net - version: a337091b0525af65de94df2eb7e98bd9962dcbe2 + version: addf6b3196f61cd44ce5a76657913698c73479d0 subpackages: - - bpf - context - context/ctxhttp - - internal/iana - - internal/socket - - ipv4 - - ipv6 -testImports: [] +- name: golang.org/x/sys + version: 3e129f6d46b10b0e1da36b3deffcb55e09631b64 + subpackages: + - internal/unsafeheader + - windows +- name: gopkg.in/yaml.v3 + version: eeeca48fe7764f320e4870d231902bf9c1be2c08 +testImports: +- name: github.com/stretchr/objx + version: 35313a95ee26395aa17d366c71a2ccf788fa69b6 diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/glide.yaml b/hotelReservation/vendor/github.com/uber/jaeger-client-go/glide.yaml index c0b6bcbfb..295678c91 100644 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/glide.yaml +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/glide.yaml @@ -1,37 +1,30 @@ package: github.com/uber/jaeger-client-go import: -- package: github.com/apache/thrift - version: ">=0.9.3, <0.11.0" - subpackages: - - lib/go/thrift - package: github.com/opentracing/opentracing-go - version: ^1 + version: ^1.2 subpackages: - ext - log -- package: golang.org/x/net - subpackages: - - context -- package: github.com/uber/tchannel-go - version: ^1.1.0 +- package: github.com/crossdock/crossdock-go +- package: github.com/uber/jaeger-lib + version: ^2.3.0 subpackages: - - atomic - - thrift - - thrift/gen-go/meta - - tnet - - trace/thrift/gen-go/tcollector - - typed + - metrics +- package: github.com/pkg/errors + version: ~0.8.0 +- package: go.uber.org/zap + source: https://github.com/uber-go/zap.git + version: ^1 +- package: github.com/uber-go/atomic + version: ^1 +- package: github.com/prometheus/client_golang + version: 1.1 +- package: github.com/prometheus/procfs + version: 0.0.6 +testImport: - package: github.com/stretchr/testify - version: ^1.1.3 subpackages: - assert - require - suite -- package: github.com/crossdock/crossdock-go -- package: github.com/uber/jaeger-lib - version: ^1.2.1 - subpackages: - - metrics -testImport: -- package: github.com/prometheus/client_golang - version: v0.8.0 +- package: github.com/golang/mock diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/header.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/header.go index 19c2c055b..5da70351d 100644 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/header.go +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/header.go @@ -38,7 +38,8 @@ type HeadersConfig struct { TraceBaggageHeaderPrefix string `yaml:"traceBaggageHeaderPrefix"` } -func (c *HeadersConfig) applyDefaults() 
*HeadersConfig { +// ApplyDefaults sets missing configuration keys to default values +func (c *HeadersConfig) ApplyDefaults() *HeadersConfig { if c.JaegerBaggageHeader == "" { c.JaegerBaggageHeader = JaegerBaggageHeader } diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go index a56515aca..2f58bb541 100644 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go @@ -15,6 +15,7 @@ package remote import ( + "context" "fmt" "net/url" "sync" @@ -37,7 +38,7 @@ func newHTTPBaggageRestrictionManagerProxy(hostPort, serviceName string) *httpBa } } -func (s *httpBaggageRestrictionManagerProxy) GetBaggageRestrictions(serviceName string) ([]*thrift.BaggageRestriction, error) { +func (s *httpBaggageRestrictionManagerProxy) GetBaggageRestrictions(context.Context, string) ([]*thrift.BaggageRestriction, error) { var out []*thrift.BaggageRestriction if err := utils.GetJSON(s.url, &out); err != nil { return nil, err @@ -134,7 +135,7 @@ func (m *RestrictionManager) pollManager() { } func (m *RestrictionManager) updateRestrictions() error { - restrictions, err := m.thriftProxy.GetBaggageRestrictions(m.serviceName) + restrictions, err := m.thriftProxy.GetBaggageRestrictions(context.Background(), m.serviceName) if err != nil { m.metrics.BaggageRestrictionsUpdateFailure.Inc(1) return err diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/internal/reporterstats/stats.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/internal/reporterstats/stats.go new file mode 100644 index 000000000..fe0bef268 --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/internal/reporterstats/stats.go @@ -0,0 +1,25 @@ +// Copyright (c) 2020 The Jaeger Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package reporterstats + +// ReporterStats exposes some metrics from the RemoteReporter. +type ReporterStats interface { + SpansDroppedFromQueue() int64 +} + +// Receiver can be implemented by a Transport to be given ReporterStats. +type Receiver interface { + SetReporterStats(ReporterStats) +} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/options.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/options.go new file mode 100644 index 000000000..f52c322fb --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/options.go @@ -0,0 +1,99 @@ +// Copyright (c) 2018 The Jaeger Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "time" + + "github.com/uber/jaeger-client-go" +) + +const ( + defaultHostPort = "localhost:5778" + defaultRefreshInterval = time.Second * 5 +) + +// Option is a function that sets some option on the Throttler +type Option func(options *options) + +// Options is a factory for all available options +var Options options + +type options struct { + metrics *jaeger.Metrics + logger jaeger.Logger + hostPort string + refreshInterval time.Duration + synchronousInitialization bool +} + +// Metrics creates an Option that initializes Metrics on the Throttler, which is used to emit statistics. +func (options) Metrics(m *jaeger.Metrics) Option { + return func(o *options) { + o.metrics = m + } +} + +// Logger creates an Option that sets the logger used by the Throttler. +func (options) Logger(logger jaeger.Logger) Option { + return func(o *options) { + o.logger = logger + } +} + +// HostPort creates an Option that sets the hostPort of the local agent that keeps track of credits. +func (options) HostPort(hostPort string) Option { + return func(o *options) { + o.hostPort = hostPort + } +} + +// RefreshInterval creates an Option that sets how often the Throttler will poll local agent for +// credits. +func (options) RefreshInterval(refreshInterval time.Duration) Option { + return func(o *options) { + o.refreshInterval = refreshInterval + } +} + +// SynchronousInitialization creates an Option that determines whether the throttler should synchronously +// fetch credits from the agent when an operation is seen for the first time. This should be set to true +// if the client will be used by a short lived service that needs to ensure that credits are fetched upfront +// such that sampling or throttling occurs. +func (options) SynchronousInitialization(b bool) Option { + return func(o *options) { + o.synchronousInitialization = b + } +} + +func applyOptions(o ...Option) options { + opts := options{} + for _, option := range o { + option(&opts) + } + if opts.metrics == nil { + opts.metrics = jaeger.NewNullMetrics() + } + if opts.logger == nil { + opts.logger = jaeger.NullLogger + } + if opts.hostPort == "" { + opts.hostPort = defaultHostPort + } + if opts.refreshInterval == 0 { + opts.refreshInterval = defaultRefreshInterval + } + return opts +} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/throttler.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/throttler.go new file mode 100644 index 000000000..20f434fe4 --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/throttler.go @@ -0,0 +1,216 @@ +// Copyright (c) 2018 The Jaeger Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+	"fmt"
+	"net/url"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/pkg/errors"
+
+	"github.com/uber/jaeger-client-go"
+	"github.com/uber/jaeger-client-go/utils"
+)
+
+const (
+	// minimumCredits is the minimum amount of credits necessary to not be throttled.
+	// i.e. if currentCredits > minimumCredits, then the operation will not be throttled.
+	minimumCredits = 1.0
+)
+
+var (
+	errorUUIDNotSet = errors.New("Throttler UUID must be set")
+)
+
+type operationBalance struct {
+	Operation string  `json:"operation"`
+	Balance   float64 `json:"balance"`
+}
+
+type creditResponse struct {
+	Balances []operationBalance `json:"balances"`
+}
+
+type httpCreditManagerProxy struct {
+	hostPort string
+}
+
+func newHTTPCreditManagerProxy(hostPort string) *httpCreditManagerProxy {
+	return &httpCreditManagerProxy{
+		hostPort: hostPort,
+	}
+}
+
+// N.B. Operations list must not be empty.
+func (m *httpCreditManagerProxy) FetchCredits(uuid, serviceName string, operations []string) (*creditResponse, error) {
+	params := url.Values{}
+	params.Set("service", serviceName)
+	params.Set("uuid", uuid)
+	for _, op := range operations {
+		params.Add("operations", op)
+	}
+	var resp creditResponse
+	if err := utils.GetJSON(fmt.Sprintf("http://%s/credits?%s", m.hostPort, params.Encode()), &resp); err != nil {
+		return nil, errors.Wrap(err, "Failed to receive credits from agent")
+	}
+	return &resp, nil
+}
+
+// Throttler retrieves credits from agent and uses them to throttle operations.
+type Throttler struct {
+	options
+
+	mux           sync.RWMutex
+	service       string
+	uuid          atomic.Value
+	creditManager *httpCreditManagerProxy
+	credits       map[string]float64 // map of operation->credits
+	close         chan struct{}
+	stopped       sync.WaitGroup
+}
+
+// NewThrottler returns a Throttler that polls agent for credits and uses them to throttle
+// the service.
+func NewThrottler(service string, options ...Option) *Throttler {
+	opts := applyOptions(options...)
+	creditManager := newHTTPCreditManagerProxy(opts.hostPort)
+	t := &Throttler{
+		options:       opts,
+		creditManager: creditManager,
+		service:       service,
+		credits:       make(map[string]float64),
+		close:         make(chan struct{}),
+	}
+	t.stopped.Add(1)
+	go t.pollManager()
+	return t
+}
+
+// IsAllowed implements Throttler#IsAllowed.
+func (t *Throttler) IsAllowed(operation string) bool {
+	t.mux.Lock()
+	defer t.mux.Unlock()
+	value, ok := t.credits[operation]
+	if !ok || value == 0 {
+		if !ok {
+			// NOTE: This appears to be a no-op at first glance, but it stores
+			// the operation key in the map. Necessary for functionality of
+			// Throttler#operations method.
+			t.credits[operation] = 0
+		}
+		if !t.synchronousInitialization {
+			t.metrics.ThrottledDebugSpans.Inc(1)
+			return false
+		}
+		// If it is the first time this operation is being checked, synchronously fetch
+		// the credits.
+		credits, err := t.fetchCredits([]string{operation})
+		if err != nil {
+			// Failed to receive credits from agent, try again next time
+			t.logger.Error("Failed to fetch credits: " + err.Error())
+			return false
+		}
+		if len(credits.Balances) == 0 {
+			// This shouldn't happen but just in case
+			return false
+		}
+		for _, opBalance := range credits.Balances {
+			t.credits[opBalance.Operation] += opBalance.Balance
+		}
+	}
+	return t.isAllowed(operation)
+}
+
+// Close stops the throttler from fetching credits from remote.
+func (t *Throttler) Close() error {
+	close(t.close)
+	t.stopped.Wait()
+	return nil
+}
+
+// SetProcess implements ProcessSetter#SetProcess. It's imperative that the UUID is set before any remote
+// requests are made.
+func (t *Throttler) SetProcess(process jaeger.Process) {
+	if process.UUID != "" {
+		t.uuid.Store(process.UUID)
+	}
+}
+
+// N.B. This function must be called with the Write Lock
+func (t *Throttler) isAllowed(operation string) bool {
+	credits := t.credits[operation]
+	if credits < minimumCredits {
+		t.metrics.ThrottledDebugSpans.Inc(1)
+		return false
+	}
+	t.credits[operation] = credits - minimumCredits
+	return true
+}
+
+func (t *Throttler) pollManager() {
+	defer t.stopped.Done()
+	ticker := time.NewTicker(t.refreshInterval)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-ticker.C:
+			t.refreshCredits()
+		case <-t.close:
+			return
+		}
+	}
+}
+
+func (t *Throttler) operations() []string {
+	t.mux.RLock()
+	defer t.mux.RUnlock()
+	operations := make([]string, 0, len(t.credits))
+	for op := range t.credits {
+		operations = append(operations, op)
+	}
+	return operations
+}
+
+func (t *Throttler) refreshCredits() {
+	operations := t.operations()
+	if len(operations) == 0 {
+		return
+	}
+	newCredits, err := t.fetchCredits(operations)
+	if err != nil {
+		t.metrics.ThrottlerUpdateFailure.Inc(1)
+		t.logger.Error("Failed to fetch credits: " + err.Error())
+		return
+	}
+	t.metrics.ThrottlerUpdateSuccess.Inc(1)
+
+	t.mux.Lock()
+	defer t.mux.Unlock()
+	for _, opBalance := range newCredits.Balances {
+		t.credits[opBalance.Operation] += opBalance.Balance
+	}
+}
+
+func (t *Throttler) fetchCredits(operations []string) (*creditResponse, error) {
+	uuid := t.uuid.Load()
+	uuidStr, _ := uuid.(string)
+	if uuid == nil || uuidStr == "" {
+		return nil, errorUUIDNotSet
+	}
+	return t.creditManager.FetchCredits(uuidStr, t.service, operations)
+}
diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go
new file mode 100644
index 000000000..196ed69ca
--- /dev/null
+++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go
@@ -0,0 +1,32 @@
+// Copyright (c) 2018 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package throttler
+
+// Throttler is used to rate limit operations.
For example, given how debug spans +// are always sampled, a throttler can be enabled per client to rate limit the amount +// of debug spans a client can start. +type Throttler interface { + // IsAllowed determines whether the operation should be allowed and not be + // throttled. + IsAllowed(operation string) bool +} + +// DefaultThrottler doesn't throttle at all. +type DefaultThrottler struct{} + +// IsAllowed implements Throttler#IsAllowed. +func (t DefaultThrottler) IsAllowed(operation string) bool { + return true +} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go index 338a34582..3ac2f8f94 100644 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go @@ -24,7 +24,10 @@ import ( ) // BuildJaegerThrift builds jaeger span based on internal span. +// TODO: (breaking change) move to internal package. func BuildJaegerThrift(span *Span) *j.Span { + span.Lock() + defer span.Unlock() startTime := utils.TimeToMicrosecondsSinceEpochInt64(span.startTime) duration := span.duration.Nanoseconds() / int64(time.Microsecond) jaegerSpan := &j.Span{ @@ -33,10 +36,10 @@ func BuildJaegerThrift(span *Span) *j.Span { SpanId: int64(span.context.spanID), ParentSpanId: int64(span.context.parentID), OperationName: span.operationName, - Flags: int32(span.context.flags), + Flags: int32(span.context.samplingState.flags()), StartTime: startTime, Duration: duration, - Tags: buildTags(span.tags), + Tags: buildTags(span.tags, span.tracer.options.maxTagValueLength), Logs: buildLogs(span.logs), References: buildReferences(span.references), } @@ -44,22 +47,28 @@ func BuildJaegerThrift(span *Span) *j.Span { } // BuildJaegerProcessThrift creates a thrift Process type. +// TODO: (breaking change) move to internal package. 
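The remote Throttler above implements credit-based rate limiting: the agent hands out per-operation credit balances, and each allowed operation spends minimumCredits until the next refresh. A self-contained sketch of just that accounting (illustrative only; the real type also polls the agent, tracks a process UUID, and guards the map with a mutex):

package main

import "fmt"

const minimumCredits = 1.0

// creditThrottler mirrors the spend-down logic of Throttler.isAllowed.
type creditThrottler struct {
	credits map[string]float64 // operation -> remaining credits
}

func (t *creditThrottler) IsAllowed(operation string) bool {
	if t.credits[operation] < minimumCredits {
		return false // throttled until credits are refreshed
	}
	t.credits[operation] -= minimumCredits
	return true
}

func main() {
	t := &creditThrottler{credits: map[string]float64{"GetProfile": 2.0}}
	for i := 0; i < 3; i++ {
		fmt.Println(t.IsAllowed("GetProfile")) // true, true, false
	}
}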
 func BuildJaegerProcessThrift(span *Span) *j.Process {
+	span.Lock()
+	defer span.Unlock()
 	return buildJaegerProcessThrift(span.tracer)
 }
 
 func buildJaegerProcessThrift(tracer *Tracer) *j.Process {
 	process := &j.Process{
 		ServiceName: tracer.serviceName,
-		Tags:        buildTags(tracer.tags),
+		Tags:        buildTags(tracer.tags, tracer.options.maxTagValueLength),
+	}
+	if tracer.process.UUID != "" {
+		process.Tags = append(process.Tags, &j.Tag{Key: TracerUUIDTagKey, VStr: &tracer.process.UUID, VType: j.TagType_STRING})
 	}
 	return process
 }
 
-func buildTags(tags []Tag) []*j.Tag {
+func buildTags(tags []Tag, maxTagValueLength int) []*j.Tag {
 	jTags := make([]*j.Tag, 0, len(tags))
 	for _, tag := range tags {
-		jTag := buildTag(&tag)
+		jTag := buildTag(&tag, maxTagValueLength)
 		jTags = append(jTags, jTag)
 	}
 	return jTags
@@ -77,16 +86,16 @@ func buildLogs(logs []opentracing.LogRecord) []*j.Log {
 	return jLogs
 }
 
-func buildTag(tag *Tag) *j.Tag {
+func buildTag(tag *Tag, maxTagValueLength int) *j.Tag {
 	jTag := &j.Tag{Key: tag.key}
 	switch value := tag.value.(type) {
 	case string:
-		vStr := truncateString(value)
+		vStr := truncateString(value, maxTagValueLength)
 		jTag.VStr = &vStr
 		jTag.VType = j.TagType_STRING
 	case []byte:
-		if len(value) > maxAnnotationLength {
-			value = value[:maxAnnotationLength]
+		if len(value) > maxTagValueLength {
+			value = value[:maxTagValueLength]
 		}
 		jTag.VBinary = value
 		jTag.VType = j.TagType_BINARY
@@ -143,7 +152,7 @@ func buildTag(tag *Tag) *j.Tag {
 		jTag.VBool = &vBool
 		jTag.VType = j.TagType_BOOL
 	default:
-		vStr := truncateString(stringify(value))
+		vStr := truncateString(stringify(value), maxTagValueLength)
 		jTag.VStr = &vStr
 		jTag.VType = j.TagType_STRING
 	}
diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/log/logger.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/log/logger.go
index b002cda55..ced6e0ce9 100644
--- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/log/logger.go
+++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/log/logger.go
@@ -47,13 +47,19 @@ func (l *stdLogger) Infof(msg string, args ...interface{}) {
 	log.Printf(msg, args...)
 }
 
-// NullLogger is implementation of the Logger interface that delegates to default `log` package
+// Debugf logs a message at debug priority
+func (l *stdLogger) Debugf(msg string, args ...interface{}) {
+	log.Printf(fmt.Sprintf("DEBUG: %s", msg), args...)
+}
+
+// NullLogger is an implementation of the Logger interface that is a no-op
 var NullLogger = &nullLogger{}
 
 type nullLogger struct{}
 
-func (l *nullLogger) Error(msg string)                       {}
-func (l *nullLogger) Infof(msg string, args ...interface{}) {}
+func (l *nullLogger) Error(msg string)                        {}
+func (l *nullLogger) Infof(msg string, args ...interface{})   {}
+func (l *nullLogger) Debugf(msg string, args ...interface{})  {}
 
 // BytesBufferLogger implements Logger backed by a bytes.Buffer.
 type BytesBufferLogger struct {
@@ -75,9 +81,61 @@ func (l *BytesBufferLogger) Infof(msg string, args ...interface{}) {
 	l.mux.Unlock()
 }
 
+// Debugf implements Logger.
+func (l *BytesBufferLogger) Debugf(msg string, args ...interface{}) {
+	l.mux.Lock()
+	l.buf.WriteString("DEBUG: " + fmt.Sprintf(msg, args...) + "\n")
+	l.mux.Unlock()
+}
+
 // String returns string representation of the underlying buffer.
 func (l *BytesBufferLogger) String() string {
 	l.mux.Lock()
 	defer l.mux.Unlock()
 	return l.buf.String()
 }
+
+// Flush empties the underlying buffer.
+func (l *BytesBufferLogger) Flush() { + l.mux.Lock() + defer l.mux.Unlock() + l.buf.Reset() +} + +// DebugLogger is an interface which adds a debug logging level +type DebugLogger interface { + Logger + + // Debugf logs a message at debug priority + Debugf(msg string, args ...interface{}) +} + +// DebugLogAdapter is a log adapter that converts a Logger into a DebugLogger +// If the provided Logger doesn't satisfy the interface, a logger with debug +// disabled is returned +func DebugLogAdapter(logger Logger) DebugLogger { + if logger == nil { + return nil + } + if debugLogger, ok := logger.(DebugLogger); ok { + return debugLogger + } + logger.Infof("debug logging disabled") + return debugDisabledLogAdapter{logger: logger} +} + +type debugDisabledLogAdapter struct { + logger Logger +} + +func (d debugDisabledLogAdapter) Error(msg string) { + d.logger.Error(msg) +} + +func (d debugDisabledLogAdapter) Infof(msg string, args ...interface{}) { + d.logger.Infof(msg, args...) +} + +// Debugf is a nop +func (d debugDisabledLogAdapter) Debugf(msg string, args ...interface{}) { +} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/metrics.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/metrics.go index 8c0e63213..50e4e22d6 100644 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/metrics.go +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/metrics.go @@ -1,4 +1,4 @@ -// Copyright (c) 2017 Uber Technologies, Inc. +// Copyright (c) 2017-2018 Uber Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -21,74 +21,95 @@ import ( // Metrics is a container of all stats emitted by Jaeger tracer. type Metrics struct { // Number of traces started by this tracer as sampled - TracesStartedSampled metrics.Counter `metric:"traces" tags:"state=started,sampled=y"` + TracesStartedSampled metrics.Counter `metric:"traces" tags:"state=started,sampled=y" help:"Number of traces started by this tracer as sampled"` // Number of traces started by this tracer as not sampled - TracesStartedNotSampled metrics.Counter `metric:"traces" tags:"state=started,sampled=n"` + TracesStartedNotSampled metrics.Counter `metric:"traces" tags:"state=started,sampled=n" help:"Number of traces started by this tracer as not sampled"` + + // Number of traces started by this tracer with delayed sampling + TracesStartedDelayedSampling metrics.Counter `metric:"traces" tags:"state=started,sampled=n" help:"Number of traces started by this tracer with delayed sampling"` // Number of externally started sampled traces this tracer joined - TracesJoinedSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=y"` + TracesJoinedSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=y" help:"Number of externally started sampled traces this tracer joined"` // Number of externally started not-sampled traces this tracer joined - TracesJoinedNotSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=n"` + TracesJoinedNotSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=n" help:"Number of externally started not-sampled traces this tracer joined"` // Number of sampled spans started by this tracer - SpansStartedSampled metrics.Counter `metric:"started_spans" tags:"sampled=y"` + SpansStartedSampled metrics.Counter `metric:"started_spans" tags:"sampled=y" help:"Number of spans started by this tracer as sampled"` + + // Number of not sampled spans started 
by this tracer + SpansStartedNotSampled metrics.Counter `metric:"started_spans" tags:"sampled=n" help:"Number of spans started by this tracer as not sampled"` - // Number of unsampled spans started by this tracer - SpansStartedNotSampled metrics.Counter `metric:"started_spans" tags:"sampled=n"` + // Number of spans with delayed sampling started by this tracer + SpansStartedDelayedSampling metrics.Counter `metric:"started_spans" tags:"sampled=delayed" help:"Number of spans started by this tracer with delayed sampling"` // Number of spans finished by this tracer - SpansFinished metrics.Counter `metric:"finished_spans"` + SpansFinishedSampled metrics.Counter `metric:"finished_spans" tags:"sampled=y" help:"Number of sampled spans finished by this tracer"` + + // Number of spans finished by this tracer + SpansFinishedNotSampled metrics.Counter `metric:"finished_spans" tags:"sampled=n" help:"Number of not-sampled spans finished by this tracer"` + + // Number of spans finished by this tracer + SpansFinishedDelayedSampling metrics.Counter `metric:"finished_spans" tags:"sampled=delayed" help:"Number of spans with delayed sampling finished by this tracer"` // Number of errors decoding tracing context - DecodingErrors metrics.Counter `metric:"span_context_decoding_errors"` + DecodingErrors metrics.Counter `metric:"span_context_decoding_errors" help:"Number of errors decoding tracing context"` // Number of spans successfully reported - ReporterSuccess metrics.Counter `metric:"reporter_spans" tags:"result=ok"` + ReporterSuccess metrics.Counter `metric:"reporter_spans" tags:"result=ok" help:"Number of spans successfully reported"` // Number of spans not reported due to a Sender failure - ReporterFailure metrics.Counter `metric:"reporter_spans" tags:"result=err"` + ReporterFailure metrics.Counter `metric:"reporter_spans" tags:"result=err" help:"Number of spans not reported due to a Sender failure"` // Number of spans dropped due to internal queue overflow - ReporterDropped metrics.Counter `metric:"reporter_spans" tags:"result=dropped"` + ReporterDropped metrics.Counter `metric:"reporter_spans" tags:"result=dropped" help:"Number of spans dropped due to internal queue overflow"` // Current number of spans in the reporter queue - ReporterQueueLength metrics.Gauge `metric:"reporter_queue_length"` + ReporterQueueLength metrics.Gauge `metric:"reporter_queue_length" help:"Current number of spans in the reporter queue"` // Number of times the Sampler succeeded to retrieve sampling strategy - SamplerRetrieved metrics.Counter `metric:"sampler_queries" tags:"result=ok"` + SamplerRetrieved metrics.Counter `metric:"sampler_queries" tags:"result=ok" help:"Number of times the Sampler succeeded to retrieve sampling strategy"` // Number of times the Sampler failed to retrieve sampling strategy - SamplerQueryFailure metrics.Counter `metric:"sampler_queries" tags:"result=err"` + SamplerQueryFailure metrics.Counter `metric:"sampler_queries" tags:"result=err" help:"Number of times the Sampler failed to retrieve sampling strategy"` // Number of times the Sampler succeeded to retrieve and update sampling strategy - SamplerUpdated metrics.Counter `metric:"sampler_updates" tags:"result=ok"` + SamplerUpdated metrics.Counter `metric:"sampler_updates" tags:"result=ok" help:"Number of times the Sampler succeeded to retrieve and update sampling strategy"` // Number of times the Sampler failed to update sampling strategy - SamplerUpdateFailure metrics.Counter `metric:"sampler_updates" tags:"result=err"` + SamplerUpdateFailure 
metrics.Counter `metric:"sampler_updates" tags:"result=err" help:"Number of times the Sampler failed to update sampling strategy"` // Number of times baggage was successfully written or updated on spans. - BaggageUpdateSuccess metrics.Counter `metric:"baggage_updates" tags:"result=ok"` + BaggageUpdateSuccess metrics.Counter `metric:"baggage_updates" tags:"result=ok" help:"Number of times baggage was successfully written or updated on spans"` // Number of times baggage failed to write or update on spans. - BaggageUpdateFailure metrics.Counter `metric:"baggage_updates" tags:"result=err"` + BaggageUpdateFailure metrics.Counter `metric:"baggage_updates" tags:"result=err" help:"Number of times baggage failed to write or update on spans"` // Number of times baggage was truncated as per baggage restrictions. - BaggageTruncate metrics.Counter `metric:"baggage_truncations"` + BaggageTruncate metrics.Counter `metric:"baggage_truncations" help:"Number of times baggage was truncated as per baggage restrictions"` // Number of times baggage restrictions were successfully updated. - BaggageRestrictionsUpdateSuccess metrics.Counter `metric:"baggage_restrictions_updates" tags:"result=ok"` + BaggageRestrictionsUpdateSuccess metrics.Counter `metric:"baggage_restrictions_updates" tags:"result=ok" help:"Number of times baggage restrictions were successfully updated"` // Number of times baggage restrictions failed to update. - BaggageRestrictionsUpdateFailure metrics.Counter `metric:"baggage_restrictions_updates" tags:"result=err"` + BaggageRestrictionsUpdateFailure metrics.Counter `metric:"baggage_restrictions_updates" tags:"result=err" help:"Number of times baggage restrictions failed to update"` + + // Number of times debug spans were throttled. + ThrottledDebugSpans metrics.Counter `metric:"throttled_debug_spans" help:"Number of times debug spans were throttled"` + + // Number of times throttler successfully updated. + ThrottlerUpdateSuccess metrics.Counter `metric:"throttler_updates" tags:"result=ok" help:"Number of times throttler successfully updated"` + + // Number of times throttler failed to update. + ThrottlerUpdateFailure metrics.Counter `metric:"throttler_updates" tags:"result=err" help:"Number of times throttler failed to update"` } // NewMetrics creates a new Metrics struct and initializes it. func NewMetrics(factory metrics.Factory, globalTags map[string]string) *Metrics { m := &Metrics{} - // TODO the namespace "jaeger" should be configurable (e.g. in all-in-one "jaeger-client" would make more sense) - metrics.Init(m, factory.Namespace("jaeger", nil), globalTags) + // TODO the namespace "jaeger" should be configurable + metrics.MustInit(m, factory.Namespace(metrics.NSOptions{Name: "jaeger"}).Namespace(metrics.NSOptions{Name: "tracer"}), globalTags) return m } diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/process.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/process.go new file mode 100644 index 000000000..30cbf9962 --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/process.go @@ -0,0 +1,29 @@ +// Copyright (c) 2018 The Jaeger Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
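NewMetrics above relies on jaeger-lib's reflection-based initializer: metrics.MustInit walks the struct, reads the `metric`, `tags`, and `help` tags, and assigns a live counter or gauge to each field. A hedged sketch of the same convention with a made-up metric (NullFactory produces no-op metrics; a Prometheus-backed factory would be used in practice):

package main

import "github.com/uber/jaeger-lib/metrics"

// demoMetrics uses the same struct-tag convention as the Metrics struct above.
type demoMetrics struct {
	Dropped metrics.Counter `metric:"dropped_spans" tags:"result=dropped" help:"Spans dropped by the demo"`
}

func main() {
	m := &demoMetrics{}
	// MustInit fills m.Dropped based on the struct tags; panics on malformed tags.
	metrics.MustInit(m, metrics.NullFactory.Namespace(metrics.NSOptions{Name: "demo"}), nil)
	m.Dropped.Inc(1) // a no-op under NullFactory, a real counter under Prometheus
}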
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +// Process holds process specific metadata that's relevant to this client. +type Process struct { + Service string + UUID string + Tags []Tag +} + +// ProcessSetter sets a process. This can be used by any class that requires +// the process to be set as part of initialization. +// See internal/throttler/remote/throttler.go for an example. +type ProcessSetter interface { + SetProcess(process Process) +} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/propagation.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/propagation.go index abca67a3c..e06459b98 100644 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/propagation.go +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/propagation.go @@ -51,15 +51,17 @@ type Extractor interface { Extract(carrier interface{}) (SpanContext, error) } -type textMapPropagator struct { +// TextMapPropagator is a combined Injector and Extractor for TextMap format +type TextMapPropagator struct { headerKeys *HeadersConfig metrics Metrics encodeValue func(string) string decodeValue func(string) string } -func newTextMapPropagator(headerKeys *HeadersConfig, metrics Metrics) *textMapPropagator { - return &textMapPropagator{ +// NewTextMapPropagator creates a combined Injector and Extractor for TextMap format +func NewTextMapPropagator(headerKeys *HeadersConfig, metrics Metrics) *TextMapPropagator { + return &TextMapPropagator{ headerKeys: headerKeys, metrics: metrics, encodeValue: func(val string) string { @@ -71,8 +73,9 @@ func newTextMapPropagator(headerKeys *HeadersConfig, metrics Metrics) *textMapPr } } -func newHTTPHeaderPropagator(headerKeys *HeadersConfig, metrics Metrics) *textMapPropagator { - return &textMapPropagator{ +// NewHTTPHeaderPropagator creates a combined Injector and Extractor for HTTPHeaders format +func NewHTTPHeaderPropagator(headerKeys *HeadersConfig, metrics Metrics) *TextMapPropagator { + return &TextMapPropagator{ headerKeys: headerKeys, metrics: metrics, encodeValue: func(val string) string { @@ -88,19 +91,22 @@ func newHTTPHeaderPropagator(headerKeys *HeadersConfig, metrics Metrics) *textMa } } -type binaryPropagator struct { +// BinaryPropagator is a combined Injector and Extractor for Binary format +type BinaryPropagator struct { tracer *Tracer buffers sync.Pool } -func newBinaryPropagator(tracer *Tracer) *binaryPropagator { - return &binaryPropagator{ +// NewBinaryPropagator creates a combined Injector and Extractor for Binary format +func NewBinaryPropagator(tracer *Tracer) *BinaryPropagator { + return &BinaryPropagator{ tracer: tracer, buffers: sync.Pool{New: func() interface{} { return &bytes.Buffer{} }}, } } -func (p *textMapPropagator) Inject( +// Inject implements Injector of TextMapPropagator +func (p *TextMapPropagator) Inject( sc SpanContext, abstractCarrier interface{}, ) error { @@ -121,7 +127,8 @@ func (p *textMapPropagator) Inject( return nil } -func (p *textMapPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) { +// Extract implements Extractor of TextMapPropagator +func (p *TextMapPropagator) 
Extract(abstractCarrier interface{}) (SpanContext, error) { textMapReader, ok := abstractCarrier.(opentracing.TextMapReader) if !ok { return emptyContext, opentracing.ErrInvalidCarrier @@ -166,7 +173,8 @@ func (p *textMapPropagator) Extract(abstractCarrier interface{}) (SpanContext, e return ctx, nil } -func (p *binaryPropagator) Inject( +// Inject implements Injector of BinaryPropagator +func (p *BinaryPropagator) Inject( sc SpanContext, abstractCarrier interface{}, ) error { @@ -185,7 +193,7 @@ func (p *binaryPropagator) Inject( if err := binary.Write(carrier, binary.BigEndian, sc.parentID); err != nil { return err } - if err := binary.Write(carrier, binary.BigEndian, sc.flags); err != nil { + if err := binary.Write(carrier, binary.BigEndian, sc.samplingState.flags()); err != nil { return err } @@ -207,12 +215,20 @@ func (p *binaryPropagator) Inject( return nil } -func (p *binaryPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) { +// W3C limits https://github.com/w3c/baggage/blob/master/baggage/HTTP_HEADER_FORMAT.md#limits +const ( + maxBinaryBaggage = 180 + maxBinaryNameValueLen = 4096 +) + +// Extract implements Extractor of BinaryPropagator +func (p *BinaryPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) { carrier, ok := abstractCarrier.(io.Reader) if !ok { return emptyContext, opentracing.ErrInvalidCarrier } var ctx SpanContext + ctx.samplingState = &samplingState{} if err := binary.Read(carrier, binary.BigEndian, &ctx.traceID); err != nil { return emptyContext, opentracing.ErrSpanContextCorrupted @@ -223,15 +239,21 @@ func (p *binaryPropagator) Extract(abstractCarrier interface{}) (SpanContext, er if err := binary.Read(carrier, binary.BigEndian, &ctx.parentID); err != nil { return emptyContext, opentracing.ErrSpanContextCorrupted } - if err := binary.Read(carrier, binary.BigEndian, &ctx.flags); err != nil { + + var flags byte + if err := binary.Read(carrier, binary.BigEndian, &flags); err != nil { return emptyContext, opentracing.ErrSpanContextCorrupted } + ctx.samplingState.setFlags(flags) // Handle the baggage items var numBaggage int32 if err := binary.Read(carrier, binary.BigEndian, &numBaggage); err != nil { return emptyContext, opentracing.ErrSpanContextCorrupted } + if numBaggage > maxBinaryBaggage { + return emptyContext, opentracing.ErrSpanContextCorrupted + } if iNumBaggage := int(numBaggage); iNumBaggage > 0 { ctx.baggage = make(map[string]string, iNumBaggage) buf := p.buffers.Get().(*bytes.Buffer) @@ -252,6 +274,9 @@ func (p *binaryPropagator) Extract(abstractCarrier interface{}) (SpanContext, er if err := binary.Read(carrier, binary.BigEndian, &valLen); err != nil { return emptyContext, opentracing.ErrSpanContextCorrupted } + if keyLen+valLen > maxBinaryNameValueLen { + return emptyContext, opentracing.ErrSpanContextCorrupted + } buf.Reset() buf.Grow(int(valLen)) if n, err := io.CopyN(buf, carrier, int64(valLen)); err != nil || int32(n) != valLen { @@ -269,7 +294,7 @@ func (p *binaryPropagator) Extract(abstractCarrier interface{}) (SpanContext, er // is converted to map[string]string { "key1" : "value1", // "key2" : "value2", // "key3" : "value3" } -func (p *textMapPropagator) parseCommaSeparatedMap(value string) map[string]string { +func (p *TextMapPropagator) parseCommaSeparatedMap(value string) map[string]string { baggage := make(map[string]string) value, err := url.QueryUnescape(value) if err != nil { @@ -279,7 +304,7 @@ func (p *textMapPropagator) parseCommaSeparatedMap(value string) map[string]stri for _, kvpair := range 
strings.Split(value, ",") { kv := strings.Split(strings.TrimSpace(kvpair), "=") if len(kv) == 2 { - baggage[kv[0]] = kv[1] + baggage[strings.TrimSpace(kv[0])] = kv[1] } else { log.Printf("Malformed value passed in for %s", p.headerKeys.JaegerBaggageHeader) } @@ -289,12 +314,12 @@ func (p *textMapPropagator) parseCommaSeparatedMap(value string) map[string]stri // Converts a baggage item key into an http header format, // by prepending TraceBaggageHeaderPrefix and encoding the key string -func (p *textMapPropagator) addBaggageKeyPrefix(key string) string { +func (p *TextMapPropagator) addBaggageKeyPrefix(key string) string { // TODO encodeBaggageKeyAsHeader add caching and escaping return fmt.Sprintf("%v%v", p.headerKeys.TraceBaggageHeaderPrefix, key) } -func (p *textMapPropagator) removeBaggageKeyPrefix(key string) string { +func (p *TextMapPropagator) removeBaggageKeyPrefix(key string) string { // TODO decodeBaggageHeaderKey add caching and escaping return key[len(p.headerKeys.TraceBaggageHeaderPrefix):] } diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/reporter.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/reporter.go index fe6288c4b..a71a92c3e 100644 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/reporter.go +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/reporter.go @@ -22,12 +22,15 @@ import ( "github.com/opentracing/opentracing-go" + "github.com/uber/jaeger-client-go/internal/reporterstats" "github.com/uber/jaeger-client-go/log" ) // Reporter is called by the tracer when a span is completed to report the span to the tracing collector. type Reporter interface { // Report submits a new span to collectors, possibly asynchronously and/or with buffering. + // If the reporter is processing Span asynchronously then it needs to Retain() the span, + // and then Release() it when no longer needed, to avoid span data corruption. Report(span *Span) // Close does a clean shutdown of the reporter, flushing any traces that may be buffered in memory. @@ -93,13 +96,14 @@ func NewInMemoryReporter() *InMemoryReporter { // Report implements Report() method of Reporter by storing the span in the buffer. func (r *InMemoryReporter) Report(span *Span) { r.lock.Lock() - r.spans = append(r.spans, span) + // Need to retain the span otherwise it will be released + r.spans = append(r.spans, span.Retain()) r.lock.Unlock() } -// Close implements Close() method of Reporter by doing nothing. +// Close implements Close() method of Reporter func (r *InMemoryReporter) Close() { - // no-op + r.Reset() } // SpansSubmitted returns the number of spans accumulated in the buffer. @@ -122,7 +126,12 @@ func (r *InMemoryReporter) GetSpans() []opentracing.Span { func (r *InMemoryReporter) Reset() { r.lock.Lock() defer r.lock.Unlock() - r.spans = nil + + // Before reset the collection need to release Span memory + for _, span := range r.spans { + span.(*Span).Release() + } + r.spans = r.spans[:0] } // ------------------------------ @@ -168,16 +177,31 @@ type reporterQueueItem struct { close *sync.WaitGroup } +// reporterStats implements reporterstats.ReporterStats. +type reporterStats struct { + droppedCount int64 // provided to Transports to report data loss to the backend +} + +// SpansDroppedFromQueue implements reporterstats.ReporterStats. 
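+// Transports that implement reporterstats.Receiver are handed this object
+// via SetReporterStats in NewRemoteReporter below, so they can report the
+// client's dropped-span count to the backend.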
+func (r *reporterStats) SpansDroppedFromQueue() int64 { + return atomic.LoadInt64(&r.droppedCount) +} + +func (r *reporterStats) incDroppedCount() { + atomic.AddInt64(&r.droppedCount, 1) +} + type remoteReporter struct { // These fields must be first in the struct because `sync/atomic` expects 64-bit alignment. // Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq - queueLength int64 + queueLength int64 // used to update metrics.Gauge closed int64 // 0 - not closed, 1 - closed reporterOptions - sender Transport - queue chan reporterQueueItem + sender Transport + queue chan reporterQueueItem + reporterStats *reporterStats } // NewRemoteReporter creates a new reporter that sends spans out of process by means of Sender. @@ -205,6 +229,10 @@ func NewRemoteReporter(sender Transport, opts ...ReporterOption) Reporter { reporterOptions: options, sender: sender, queue: make(chan reporterQueueItem, options.queueSize), + reporterStats: new(reporterStats), + } + if receiver, ok := sender.(reporterstats.Receiver); ok { + receiver.SetReporterStats(reporter.reporterStats) } go reporter.processQueue() return reporter @@ -218,21 +246,24 @@ func NewRemoteReporter(sender Transport, opts ...ReporterOption) Reporter { // because some of them may still be successfully added to the queue. func (r *remoteReporter) Report(span *Span) { select { - case r.queue <- reporterQueueItem{itemType: reporterQueueItemSpan, span: span}: + // Need to retain the span otherwise it will be released + case r.queue <- reporterQueueItem{itemType: reporterQueueItemSpan, span: span.Retain()}: atomic.AddInt64(&r.queueLength, 1) default: r.metrics.ReporterDropped.Inc(1) + r.reporterStats.incDroppedCount() } } // Close implements Close() method of Reporter by waiting for the queue to be drained. 
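+// A typical shutdown sequence, sketched (the sender is assumed to be any
+// Transport implementation):
+//
+//	reporter := NewRemoteReporter(sender)
+//	defer reporter.Close() // drains the queue and flushes buffered spans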
func (r *remoteReporter) Close() { + r.logger.Debugf("closing reporter") if swapped := atomic.CompareAndSwapInt64(&r.closed, 0, 1); !swapped { r.logger.Error("Repeated attempt to close the reporter is ignored") return } r.sendCloseEvent() - r.sender.Close() + _ = r.sender.Close() } func (r *remoteReporter) sendCloseEvent() { @@ -254,7 +285,7 @@ func (r *remoteReporter) processQueue() { flush := func() { if flushed, err := r.sender.Flush(); err != nil { r.metrics.ReporterFailure.Inc(int64(flushed)) - r.logger.Error(fmt.Sprintf("error when flushing the buffer: %s", err.Error())) + r.logger.Error(fmt.Sprintf("failed to flush Jaeger spans to server: %s", err.Error())) } else if flushed > 0 { r.metrics.ReporterSuccess.Inc(int64(flushed)) } @@ -272,12 +303,14 @@ func (r *remoteReporter) processQueue() { span := item.span if flushed, err := r.sender.Append(span); err != nil { r.metrics.ReporterFailure.Inc(int64(flushed)) - r.logger.Error(fmt.Sprintf("error reporting span %q: %s", span.OperationName(), err.Error())) + r.logger.Error(fmt.Sprintf("error reporting Jaeger span %q: %s", span.OperationName(), err.Error())) } else if flushed > 0 { r.metrics.ReporterSuccess.Inc(int64(flushed)) // to reduce the number of gauge stats, we only emit queue length on flush r.metrics.ReporterQueueLength.Update(atomic.LoadInt64(&r.queueLength)) + r.logger.Debugf("flushed %d spans", flushed) } + span.Release() case reporterQueueItemClose: timer.Stop() flush() diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/reporter_options.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/reporter_options.go index 65012d701..2fc030547 100644 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/reporter_options.go +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/reporter_options.go @@ -16,6 +16,8 @@ package jaeger import ( "time" + + "github.com/uber/jaeger-client-go/log" ) // ReporterOption is a function that sets some option on the reporter. @@ -31,7 +33,7 @@ type reporterOptions struct { // bufferFlushInterval is how often the buffer is force-flushed, even if it's not full bufferFlushInterval time.Duration // logger is used to log errors of span submissions - logger Logger + logger log.DebugLogger // metrics is used to record runtime stats metrics *Metrics } @@ -64,6 +66,6 @@ func (reporterOptions) BufferFlushInterval(bufferFlushInterval time.Duration) Re // errors of span submissions. func (reporterOptions) Logger(logger Logger) ReporterOption { return func(r *reporterOptions) { - r.logger = logger + r.logger = log.DebugLogAdapter(logger) } } diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go index ab8d74c29..a8cec2fa6 100644 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go @@ -34,7 +34,7 @@ type Metrics struct { // RequestCountFailures is a counter of the number of times any failure has been observed. RequestCountFailures metrics.Counter `metric:"requests" tags:"error=true"` - // RequestLatencySuccess is a latency histogram of succesful requests. + // RequestLatencySuccess is a latency histogram of successful requests. RequestLatencySuccess metrics.Timer `metric:"request_latency" tags:"error=false"` // RequestLatencyFailures is a latency histogram of failed requests. 
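The propagation changes above export the previously private propagator types, so an application can now construct them directly and wire them into a tracer instead of relying on the built-in defaults. A minimal sketch of that wiring (the service name and the trace-context header name are illustrative; the remaining header fields reuse the library's exported constants):

package main

import (
	"io"

	opentracing "github.com/opentracing/opentracing-go"
	jaeger "github.com/uber/jaeger-client-go"
)

// newTracerWithCustomHeaders builds a tracer that injects and extracts
// span context using a custom trace-context header name.
func newTracerWithCustomHeaders() (opentracing.Tracer, io.Closer) {
	headers := &jaeger.HeadersConfig{
		JaegerDebugHeader:        jaeger.JaegerDebugHeader,
		JaegerBaggageHeader:      jaeger.JaegerBaggageHeader,
		TraceContextHeaderName:   "x-example-trace-id", // illustrative name
		TraceBaggageHeaderPrefix: jaeger.TraceBaggageHeaderPrefix,
	}
	propagator := jaeger.NewHTTPHeaderPropagator(headers, *jaeger.NewNullMetrics())

	return jaeger.NewTracer(
		"example-service", // illustrative
		jaeger.NewConstSampler(true),
		jaeger.NewInMemoryReporter(),
		jaeger.TracerOptions.Injector(opentracing.HTTPHeaders, propagator),
		jaeger.TracerOptions.Extractor(opentracing.HTTPHeaders, propagator),
	)
}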
diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/sampler.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/sampler.go index f2e8c994e..d0be8ad50 100644 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/sampler.go +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/sampler.go @@ -17,19 +17,15 @@ package jaeger import ( "fmt" "math" - "net/url" + "strings" "sync" - "time" - "github.com/uber/jaeger-client-go/log" "github.com/uber/jaeger-client-go/thrift-gen/sampling" "github.com/uber/jaeger-client-go/utils" ) const ( - defaultSamplingServerURL = "http://localhost:5778/sampling" - defaultSamplingRefreshInterval = time.Minute - defaultMaxOperations = 2000 + defaultMaxOperations = 2000 ) // Sampler decides whether a new trace should be sampled or not. @@ -47,9 +43,7 @@ type Sampler interface { // Equal checks if the `other` sampler is functionally equivalent // to this sampler. - // TODO remove this function. This function is used to determine if 2 samplers are equivalent - // which does not bode well with the adaptive sampler which has to create all the composite samplers - // for the comparison to occur. This is expensive to do if only one sampler has changed. + // TODO (breaking change) remove this function. See PerOperationSampler.Equal for explanation. Equal(other Sampler) bool } @@ -57,17 +51,23 @@ type Sampler interface { // ConstSampler is a sampler that always makes the same decision. type ConstSampler struct { + legacySamplerV1Base Decision bool tags []Tag } // NewConstSampler creates a ConstSampler. -func NewConstSampler(sample bool) Sampler { +func NewConstSampler(sample bool) *ConstSampler { tags := []Tag{ {key: SamplerTypeTagKey, value: SamplerTypeConst}, {key: SamplerParamTagKey, value: sample}, } - return &ConstSampler{Decision: sample, tags: tags} + s := &ConstSampler{ + Decision: sample, + tags: tags, + } + s.delegate = s.IsSampled + return s } // IsSampled implements IsSampled() of Sampler. @@ -88,11 +88,17 @@ func (s *ConstSampler) Equal(other Sampler) bool { return false } +// String is used to log sampler details. +func (s *ConstSampler) String() string { + return fmt.Sprintf("ConstSampler(decision=%t)", s.Decision) +} + // ----------------------- // ProbabilisticSampler is a sampler that randomly samples a certain percentage // of traces. type ProbabilisticSampler struct { + legacySamplerV1Base samplingRate float64 samplingBoundary uint64 tags []Tag @@ -114,16 +120,19 @@ func NewProbabilisticSampler(samplingRate float64) (*ProbabilisticSampler, error } func newProbabilisticSampler(samplingRate float64) *ProbabilisticSampler { - samplingRate = math.Max(0.0, math.Min(samplingRate, 1.0)) - tags := []Tag{ + s := new(ProbabilisticSampler) + s.delegate = s.IsSampled + return s.init(samplingRate) +} + +func (s *ProbabilisticSampler) init(samplingRate float64) *ProbabilisticSampler { + s.samplingRate = math.Max(0.0, math.Min(samplingRate, 1.0)) + s.samplingBoundary = uint64(float64(maxRandomNumber) * s.samplingRate) + s.tags = []Tag{ {key: SamplerTypeTagKey, value: SamplerTypeProbabilistic}, - {key: SamplerParamTagKey, value: samplingRate}, - } - return &ProbabilisticSampler{ - samplingRate: samplingRate, - samplingBoundary: uint64(float64(maxRandomNumber) * samplingRate), - tags: tags, + {key: SamplerParamTagKey, value: s.samplingRate}, } + return s } // SamplingRate returns the sampling probability this sampler was constructed with. 
@@ -133,7 +142,7 @@ func (s *ProbabilisticSampler) SamplingRate() float64 { // IsSampled implements IsSampled() of Sampler. func (s *ProbabilisticSampler) IsSampled(id TraceID, operation string) (bool, []Tag) { - return s.samplingBoundary >= id.Low, s.tags + return s.samplingBoundary >= id.Low&maxRandomNumber, s.tags } // Close implements Close() of Sampler. @@ -149,65 +158,104 @@ func (s *ProbabilisticSampler) Equal(other Sampler) bool { return false } +// Update modifies in-place the sampling rate. Locking must be done externally. +func (s *ProbabilisticSampler) Update(samplingRate float64) error { + if samplingRate < 0.0 || samplingRate > 1.0 { + return fmt.Errorf("Sampling Rate must be between 0.0 and 1.0, received %f", samplingRate) + } + s.init(samplingRate) + return nil +} + +// String is used to log sampler details. +func (s *ProbabilisticSampler) String() string { + return fmt.Sprintf("ProbabilisticSampler(samplingRate=%v)", s.samplingRate) +} + // ----------------------- -type rateLimitingSampler struct { +// RateLimitingSampler samples at most maxTracesPerSecond. The distribution of sampled traces follows +// burstiness of the service, i.e. a service with uniformly distributed requests will have those +// requests sampled uniformly as well, but if requests are bursty, especially sub-second, then a +// number of sequential requests can be sampled each second. +type RateLimitingSampler struct { + legacySamplerV1Base maxTracesPerSecond float64 - rateLimiter utils.RateLimiter + rateLimiter *utils.ReconfigurableRateLimiter tags []Tag } -// NewRateLimitingSampler creates a sampler that samples at most maxTracesPerSecond. The distribution of sampled -// traces follows burstiness of the service, i.e. a service with uniformly distributed requests will have those -// requests sampled uniformly as well, but if requests are bursty, especially sub-second, then a number of -// sequential requests can be sampled each second. -func NewRateLimitingSampler(maxTracesPerSecond float64) Sampler { - tags := []Tag{ +// NewRateLimitingSampler creates new RateLimitingSampler. +func NewRateLimitingSampler(maxTracesPerSecond float64) *RateLimitingSampler { + s := new(RateLimitingSampler) + s.delegate = s.IsSampled + return s.init(maxTracesPerSecond) +} + +func (s *RateLimitingSampler) init(maxTracesPerSecond float64) *RateLimitingSampler { + if s.rateLimiter == nil { + s.rateLimiter = utils.NewRateLimiter(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0)) + } else { + s.rateLimiter.Update(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0)) + } + s.maxTracesPerSecond = maxTracesPerSecond + s.tags = []Tag{ {key: SamplerTypeTagKey, value: SamplerTypeRateLimiting}, {key: SamplerParamTagKey, value: maxTracesPerSecond}, } - return &rateLimitingSampler{ - maxTracesPerSecond: maxTracesPerSecond, - rateLimiter: utils.NewRateLimiter(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0)), - tags: tags, - } + return s } // IsSampled implements IsSampled() of Sampler. -func (s *rateLimitingSampler) IsSampled(id TraceID, operation string) (bool, []Tag) { +func (s *RateLimitingSampler) IsSampled(id TraceID, operation string) (bool, []Tag) { return s.rateLimiter.CheckCredit(1.0), s.tags } -func (s *rateLimitingSampler) Close() { +// Update reconfigures the rate limiter, while preserving its accumulated balance. +// Locking must be done externally. 
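+//
+// A sketch of in-place reconfiguration (the rates are illustrative):
+//
+//	s := NewRateLimitingSampler(1.0)
+//	s.Update(100.0) // raises the limit while keeping the accumulated credit balance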
+func (s *RateLimitingSampler) Update(maxTracesPerSecond float64) { + if s.maxTracesPerSecond != maxTracesPerSecond { + s.init(maxTracesPerSecond) + } +} + +// Close does nothing. +func (s *RateLimitingSampler) Close() { // nothing to do } -func (s *rateLimitingSampler) Equal(other Sampler) bool { - if o, ok := other.(*rateLimitingSampler); ok { +// Equal compares with another sampler. +func (s *RateLimitingSampler) Equal(other Sampler) bool { + if o, ok := other.(*RateLimitingSampler); ok { return s.maxTracesPerSecond == o.maxTracesPerSecond } return false } +// String is used to log sampler details. +func (s *RateLimitingSampler) String() string { + return fmt.Sprintf("RateLimitingSampler(maxTracesPerSecond=%v)", s.maxTracesPerSecond) +} + // ----------------------- -// GuaranteedThroughputProbabilisticSampler is a sampler that leverages both probabilisticSampler and -// rateLimitingSampler. The rateLimitingSampler is used as a guaranteed lower bound sampler such that +// GuaranteedThroughputProbabilisticSampler is a sampler that leverages both ProbabilisticSampler and +// RateLimitingSampler. The RateLimitingSampler is used as a guaranteed lower bound sampler such that // every operation is sampled at least once in a time interval defined by the lowerBound. ie a lowerBound // of 1.0 / (60 * 10) will sample an operation at least once every 10 minutes. // -// The probabilisticSampler is given higher priority when tags are emitted, ie. if IsSampled() for both -// samplers return true, the tags for probabilisticSampler will be used. +// The ProbabilisticSampler is given higher priority when tags are emitted, ie. if IsSampled() for both +// samplers return true, the tags for ProbabilisticSampler will be used. type GuaranteedThroughputProbabilisticSampler struct { probabilisticSampler *ProbabilisticSampler - lowerBoundSampler Sampler + lowerBoundSampler *RateLimitingSampler tags []Tag samplingRate float64 lowerBound float64 } // NewGuaranteedThroughputProbabilisticSampler returns a delegating sampler that applies both -// probabilisticSampler and rateLimitingSampler. +// ProbabilisticSampler and RateLimitingSampler. func NewGuaranteedThroughputProbabilisticSampler( lowerBound, samplingRate float64, ) (*GuaranteedThroughputProbabilisticSampler, error) { @@ -224,8 +272,14 @@ func newGuaranteedThroughputProbabilisticSampler(lowerBound, samplingRate float6 } func (s *GuaranteedThroughputProbabilisticSampler) setProbabilisticSampler(samplingRate float64) { - if s.probabilisticSampler == nil || s.samplingRate != samplingRate { + if s.probabilisticSampler == nil { s.probabilisticSampler = newProbabilisticSampler(samplingRate) + } else if s.samplingRate != samplingRate { + s.probabilisticSampler.init(samplingRate) + } + // since we don't validate samplingRate, sampler may have clamped it to [0, 1] interval + samplingRate = s.probabilisticSampler.SamplingRate() + if s.samplingRate != samplingRate || s.tags == nil { s.samplingRate = s.probabilisticSampler.SamplingRate() s.tags = []Tag{ {key: SamplerTypeTagKey, value: SamplerTypeLowerBound}, @@ -252,7 +306,7 @@ func (s *GuaranteedThroughputProbabilisticSampler) Close() { // Equal implements Equal() of Sampler. func (s *GuaranteedThroughputProbabilisticSampler) Equal(other Sampler) bool { - // NB The Equal() function is expensive and will be removed. See adaptiveSampler.Equal() for + // NB The Equal() function is expensive and will be removed. See PerOperationSampler.Equal() for // more information. 
return false } @@ -261,52 +315,123 @@ func (s *GuaranteedThroughputProbabilisticSampler) Equal(other Sampler) bool { func (s *GuaranteedThroughputProbabilisticSampler) update(lowerBound, samplingRate float64) { s.setProbabilisticSampler(samplingRate) if s.lowerBound != lowerBound { - s.lowerBoundSampler = NewRateLimitingSampler(lowerBound) + s.lowerBoundSampler.Update(lowerBound) s.lowerBound = lowerBound } } +func (s GuaranteedThroughputProbabilisticSampler) String() string { + return fmt.Sprintf("GuaranteedThroughputProbabilisticSampler(lowerBound=%f, samplingRate=%f)", s.lowerBound, s.samplingRate) +} + // ----------------------- -type adaptiveSampler struct { +// PerOperationSampler is a delegating sampler that applies GuaranteedThroughputProbabilisticSampler +// on a per-operation basis. +type PerOperationSampler struct { sync.RWMutex samplers map[string]*GuaranteedThroughputProbabilisticSampler defaultSampler *ProbabilisticSampler lowerBound float64 maxOperations int + + // see description in PerOperationSamplerParams + operationNameLateBinding bool +} + +// NewAdaptiveSampler returns a new PerOperationSampler. +// Deprecated: please use NewPerOperationSampler. +func NewAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies, maxOperations int) (*PerOperationSampler, error) { + return NewPerOperationSampler(PerOperationSamplerParams{ + MaxOperations: maxOperations, + Strategies: strategies, + }), nil } -// NewAdaptiveSampler returns a delegating sampler that applies both probabilisticSampler and -// rateLimitingSampler via the guaranteedThroughputProbabilisticSampler. This sampler keeps track of all -// operations and delegates calls to the respective guaranteedThroughputProbabilisticSampler. -func NewAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies, maxOperations int) (Sampler, error) { - return newAdaptiveSampler(strategies, maxOperations), nil +// PerOperationSamplerParams defines parameters when creating PerOperationSampler. +type PerOperationSamplerParams struct { + // Max number of operations that will be tracked. Other operations will be given default strategy. + MaxOperations int + + // Opt-in feature for applications that require late binding of span name via explicit call to SetOperationName. + // When this feature is enabled, the sampler will return retryable=true from OnCreateSpan(), thus leaving + // the sampling decision as non-final (and the span as writeable). This may lead to degraded performance + // in applications that always provide the correct span name on trace creation. + // + // For backwards compatibility this option is off by default. + OperationNameLateBinding bool + + // Initial configuration of the sampling strategies (usually retrieved from the backend by Remote Sampler). + Strategies *sampling.PerOperationSamplingStrategies } -func newAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies, maxOperations int) Sampler { +// NewPerOperationSampler returns a new PerOperationSampler. 
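+// Note that params.Strategies must be non-nil, since the constructor reads
+// it unconditionally. A sketch, with illustrative values:
+//
+//	sampler := NewPerOperationSampler(PerOperationSamplerParams{
+//		MaxOperations: 500,
+//		Strategies:    strategies, // *sampling.PerOperationSamplingStrategies from the backend
+//	})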
+func NewPerOperationSampler(params PerOperationSamplerParams) *PerOperationSampler { + if params.MaxOperations <= 0 { + params.MaxOperations = defaultMaxOperations + } samplers := make(map[string]*GuaranteedThroughputProbabilisticSampler) - for _, strategy := range strategies.PerOperationStrategies { + for _, strategy := range params.Strategies.PerOperationStrategies { sampler := newGuaranteedThroughputProbabilisticSampler( - strategies.DefaultLowerBoundTracesPerSecond, + params.Strategies.DefaultLowerBoundTracesPerSecond, strategy.ProbabilisticSampling.SamplingRate, ) samplers[strategy.Operation] = sampler } - return &adaptiveSampler{ - samplers: samplers, - defaultSampler: newProbabilisticSampler(strategies.DefaultSamplingProbability), - lowerBound: strategies.DefaultLowerBoundTracesPerSecond, - maxOperations: maxOperations, + return &PerOperationSampler{ + samplers: samplers, + defaultSampler: newProbabilisticSampler(params.Strategies.DefaultSamplingProbability), + lowerBound: params.Strategies.DefaultLowerBoundTracesPerSecond, + maxOperations: params.MaxOperations, + operationNameLateBinding: params.OperationNameLateBinding, } } -func (s *adaptiveSampler) IsSampled(id TraceID, operation string) (bool, []Tag) { +// IsSampled is not used and only exists to match Sampler V1 API. +// TODO (breaking change) remove when upgrading everything to SamplerV2 +func (s *PerOperationSampler) IsSampled(id TraceID, operation string) (bool, []Tag) { + return false, nil +} + +func (s *PerOperationSampler) trySampling(span *Span, operationName string) (bool, []Tag) { + samplerV1 := s.getSamplerForOperation(operationName) + var sampled bool + var tags []Tag + if span.context.samplingState.isLocalRootSpan(span.context.spanID) { + sampled, tags = samplerV1.IsSampled(span.context.TraceID(), operationName) + } + return sampled, tags +} + +// OnCreateSpan implements OnCreateSpan of SamplerV2. +func (s *PerOperationSampler) OnCreateSpan(span *Span) SamplingDecision { + sampled, tags := s.trySampling(span, span.OperationName()) + return SamplingDecision{Sample: sampled, Retryable: s.operationNameLateBinding, Tags: tags} +} + +// OnSetOperationName implements OnSetOperationName of SamplerV2. +func (s *PerOperationSampler) OnSetOperationName(span *Span, operationName string) SamplingDecision { + sampled, tags := s.trySampling(span, operationName) + return SamplingDecision{Sample: sampled, Retryable: false, Tags: tags} +} + +// OnSetTag implements OnSetTag of SamplerV2. +func (s *PerOperationSampler) OnSetTag(span *Span, key string, value interface{}) SamplingDecision { + return SamplingDecision{Sample: false, Retryable: true} +} + +// OnFinishSpan implements OnFinishSpan of SamplerV2. +func (s *PerOperationSampler) OnFinishSpan(span *Span) SamplingDecision { + return SamplingDecision{Sample: false, Retryable: true} +} + +func (s *PerOperationSampler) getSamplerForOperation(operation string) Sampler { s.RLock() sampler, ok := s.samplers[operation] if ok { defer s.RUnlock() - return sampler.IsSampled(id, operation) + return sampler } s.RUnlock() s.Lock() @@ -315,18 +440,19 @@ func (s *adaptiveSampler) IsSampled(id TraceID, operation string) (bool, []Tag) // Check if sampler has already been created sampler, ok = s.samplers[operation] if ok { - return sampler.IsSampled(id, operation) + return sampler } // Store only up to maxOperations of unique ops. 
if len(s.samplers) >= s.maxOperations { - return s.defaultSampler.IsSampled(id, operation) + return s.defaultSampler } newSampler := newGuaranteedThroughputProbabilisticSampler(s.lowerBound, s.defaultSampler.SamplingRate()) s.samplers[operation] = newSampler - return newSampler.IsSampled(id, operation) + return newSampler } -func (s *adaptiveSampler) Close() { +// Close invokes Close on all underlying samplers. +func (s *PerOperationSampler) Close() { s.Lock() defer s.Unlock() for _, sampler := range s.samplers { @@ -335,197 +461,56 @@ func (s *adaptiveSampler) Close() { s.defaultSampler.Close() } -func (s *adaptiveSampler) Equal(other Sampler) bool { - // NB The Equal() function is overly expensive for adaptiveSampler since it's composed of multiple +func (s *PerOperationSampler) String() string { + var sb strings.Builder + + fmt.Fprintf(&sb, "PerOperationSampler(defaultSampler=%v, ", s.defaultSampler) + fmt.Fprintf(&sb, "lowerBound=%f, ", s.lowerBound) + fmt.Fprintf(&sb, "maxOperations=%d, ", s.maxOperations) + fmt.Fprintf(&sb, "operationNameLateBinding=%t, ", s.operationNameLateBinding) + fmt.Fprintf(&sb, "numOperations=%d,\n", len(s.samplers)) + fmt.Fprintf(&sb, "samplers=[") + for operationName, sampler := range s.samplers { + fmt.Fprintf(&sb, "\n(operationName=%s, sampler=%v)", operationName, sampler) + } + fmt.Fprintf(&sb, "])") + + return sb.String() +} + +// Equal is not used. +// TODO (breaking change) remove this in the future +func (s *PerOperationSampler) Equal(other Sampler) bool { + // NB The Equal() function is overly expensive for PerOperationSampler since it's composed of multiple // samplers which all need to be initialized before this function can be called for a comparison. - // Therefore, adaptiveSampler uses the update() function to only alter the samplers that need + // Therefore, PerOperationSampler uses the update() function to only alter the samplers that need // changing. Hence this function always returns false so that the update function can be called. // Once the Equal() function is removed from the Sampler API, this will no longer be needed. return false } -func (s *adaptiveSampler) update(strategies *sampling.PerOperationSamplingStrategies) { +func (s *PerOperationSampler) update(strategies *sampling.PerOperationSamplingStrategies) { s.Lock() defer s.Unlock() + newSamplers := map[string]*GuaranteedThroughputProbabilisticSampler{} for _, strategy := range strategies.PerOperationStrategies { operation := strategy.Operation samplingRate := strategy.ProbabilisticSampling.SamplingRate lowerBound := strategies.DefaultLowerBoundTracesPerSecond if sampler, ok := s.samplers[operation]; ok { sampler.update(lowerBound, samplingRate) + newSamplers[operation] = sampler } else { sampler := newGuaranteedThroughputProbabilisticSampler( lowerBound, samplingRate, ) - s.samplers[operation] = sampler + newSamplers[operation] = sampler } } s.lowerBound = strategies.DefaultLowerBoundTracesPerSecond if s.defaultSampler.SamplingRate() != strategies.DefaultSamplingProbability { s.defaultSampler = newProbabilisticSampler(strategies.DefaultSamplingProbability) } -} - -// ----------------------- - -// RemotelyControlledSampler is a delegating sampler that polls a remote server -// for the appropriate sampling strategy, constructs a corresponding sampler and -// delegates to it for sampling decisions. 
-type RemotelyControlledSampler struct { - sync.RWMutex - samplerOptions - - serviceName string - timer *time.Ticker - manager sampling.SamplingManager - pollStopped sync.WaitGroup -} - -type httpSamplingManager struct { - serverURL string -} - -func (s *httpSamplingManager) GetSamplingStrategy(serviceName string) (*sampling.SamplingStrategyResponse, error) { - var out sampling.SamplingStrategyResponse - v := url.Values{} - v.Set("service", serviceName) - if err := utils.GetJSON(s.serverURL+"?"+v.Encode(), &out); err != nil { - return nil, err - } - return &out, nil -} - -// NewRemotelyControlledSampler creates a sampler that periodically pulls -// the sampling strategy from an HTTP sampling server (e.g. jaeger-agent). -func NewRemotelyControlledSampler( - serviceName string, - opts ...SamplerOption, -) *RemotelyControlledSampler { - options := applySamplerOptions(opts...) - sampler := &RemotelyControlledSampler{ - samplerOptions: options, - serviceName: serviceName, - timer: time.NewTicker(options.samplingRefreshInterval), - manager: &httpSamplingManager{serverURL: options.samplingServerURL}, - } - - go sampler.pollController() - return sampler -} - -func applySamplerOptions(opts ...SamplerOption) samplerOptions { - options := samplerOptions{} - for _, option := range opts { - option(&options) - } - if options.sampler == nil { - options.sampler = newProbabilisticSampler(0.001) - } - if options.logger == nil { - options.logger = log.NullLogger - } - if options.maxOperations <= 0 { - options.maxOperations = defaultMaxOperations - } - if options.samplingServerURL == "" { - options.samplingServerURL = defaultSamplingServerURL - } - if options.metrics == nil { - options.metrics = NewNullMetrics() - } - if options.samplingRefreshInterval <= 0 { - options.samplingRefreshInterval = defaultSamplingRefreshInterval - } - return options -} - -// IsSampled implements IsSampled() of Sampler. -func (s *RemotelyControlledSampler) IsSampled(id TraceID, operation string) (bool, []Tag) { - s.RLock() - defer s.RUnlock() - return s.sampler.IsSampled(id, operation) -} - -// Close implements Close() of Sampler. -func (s *RemotelyControlledSampler) Close() { - s.RLock() - s.timer.Stop() - s.RUnlock() - - s.pollStopped.Wait() -} - -// Equal implements Equal() of Sampler. -func (s *RemotelyControlledSampler) Equal(other Sampler) bool { - // NB The Equal() function is expensive and will be removed. See adaptiveSampler.Equal() for - // more information. - if o, ok := other.(*RemotelyControlledSampler); ok { - s.RLock() - o.RLock() - defer s.RUnlock() - defer o.RUnlock() - return s.sampler.Equal(o.sampler) - } - return false -} - -func (s *RemotelyControlledSampler) pollController() { - // in unit tests we re-assign the timer Ticker, so need to lock to avoid data races - s.Lock() - timer := s.timer - s.Unlock() - - for range timer.C { - s.updateSampler() - } - s.pollStopped.Add(1) -} - -func (s *RemotelyControlledSampler) updateSampler() { - res, err := s.manager.GetSamplingStrategy(s.serviceName) - if err != nil { - s.metrics.SamplerQueryFailure.Inc(1) - return - } - s.Lock() - defer s.Unlock() - - s.metrics.SamplerRetrieved.Inc(1) - if strategies := res.GetOperationSampling(); strategies != nil { - s.updateAdaptiveSampler(strategies) - } else { - err = s.updateRateLimitingOrProbabilisticSampler(res) - } - if err != nil { - s.metrics.SamplerUpdateFailure.Inc(1) - s.logger.Infof("Unable to handle sampling strategy response %+v. 
Got error: %v", res, err) - return - } - s.metrics.SamplerUpdated.Inc(1) -} - -// NB: this function should only be called while holding a Write lock -func (s *RemotelyControlledSampler) updateAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies) { - if adaptiveSampler, ok := s.sampler.(*adaptiveSampler); ok { - adaptiveSampler.update(strategies) - } else { - s.sampler = newAdaptiveSampler(strategies, s.maxOperations) - } -} - -// NB: this function should only be called while holding a Write lock -func (s *RemotelyControlledSampler) updateRateLimitingOrProbabilisticSampler(res *sampling.SamplingStrategyResponse) error { - var newSampler Sampler - if probabilistic := res.GetProbabilisticSampling(); probabilistic != nil { - newSampler = newProbabilisticSampler(probabilistic.SamplingRate) - } else if rateLimiting := res.GetRateLimitingSampling(); rateLimiting != nil { - newSampler = NewRateLimitingSampler(float64(rateLimiting.MaxTracesPerSecond)) - } else { - return fmt.Errorf("Unsupported sampling strategy type %v", res.GetStrategyType()) - } - if !s.sampler.Equal(newSampler) { - s.sampler = newSampler - } - return nil + s.samplers = newSamplers } diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/sampler_options.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/sampler_options.go deleted file mode 100644 index 75d28a561..000000000 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/sampler_options.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import ( - "time" -) - -// SamplerOption is a function that sets some option on the sampler -type SamplerOption func(options *samplerOptions) - -// SamplerOptions is a factory for all available SamplerOption's -var SamplerOptions samplerOptions - -type samplerOptions struct { - metrics *Metrics - maxOperations int - sampler Sampler - logger Logger - samplingServerURL string - samplingRefreshInterval time.Duration -} - -// Metrics creates a SamplerOption that initializes Metrics on the sampler, -// which is used to emit statistics. -func (samplerOptions) Metrics(m *Metrics) SamplerOption { - return func(o *samplerOptions) { - o.metrics = m - } -} - -// MaxOperations creates a SamplerOption that sets the maximum number of -// operations the sampler will keep track of. -func (samplerOptions) MaxOperations(maxOperations int) SamplerOption { - return func(o *samplerOptions) { - o.maxOperations = maxOperations - } -} - -// InitialSampler creates a SamplerOption that sets the initial sampler -// to use before a remote sampler is created and used. -func (samplerOptions) InitialSampler(sampler Sampler) SamplerOption { - return func(o *samplerOptions) { - o.sampler = sampler - } -} - -// Logger creates a SamplerOption that sets the logger used by the sampler. 
-func (samplerOptions) Logger(logger Logger) SamplerOption { - return func(o *samplerOptions) { - o.logger = logger - } -} - -// SamplingServerURL creates a SamplerOption that sets the sampling server url -// of the local agent that contains the sampling strategies. -func (samplerOptions) SamplingServerURL(samplingServerURL string) SamplerOption { - return func(o *samplerOptions) { - o.samplingServerURL = samplingServerURL - } -} - -// SamplingRefreshInterval creates a SamplerOption that sets how often the -// sampler will poll local agent for the appropriate sampling strategy. -func (samplerOptions) SamplingRefreshInterval(samplingRefreshInterval time.Duration) SamplerOption { - return func(o *samplerOptions) { - o.samplingRefreshInterval = samplingRefreshInterval - } -} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/sampler_remote.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/sampler_remote.go new file mode 100644 index 000000000..119f0a1bb --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/sampler_remote.go @@ -0,0 +1,358 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "sync" + "sync/atomic" + "time" + + "github.com/uber/jaeger-client-go/log" + "github.com/uber/jaeger-client-go/thrift-gen/sampling" +) + +const ( + defaultRemoteSamplingTimeout = 10 * time.Second + defaultSamplingRefreshInterval = time.Minute +) + +// SamplingStrategyFetcher is used to fetch sampling strategy updates from remote server. +type SamplingStrategyFetcher interface { + Fetch(service string) ([]byte, error) +} + +// SamplingStrategyParser is used to parse sampling strategy updates. The output object +// should be of the type that is recognized by the SamplerUpdaters. +type SamplingStrategyParser interface { + Parse(response []byte) (interface{}, error) +} + +// SamplerUpdater is used by RemotelyControlledSampler to apply sampling strategies, +// retrieved from remote config server, to the current sampler. The updater can modify +// the sampler in-place if sampler supports it, or create a new one. +// +// If the strategy does not contain configuration for the sampler in question, +// updater must return modifiedSampler=nil to give other updaters a chance to inspect +// the sampling strategy response. +// +// RemotelyControlledSampler invokes the updaters while holding a lock on the main sampler. +type SamplerUpdater interface { + Update(sampler SamplerV2, strategy interface{}) (modified SamplerV2, err error) +} + +// RemotelyControlledSampler is a delegating sampler that polls a remote server +// for the appropriate sampling strategy, constructs a corresponding sampler and +// delegates to it for sampling decisions. +type RemotelyControlledSampler struct { + // These fields must be first in the struct because `sync/atomic` expects 64-bit alignment. + // Cf. 
https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq + closed int64 // 0 - not closed, 1 - closed + + sync.RWMutex // used to serialize access to samplerOptions.sampler + samplerOptions + + serviceName string + doneChan chan *sync.WaitGroup +} + +// NewRemotelyControlledSampler creates a sampler that periodically pulls +// the sampling strategy from an HTTP sampling server (e.g. jaeger-agent). +func NewRemotelyControlledSampler( + serviceName string, + opts ...SamplerOption, +) *RemotelyControlledSampler { + options := new(samplerOptions).applyOptionsAndDefaults(opts...) + sampler := &RemotelyControlledSampler{ + samplerOptions: *options, + serviceName: serviceName, + doneChan: make(chan *sync.WaitGroup), + } + go sampler.pollController() + return sampler +} + +// IsSampled implements IsSampled() of Sampler. +// TODO (breaking change) remove when Sampler V1 is removed +func (s *RemotelyControlledSampler) IsSampled(id TraceID, operation string) (bool, []Tag) { + return false, nil +} + +// OnCreateSpan implements OnCreateSpan of SamplerV2. +func (s *RemotelyControlledSampler) OnCreateSpan(span *Span) SamplingDecision { + s.RLock() + defer s.RUnlock() + return s.sampler.OnCreateSpan(span) +} + +// OnSetOperationName implements OnSetOperationName of SamplerV2. +func (s *RemotelyControlledSampler) OnSetOperationName(span *Span, operationName string) SamplingDecision { + s.RLock() + defer s.RUnlock() + return s.sampler.OnSetOperationName(span, operationName) +} + +// OnSetTag implements OnSetTag of SamplerV2. +func (s *RemotelyControlledSampler) OnSetTag(span *Span, key string, value interface{}) SamplingDecision { + s.RLock() + defer s.RUnlock() + return s.sampler.OnSetTag(span, key, value) +} + +// OnFinishSpan implements OnFinishSpan of SamplerV2. +func (s *RemotelyControlledSampler) OnFinishSpan(span *Span) SamplingDecision { + s.RLock() + defer s.RUnlock() + return s.sampler.OnFinishSpan(span) +} + +// Close implements Close() of Sampler. +func (s *RemotelyControlledSampler) Close() { + if swapped := atomic.CompareAndSwapInt64(&s.closed, 0, 1); !swapped { + s.logger.Error("Repeated attempt to close the sampler is ignored") + return + } + + var wg sync.WaitGroup + wg.Add(1) + s.doneChan <- &wg + wg.Wait() +} + +// Equal implements Equal() of Sampler. +func (s *RemotelyControlledSampler) Equal(other Sampler) bool { + // NB The Equal() function is expensive and will be removed. See PerOperationSampler.Equal() for + // more information. + return false +} + +func (s *RemotelyControlledSampler) pollController() { + ticker := time.NewTicker(s.samplingRefreshInterval) + defer ticker.Stop() + s.pollControllerWithTicker(ticker) +} + +func (s *RemotelyControlledSampler) pollControllerWithTicker(ticker *time.Ticker) { + for { + select { + case <-ticker.C: + s.UpdateSampler() + case wg := <-s.doneChan: + wg.Done() + return + } + } +} + +// Sampler returns the currently active sampler. +func (s *RemotelyControlledSampler) Sampler() SamplerV2 { + s.RLock() + defer s.RUnlock() + return s.sampler +} +func (s *RemotelyControlledSampler) setSampler(sampler SamplerV2) { + s.Lock() + defer s.Unlock() + s.sampler = sampler +} + +// UpdateSampler forces the sampler to fetch sampling strategy from backend server. +// This function is called automatically on a timer, but can also be safely called manually, e.g. from tests. 
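+//
+// A sketch of a manual refresh (the service name is illustrative):
+//
+//	sampler := NewRemotelyControlledSampler("my-service")
+//	sampler.UpdateSampler() // fetch and apply a strategy immediately
+//	defer sampler.Close()   // stops the polling goroutine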
+func (s *RemotelyControlledSampler) UpdateSampler() { + res, err := s.samplingFetcher.Fetch(s.serviceName) + if err != nil { + s.metrics.SamplerQueryFailure.Inc(1) + s.logger.Infof("failed to fetch sampling strategy: %v", err) + return + } + strategy, err := s.samplingParser.Parse(res) + if err != nil { + s.metrics.SamplerUpdateFailure.Inc(1) + s.logger.Infof("failed to parse sampling strategy response: %v", err) + return + } + + s.Lock() + defer s.Unlock() + + s.metrics.SamplerRetrieved.Inc(1) + if err := s.updateSamplerViaUpdaters(strategy); err != nil { + s.metrics.SamplerUpdateFailure.Inc(1) + s.logger.Infof("failed to handle sampling strategy response %+v. Got error: %v", res, err) + return + } + s.metrics.SamplerUpdated.Inc(1) +} + +// NB: this function should only be called while holding a Write lock +func (s *RemotelyControlledSampler) updateSamplerViaUpdaters(strategy interface{}) error { + for _, updater := range s.updaters { + sampler, err := updater.Update(s.sampler, strategy) + if err != nil { + return err + } + if sampler != nil { + s.logger.Debugf("sampler updated: %+v", sampler) + s.sampler = sampler + return nil + } + } + return fmt.Errorf("unsupported sampling strategy %+v", strategy) +} + +// ----------------------- + +// ProbabilisticSamplerUpdater is used by RemotelyControlledSampler to parse sampling configuration. +type ProbabilisticSamplerUpdater struct{} + +// Update implements Update of SamplerUpdater. +func (u *ProbabilisticSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) { + type response interface { + GetProbabilisticSampling() *sampling.ProbabilisticSamplingStrategy + } + var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check + if resp, ok := strategy.(response); ok { + if probabilistic := resp.GetProbabilisticSampling(); probabilistic != nil { + if ps, ok := sampler.(*ProbabilisticSampler); ok { + if err := ps.Update(probabilistic.SamplingRate); err != nil { + return nil, err + } + return sampler, nil + } + return newProbabilisticSampler(probabilistic.SamplingRate), nil + } + } + return nil, nil +} + +// ----------------------- + +// RateLimitingSamplerUpdater is used by RemotelyControlledSampler to parse sampling configuration. +type RateLimitingSamplerUpdater struct{} + +// Update implements Update of SamplerUpdater. +func (u *RateLimitingSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) { + type response interface { + GetRateLimitingSampling() *sampling.RateLimitingSamplingStrategy + } + var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check + if resp, ok := strategy.(response); ok { + if rateLimiting := resp.GetRateLimitingSampling(); rateLimiting != nil { + rateLimit := float64(rateLimiting.MaxTracesPerSecond) + if rl, ok := sampler.(*RateLimitingSampler); ok { + rl.Update(rateLimit) + return rl, nil + } + return NewRateLimitingSampler(rateLimit), nil + } + } + return nil, nil +} + +// ----------------------- + +// AdaptiveSamplerUpdater is used by RemotelyControlledSampler to parse sampling configuration. +// Fields have the same meaning as in PerOperationSamplerParams. +type AdaptiveSamplerUpdater struct { + MaxOperations int + OperationNameLateBinding bool +} + +// Update implements Update of SamplerUpdater. 
+func (u *AdaptiveSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) { + type response interface { + GetOperationSampling() *sampling.PerOperationSamplingStrategies + } + var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check + if p, ok := strategy.(response); ok { + if operations := p.GetOperationSampling(); operations != nil { + if as, ok := sampler.(*PerOperationSampler); ok { + as.update(operations) + return as, nil + } + return NewPerOperationSampler(PerOperationSamplerParams{ + MaxOperations: u.MaxOperations, + OperationNameLateBinding: u.OperationNameLateBinding, + Strategies: operations, + }), nil + } + } + return nil, nil +} + +// ----------------------- + +type httpSamplingStrategyFetcher struct { + serverURL string + logger log.DebugLogger + httpClient http.Client +} + +func newHTTPSamplingStrategyFetcher(serverURL string, logger log.DebugLogger) *httpSamplingStrategyFetcher { + customTransport := http.DefaultTransport.(*http.Transport).Clone() + customTransport.ResponseHeaderTimeout = defaultRemoteSamplingTimeout + + return &httpSamplingStrategyFetcher{ + serverURL: serverURL, + logger: logger, + httpClient: http.Client{ + Transport: customTransport, + }, + } +} + +func (f *httpSamplingStrategyFetcher) Fetch(serviceName string) ([]byte, error) { + v := url.Values{} + v.Set("service", serviceName) + uri := f.serverURL + "?" + v.Encode() + + resp, err := f.httpClient.Get(uri) + if err != nil { + return nil, err + } + + defer func() { + if err := resp.Body.Close(); err != nil { + f.logger.Error(fmt.Sprintf("failed to close HTTP response body: %+v", err)) + } + }() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if resp.StatusCode >= 400 { + return nil, fmt.Errorf("StatusCode: %d, Body: %s", resp.StatusCode, body) + } + + return body, nil +} + +// ----------------------- + +type samplingStrategyParser struct{} + +func (p *samplingStrategyParser) Parse(response []byte) (interface{}, error) { + strategy := new(sampling.SamplingStrategyResponse) + if err := json.Unmarshal(response, strategy); err != nil { + return nil, err + } + return strategy, nil +} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/sampler_remote_options.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/sampler_remote_options.go new file mode 100644 index 000000000..64b028bf3 --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/sampler_remote_options.go @@ -0,0 +1,159 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +import ( + "time" + + "github.com/uber/jaeger-client-go/log" +) + +// SamplerOption is a function that sets some option on the sampler +type SamplerOption func(options *samplerOptions) + +// SamplerOptions is a factory for all available SamplerOption's. +var SamplerOptions SamplerOptionsFactory + +// SamplerOptionsFactory is a factory for all available SamplerOption's. 
+// The type acts as a namespace for factory functions. It is public to +// make the functions discoverable via godoc. Recommended to be used +// via global SamplerOptions variable. +type SamplerOptionsFactory struct{} + +type samplerOptions struct { + metrics *Metrics + sampler SamplerV2 + logger log.DebugLogger + samplingServerURL string + samplingRefreshInterval time.Duration + samplingFetcher SamplingStrategyFetcher + samplingParser SamplingStrategyParser + updaters []SamplerUpdater + posParams PerOperationSamplerParams +} + +// Metrics creates a SamplerOption that initializes Metrics on the sampler, +// which is used to emit statistics. +func (SamplerOptionsFactory) Metrics(m *Metrics) SamplerOption { + return func(o *samplerOptions) { + o.metrics = m + } +} + +// MaxOperations creates a SamplerOption that sets the maximum number of +// operations the sampler will keep track of. +func (SamplerOptionsFactory) MaxOperations(maxOperations int) SamplerOption { + return func(o *samplerOptions) { + o.posParams.MaxOperations = maxOperations + } +} + +// OperationNameLateBinding creates a SamplerOption that sets the respective +// field in the PerOperationSamplerParams. +func (SamplerOptionsFactory) OperationNameLateBinding(enable bool) SamplerOption { + return func(o *samplerOptions) { + o.posParams.OperationNameLateBinding = enable + } +} + +// InitialSampler creates a SamplerOption that sets the initial sampler +// to use before a remote sampler is created and used. +func (SamplerOptionsFactory) InitialSampler(sampler Sampler) SamplerOption { + return func(o *samplerOptions) { + o.sampler = samplerV1toV2(sampler) + } +} + +// Logger creates a SamplerOption that sets the logger used by the sampler. +func (SamplerOptionsFactory) Logger(logger Logger) SamplerOption { + return func(o *samplerOptions) { + o.logger = log.DebugLogAdapter(logger) + } +} + +// SamplingServerURL creates a SamplerOption that sets the sampling server url +// of the local agent that contains the sampling strategies. +func (SamplerOptionsFactory) SamplingServerURL(samplingServerURL string) SamplerOption { + return func(o *samplerOptions) { + o.samplingServerURL = samplingServerURL + } +} + +// SamplingRefreshInterval creates a SamplerOption that sets how often the +// sampler will poll local agent for the appropriate sampling strategy. +func (SamplerOptionsFactory) SamplingRefreshInterval(samplingRefreshInterval time.Duration) SamplerOption { + return func(o *samplerOptions) { + o.samplingRefreshInterval = samplingRefreshInterval + } +} + +// SamplingStrategyFetcher creates a SamplerOption that initializes sampling strategy fetcher. +func (SamplerOptionsFactory) SamplingStrategyFetcher(fetcher SamplingStrategyFetcher) SamplerOption { + return func(o *samplerOptions) { + o.samplingFetcher = fetcher + } +} + +// SamplingStrategyParser creates a SamplerOption that initializes sampling strategy parser. +func (SamplerOptionsFactory) SamplingStrategyParser(parser SamplingStrategyParser) SamplerOption { + return func(o *samplerOptions) { + o.samplingParser = parser + } +} + +// Updaters creates a SamplerOption that initializes sampler updaters. 
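+// For example, to restrict a sampler to the two simple strategies (a sketch
+// mirroring part of the default updater list in applyOptionsAndDefaults):
+//
+//	sampler := NewRemotelyControlledSampler(
+//		"my-service", // illustrative
+//		SamplerOptions.Updaters(
+//			new(ProbabilisticSamplerUpdater),
+//			new(RateLimitingSamplerUpdater),
+//		),
+//	)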
+func (SamplerOptionsFactory) Updaters(updaters ...SamplerUpdater) SamplerOption { + return func(o *samplerOptions) { + o.updaters = updaters + } +} + +func (o *samplerOptions) applyOptionsAndDefaults(opts ...SamplerOption) *samplerOptions { + for _, option := range opts { + option(o) + } + if o.sampler == nil { + o.sampler = newProbabilisticSampler(0.001) + } + if o.logger == nil { + o.logger = log.NullLogger + } + if o.samplingServerURL == "" { + o.samplingServerURL = DefaultSamplingServerURL + } + if o.metrics == nil { + o.metrics = NewNullMetrics() + } + if o.samplingRefreshInterval <= 0 { + o.samplingRefreshInterval = defaultSamplingRefreshInterval + } + if o.samplingFetcher == nil { + o.samplingFetcher = newHTTPSamplingStrategyFetcher(o.samplingServerURL, o.logger) + } + if o.samplingParser == nil { + o.samplingParser = new(samplingStrategyParser) + } + if o.updaters == nil { + o.updaters = []SamplerUpdater{ + &AdaptiveSamplerUpdater{ + MaxOperations: o.posParams.MaxOperations, + OperationNameLateBinding: o.posParams.OperationNameLateBinding, + }, + new(ProbabilisticSamplerUpdater), + new(RateLimitingSamplerUpdater), + } + } + return o +} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/sampler_v2.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/sampler_v2.go new file mode 100644 index 000000000..a50671a23 --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/sampler_v2.go @@ -0,0 +1,93 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +// SamplingDecision is returned by the V2 samplers. +type SamplingDecision struct { + Sample bool + Retryable bool + Tags []Tag +} + +// SamplerV2 is an extension of the V1 samplers that allows sampling decisions +// be made at different points of the span lifecycle. +type SamplerV2 interface { + OnCreateSpan(span *Span) SamplingDecision + OnSetOperationName(span *Span, operationName string) SamplingDecision + OnSetTag(span *Span, key string, value interface{}) SamplingDecision + OnFinishSpan(span *Span) SamplingDecision + + // Close does a clean shutdown of the sampler, stopping any background + // go-routines it may have started. + Close() +} + +// samplerV1toV2 wraps legacy V1 sampler into an adapter that make it look like V2. +func samplerV1toV2(s Sampler) SamplerV2 { + if s2, ok := s.(SamplerV2); ok { + return s2 + } + type legacySamplerV1toV2Adapter struct { + legacySamplerV1Base + } + return &legacySamplerV1toV2Adapter{ + legacySamplerV1Base: legacySamplerV1Base{ + delegate: s.IsSampled, + }, + } +} + +// SamplerV2Base can be used by V2 samplers to implement dummy V1 methods. +// Supporting V1 API is required because Tracer configuration only accepts V1 Sampler +// for backwards compatibility reasons. +// TODO (breaking change) remove this in the next major release +type SamplerV2Base struct{} + +// IsSampled implements IsSampled of Sampler. 
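+//
+// Embedding SamplerV2Base in a custom V2 sampler satisfies the legacy V1
+// methods, e.g. (a sketch):
+//
+//	type mySampler struct {
+//		SamplerV2Base
+//		// OnCreateSpan, OnSetOperationName, etc. implemented by the sampler
+//	}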
+func (SamplerV2Base) IsSampled(id TraceID, operation string) (sampled bool, tags []Tag) { + return false, nil +} + +// Close implements Close of Sampler. +func (SamplerV2Base) Close() {} + +// Equal implements Equal of Sampler. +func (SamplerV2Base) Equal(other Sampler) bool { return false } + +// legacySamplerV1Base is used as a base for simple samplers that only implement +// the legacy isSampled() function that is not sensitive to its arguments. +type legacySamplerV1Base struct { + delegate func(id TraceID, operation string) (sampled bool, tags []Tag) +} + +func (s *legacySamplerV1Base) OnCreateSpan(span *Span) SamplingDecision { + isSampled, tags := s.delegate(span.context.traceID, span.operationName) + return SamplingDecision{Sample: isSampled, Retryable: false, Tags: tags} +} + +func (s *legacySamplerV1Base) OnSetOperationName(span *Span, operationName string) SamplingDecision { + isSampled, tags := s.delegate(span.context.traceID, span.operationName) + return SamplingDecision{Sample: isSampled, Retryable: false, Tags: tags} +} + +func (s *legacySamplerV1Base) OnSetTag(span *Span, key string, value interface{}) SamplingDecision { + return SamplingDecision{Sample: false, Retryable: true} +} + +func (s *legacySamplerV1Base) OnFinishSpan(span *Span) SamplingDecision { + return SamplingDecision{Sample: false, Retryable: true} +} + +func (s *legacySamplerV1Base) Close() {} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/span.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/span.go index 132fb7217..997cffdd8 100644 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/span.go +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/span.go @@ -1,4 +1,4 @@ -// Copyright (c) 2017 Uber Technologies, Inc. +// Copyright (c) 2017-2018 Uber Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,6 +16,7 @@ package jaeger import ( "sync" + "sync/atomic" "time" "github.com/opentracing/opentracing-go" @@ -25,10 +26,15 @@ import ( // Span implements opentracing.Span type Span struct { + // referenceCounter used to increase the lifetime of + // the object before return it into the pool. + referenceCounter int32 + sync.RWMutex tracer *Tracer + // TODO: (breaking change) change to use a pointer context SpanContext // The name of the "operation" this span is an instance of. @@ -53,6 +59,9 @@ type Span struct { // The span's "micro-log" logs []opentracing.LogRecord + // The number of logs dropped because of MaxLogsPerSpan. + numDroppedLogs int + // references for this span references []Reference @@ -60,18 +69,27 @@ type Span struct { } // Tag is a simple key value wrapper. -// TODO deprecate in the next major release, use opentracing.Tag instead. +// TODO (breaking change) deprecate in the next major release, use opentracing.Tag instead. type Tag struct { key string value interface{} } +// NewTag creates a new Tag. +// TODO (breaking change) deprecate in the next major release, use opentracing.Tag instead. +func NewTag(key string, value interface{}) Tag { + return Tag{key: key, value: value} +} + // SetOperationName sets or changes the operation name. 
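SamplerV2Base exists so a V2 sampler can satisfy the mandatory V1 methods simply by embedding it. A hedged sketch of a hypothetical V2 sampler that uses Retryable to keep the decision open until a particular tag arrives:

package main

import "github.com/uber/jaeger-client-go"

// tagTriggered is a hypothetical V2 sampler: spans stay unsampled but
// writable (Retryable) until a "vip" tag is seen. Embedding SamplerV2Base
// supplies the dummy V1 methods (IsSampled/Close/Equal).
type tagTriggered struct {
	jaeger.SamplerV2Base
}

var _ jaeger.SamplerV2 = tagTriggered{}

func (tagTriggered) OnCreateSpan(span *jaeger.Span) jaeger.SamplingDecision {
	return jaeger.SamplingDecision{Sample: false, Retryable: true}
}

func (tagTriggered) OnSetOperationName(span *jaeger.Span, op string) jaeger.SamplingDecision {
	return jaeger.SamplingDecision{Sample: false, Retryable: true}
}

func (tagTriggered) OnSetTag(span *jaeger.Span, key string, value interface{}) jaeger.SamplingDecision {
	if key == "vip" {
		return jaeger.SamplingDecision{Sample: true, Retryable: false} // final: sample
	}
	return jaeger.SamplingDecision{Sample: false, Retryable: true} // keep span writable
}

func (tagTriggered) OnFinishSpan(span *jaeger.Span) jaeger.SamplingDecision {
	return jaeger.SamplingDecision{Sample: false, Retryable: false} // finalize as not sampled
}

func main() {}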
func (s *Span) SetOperationName(operationName string) opentracing.Span { s.Lock() - defer s.Unlock() - if s.context.IsSampled() { - s.operationName = operationName + s.operationName = operationName + ctx := s.context + s.Unlock() + if !ctx.isSamplingFinalized() { + decision := s.tracer.sampler.OnSetOperationName(s, operationName) + s.applySamplingDecision(decision, true) } s.observer.OnSetOperationName(operationName) return s @@ -79,19 +97,100 @@ func (s *Span) SetOperationName(operationName string) opentracing.Span { // SetTag implements SetTag() of opentracing.Span func (s *Span) SetTag(key string, value interface{}) opentracing.Span { + return s.setTagInternal(key, value, true) +} + +func (s *Span) setTagInternal(key string, value interface{}, lock bool) opentracing.Span { + var ctx SpanContext + var operationName string + if lock { + ctx = s.SpanContext() + operationName = s.OperationName() + } else { + ctx = s.context + operationName = s.operationName + } + s.observer.OnSetTag(key, value) - if key == string(ext.SamplingPriority) && setSamplingPriority(s, value) { + if key == string(ext.SamplingPriority) && !setSamplingPriority(ctx.samplingState, operationName, s.tracer, value) { return s } + if !ctx.isSamplingFinalized() { + decision := s.tracer.sampler.OnSetTag(s, key, value) + s.applySamplingDecision(decision, lock) + } + if ctx.isWriteable() { + if lock { + s.Lock() + defer s.Unlock() + } + s.appendTagNoLocking(key, value) + } + return s +} + +// SpanContext returns span context +func (s *Span) SpanContext() SpanContext { s.Lock() defer s.Unlock() - if s.context.IsSampled() { - s.setTagNoLocking(key, value) + return s.context +} + +// StartTime returns span start time +func (s *Span) StartTime() time.Time { + s.Lock() + defer s.Unlock() + return s.startTime +} + +// Duration returns span duration +func (s *Span) Duration() time.Duration { + s.Lock() + defer s.Unlock() + return s.duration +} + +// Tags returns tags for span +func (s *Span) Tags() opentracing.Tags { + s.Lock() + defer s.Unlock() + var result = make(opentracing.Tags, len(s.tags)) + for _, tag := range s.tags { + result[tag.key] = tag.value } - return s + return result +} + +// Logs returns micro logs for span +func (s *Span) Logs() []opentracing.LogRecord { + s.Lock() + defer s.Unlock() + + logs := append([]opentracing.LogRecord(nil), s.logs...) 
+ if s.numDroppedLogs != 0 { + fixLogs(logs, s.numDroppedLogs) + } + + return logs +} + +// References returns references for this span +func (s *Span) References() []opentracing.SpanReference { + s.Lock() + defer s.Unlock() + + if s.references == nil || len(s.references) == 0 { + return nil + } + + result := make([]opentracing.SpanReference, len(s.references)) + for i, r := range s.references { + result[i] = opentracing.SpanReference{Type: r.Type, ReferencedContext: r.Context} + } + return result } -func (s *Span) setTagNoLocking(key string, value interface{}) { +func (s *Span) appendTagNoLocking(key string, value interface{}) { s.tags = append(s.tags, Tag{key: key, value: value}) } @@ -111,7 +210,7 @@ func (s *Span) logFieldsNoLocking(fields ...log.Field) { Fields: fields, Timestamp: time.Now(), } - s.appendLog(lr) + s.appendLogNoLocking(lr) } // LogKV implements opentracing.Span API @@ -148,17 +247,81 @@ func (s *Span) Log(ld opentracing.LogData) { if ld.Timestamp.IsZero() { ld.Timestamp = s.tracer.timeNow() } - s.appendLog(ld.ToLogRecord()) + s.appendLogNoLocking(ld.ToLogRecord()) } } // this function should only be called while holding a Write lock -func (s *Span) appendLog(lr opentracing.LogRecord) { - // TODO add logic to limit number of logs per span (issue #46) - s.logs = append(s.logs, lr) +func (s *Span) appendLogNoLocking(lr opentracing.LogRecord) { + maxLogs := s.tracer.options.maxLogsPerSpan + if maxLogs == 0 || len(s.logs) < maxLogs { + s.logs = append(s.logs, lr) + return + } + + // We have too many logs. We don't touch the first numOld logs; we treat the + // rest as a circular buffer and overwrite the oldest log among those. + numOld := (maxLogs - 1) / 2 + numNew := maxLogs - numOld + s.logs[numOld+s.numDroppedLogs%numNew] = lr + s.numDroppedLogs++ +} + +// rotateLogBuffer rotates the records in the buffer: records 0 to pos-1 move at +// the end (i.e. pos circular left shifts). +func rotateLogBuffer(buf []opentracing.LogRecord, pos int) { + // This algorithm is described in: + // http://www.cplusplus.com/reference/algorithm/rotate + for first, middle, next := 0, pos, pos; first != middle; { + buf[first], buf[next] = buf[next], buf[first] + first++ + next++ + if next == len(buf) { + next = middle + } else if first == middle { + middle = next + } + } +} + +func fixLogs(logs []opentracing.LogRecord, numDroppedLogs int) { + // We dropped some log events, which means that we used part of Logs as a + // circular buffer (see appendLog). De-circularize it. + numOld := (len(logs) - 1) / 2 + numNew := len(logs) - numOld + rotateLogBuffer(logs[numOld:], numDroppedLogs%numNew) + + // Replace the log in the middle (the oldest "new" log) with information + // about the dropped logs. This means that we are effectively dropping one + // more "new" log. + numDropped := numDroppedLogs + 1 + logs[numOld] = opentracing.LogRecord{ + // Keep the timestamp of the last dropped event. + Timestamp: logs[numOld].Timestamp, + Fields: []log.Field{ + log.String("event", "dropped Span logs"), + log.Int("dropped_log_count", numDropped), + log.String("component", "jaeger-client"), + }, + } +} + +func (s *Span) fixLogsIfDropped() { + if s.numDroppedLogs == 0 { + return + } + fixLogs(s.logs, s.numDroppedLogs) + s.numDroppedLogs = 0 } -// SetBaggageItem implements SetBaggageItem() of opentracing.SpanContext +// SetBaggageItem implements SetBaggageItem() of opentracing.SpanContext. 
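appendLogNoLocking and fixLogs split the log buffer into a pinned prefix and a rotating tail. A standalone sketch of the slot arithmetic with plain ints (not the library's types), assuming maxLogsPerSpan = 5:

package main

import "fmt"

func main() {
	maxLogs := 5
	numOld := (maxLogs - 1) / 2 // 2 oldest records are never overwritten
	numNew := maxLogs - numOld  // 3 slots form the circular region
	for dropped := 0; dropped < 5; dropped++ {
		slot := numOld + dropped%numNew
		fmt.Printf("dropped log #%d lands in slot %d\n", dropped, slot)
	}
	// Prints slots 2,3,4,2,3: fixLogs later rotates slots 2..4 back into
	// chronological order and replaces slot 2 (the oldest surviving "new"
	// log) with a "dropped Span logs" record counting numDroppedLogs+1 events.
}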
+// The call is proxied via tracer.baggageSetter to allow policies to be applied +// before allowing to set/replace baggage keys. +// The setter eventually stores a new SpanContext with extended baggage: +// +// span.context = span.context.WithBaggageItem(key, value) +// +// See SpanContext.WithBaggageItem() for explanation why it's done this way. func (s *Span) SetBaggageItem(key, value string) opentracing.Span { s.Lock() defer s.Unlock() @@ -174,6 +337,8 @@ func (s *Span) BaggageItem(key string) string { } // Finish implements opentracing.Span API +// After finishing the Span object it returns back to the allocator unless the reporter retains it again, +// so after that, the Span object should no longer be used because it won't be valid anymore. func (s *Span) Finish() { s.FinishWithOptions(opentracing.FinishOptions{}) } @@ -185,23 +350,36 @@ func (s *Span) FinishWithOptions(options opentracing.FinishOptions) { } s.observer.OnFinish(options) s.Lock() - if s.context.IsSampled() { - s.duration = options.FinishTime.Sub(s.startTime) - // Note: bulk logs are not subject to maxLogsPerSpan limit - if options.LogRecords != nil { - s.logs = append(s.logs, options.LogRecords...) - } - for _, ld := range options.BulkLogData { - s.logs = append(s.logs, ld.ToLogRecord()) + s.duration = options.FinishTime.Sub(s.startTime) + ctx := s.context + s.Unlock() + if !ctx.isSamplingFinalized() { + decision := s.tracer.sampler.OnFinishSpan(s) + s.applySamplingDecision(decision, true) + } + if ctx.IsSampled() { + s.Lock() + s.fixLogsIfDropped() + if len(options.LogRecords) > 0 || len(options.BulkLogData) > 0 { + // Note: bulk logs are not subject to maxLogsPerSpan limit + if options.LogRecords != nil { + s.logs = append(s.logs, options.LogRecords...) + } + for _, ld := range options.BulkLogData { + s.logs = append(s.logs, ld.ToLogRecord()) + } } + s.Unlock() } - s.Unlock() // call reportSpan even for non-sampled traces, to return span to the pool + // and update metrics counter s.tracer.reportSpan(s) } // Context implements opentracing.Span API func (s *Span) Context() opentracing.SpanContext { + s.Lock() + defer s.Unlock() return s.context } @@ -223,20 +401,103 @@ func (s *Span) OperationName() string { return s.operationName } +// Retain increases object counter to increase the lifetime of the object +func (s *Span) Retain() *Span { + atomic.AddInt32(&s.referenceCounter, 1) + return s +} + +// Release decrements object counter and return to the +// allocator manager when counter will below zero +func (s *Span) Release() { + if atomic.AddInt32(&s.referenceCounter, -1) == -1 { + s.tracer.spanAllocator.Put(s) + } +} + +// reset span state and release unused data +func (s *Span) reset() { + s.firstInProcess = false + s.context = emptyContext + s.operationName = "" + s.tracer = nil + s.startTime = time.Time{} + s.duration = 0 + s.observer = nil + atomic.StoreInt32(&s.referenceCounter, 0) + + // Note: To reuse memory we can save the pointers on the heap + s.tags = s.tags[:0] + s.logs = s.logs[:0] + s.numDroppedLogs = 0 + s.references = s.references[:0] +} + func (s *Span) serviceName() string { return s.tracer.serviceName } -func setSamplingPriority(s *Span, value interface{}) bool { - s.Lock() - defer s.Unlock() - if val, ok := value.(uint16); ok { - if val > 0 { - s.context.flags = s.context.flags | flagDebug | flagSampled - } else { - s.context.flags = s.context.flags & (^flagSampled) +func (s *Span) applySamplingDecision(decision SamplingDecision, lock bool) { + var ctx SpanContext + if lock { + ctx = 
s.SpanContext() + } else { + ctx = s.context + } + + if !decision.Retryable { + ctx.samplingState.setFinal() + } + if decision.Sample { + ctx.samplingState.setSampled() + if len(decision.Tags) > 0 { + if lock { + s.Lock() + defer s.Unlock() + } + for _, tag := range decision.Tags { + s.appendTagNoLocking(tag.key, tag.value) + } } + } +} + +// setSamplingPriority returns true if the flag was updated successfully, false otherwise. +// The behavior of setSamplingPriority is surprising +// If noDebugFlagOnForcedSampling is set +// setSamplingPriority(..., 1) always sets only flagSampled +// If noDebugFlagOnForcedSampling is unset, and isDebugAllowed passes +// setSamplingPriority(..., 1) sets both flagSampled and flagDebug +// However, +// setSamplingPriority(..., 0) always only resets flagSampled +// +// This means that doing a setSamplingPriority(..., 1) followed by setSamplingPriority(..., 0) can +// leave flagDebug set +func setSamplingPriority(state *samplingState, operationName string, tracer *Tracer, value interface{}) bool { + val, ok := value.(uint16) + if !ok { + return false + } + if val == 0 { + state.unsetSampled() + state.setFinal() + return true + } + if tracer.options.noDebugFlagOnForcedSampling { + state.setSampled() + state.setFinal() + return true + } else if tracer.isDebugAllowed(operationName) { + state.setDebugAndSampled() + state.setFinal() return true } return false } + +// EnableFirehose enables firehose flag on the span context +func EnableFirehose(s *Span) { + s.Lock() + defer s.Unlock() + s.context.samplingState.setFirehose() +} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/span_allocator.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/span_allocator.go new file mode 100644 index 000000000..fba1e4337 --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/span_allocator.go @@ -0,0 +1,56 @@ +// Copyright (c) 2019 The Jaeger Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
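The setSamplingPriority quirk documented above is observable through the standard OpenTracing ext tag, which is the key that routes SetTag into this function. A sketch, assuming a tracer configured without the no-debug-flag-on-forced-sampling option and with debug spans allowed by isDebugAllowed:

package main

import (
	"github.com/opentracing/opentracing-go"
	"github.com/opentracing/opentracing-go/ext"
)

func demo(span opentracing.Span) {
	ext.SamplingPriority.Set(span, 1) // setDebugAndSampled: flags = debug|sampled, finalized
	ext.SamplingPriority.Set(span, 0) // unsetSampled clears only the sampled bit...
	// ...so the debug flag can survive, exactly as the comment warns.
}

func main() {}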
+ +package jaeger + +import "sync" + +// SpanAllocator abstraction of managing span allocations +type SpanAllocator interface { + Get() *Span + Put(*Span) +} + +type syncPollSpanAllocator struct { + spanPool sync.Pool +} + +func newSyncPollSpanAllocator() SpanAllocator { + return &syncPollSpanAllocator{ + spanPool: sync.Pool{New: func() interface{} { + return &Span{} + }}, + } +} + +func (pool *syncPollSpanAllocator) Get() *Span { + return pool.spanPool.Get().(*Span) +} + +func (pool *syncPollSpanAllocator) Put(span *Span) { + span.reset() + pool.spanPool.Put(span) +} + +type simpleSpanAllocator struct{} + +func (pool simpleSpanAllocator) Get() *Span { + return &Span{} +} + +func (pool simpleSpanAllocator) Put(span *Span) { + // @comment https://github.com/jaegertracing/jaeger-client-go/pull/381#issuecomment-475904351 + // since finished spans are not reused, no need to reset them + // span.reset() +} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/span_context.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/span_context.go new file mode 100644 index 000000000..5b2307be9 --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/span_context.go @@ -0,0 +1,418 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +import ( + "errors" + "fmt" + "strconv" + "strings" + "sync" + + "go.uber.org/atomic" +) + +const ( + flagSampled = 1 + flagDebug = 2 + flagFirehose = 8 +) + +var ( + errEmptyTracerStateString = errors.New("Cannot convert empty string to tracer state") + errMalformedTracerStateString = errors.New("String does not match tracer state format") + + emptyContext = SpanContext{} +) + +// TraceID represents unique 128bit identifier of a trace +type TraceID struct { + High, Low uint64 +} + +// SpanID represents unique 64bit identifier of a span +type SpanID uint64 + +// SpanContext represents propagated span identity and state +type SpanContext struct { + // traceID represents globally unique ID of the trace. + // Usually generated as a random number. + traceID TraceID + + // spanID represents span ID that must be unique within its trace, + // but does not have to be globally unique. + spanID SpanID + + // parentID refers to the ID of the parent span. + // Should be 0 if the current span is a root span. + parentID SpanID + + // Distributed Context baggage. The is a snapshot in time. + baggage map[string]string + + // debugID can be set to some correlation ID when the context is being + // extracted from a TextMap carrier. + // + // See JaegerDebugHeader in constants.go + debugID string + + // samplingState is shared across all spans + samplingState *samplingState + + // remote indicates that span context represents a remote parent + remote bool +} + +type samplingState struct { + // Span context's state flags that are propagated across processes. Only lower 8 bits are used. + // We use an int32 instead of byte to be able to use CAS operations. 
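The pool-backed allocator is only safe because Span carries the atomic referenceCounter introduced earlier in this diff: whoever hands a finished span to another goroutine must Retain it first, and the final Release (the tracer issues its own when reportSpan completes) drives the counter to -1 and returns the span to the pool. A sketch of that contract with a hypothetical asynchronous consumer:

package main

import "github.com/uber/jaeger-client-go"

// enqueue hands a finished span to another goroutine. Retain bumps the
// reference counter so the sync.Pool cannot recycle the Span in between.
func enqueue(span *jaeger.Span, sink chan<- *jaeger.Span) {
	sink <- span.Retain()
}

// consume reads spans and drops its reference when done; once every
// holder (including the tracer itself) has called Release, the span
// goes back to the allocator and must no longer be touched.
func consume(sink <-chan *jaeger.Span) {
	for span := range sink {
		_ = span.OperationName()
		span.Release()
	}
}

func main() {}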
+ stateFlags atomic.Int32 + + // When state is not final, sampling will be retried on other span write operations, + // like SetOperationName / SetTag, and the spans will remain writable. + final atomic.Bool + + // localRootSpan stores the SpanID of the first span created in this process for a given trace. + localRootSpan SpanID + + // extendedState allows samplers to keep intermediate state. + // The keys and values in this map are completely opaque: interface{} -> interface{}. + extendedState sync.Map +} + +func (s *samplingState) isLocalRootSpan(id SpanID) bool { + return id == s.localRootSpan +} + +func (s *samplingState) setFlag(newFlag int32) { + swapped := false + for !swapped { + old := s.stateFlags.Load() + swapped = s.stateFlags.CAS(old, old|newFlag) + } +} + +func (s *samplingState) unsetFlag(newFlag int32) { + swapped := false + for !swapped { + old := s.stateFlags.Load() + swapped = s.stateFlags.CAS(old, old&^newFlag) + } +} + +func (s *samplingState) setSampled() { + s.setFlag(flagSampled) +} + +func (s *samplingState) unsetSampled() { + s.unsetFlag(flagSampled) +} + +func (s *samplingState) setDebugAndSampled() { + s.setFlag(flagDebug | flagSampled) +} + +func (s *samplingState) setFirehose() { + s.setFlag(flagFirehose) +} + +func (s *samplingState) setFlags(flags byte) { + s.stateFlags.Store(int32(flags)) +} + +func (s *samplingState) setFinal() { + s.final.Store(true) +} + +func (s *samplingState) flags() byte { + return byte(s.stateFlags.Load()) +} + +func (s *samplingState) isSampled() bool { + return s.stateFlags.Load()&flagSampled == flagSampled +} + +func (s *samplingState) isDebug() bool { + return s.stateFlags.Load()&flagDebug == flagDebug +} + +func (s *samplingState) isFirehose() bool { + return s.stateFlags.Load()&flagFirehose == flagFirehose +} + +func (s *samplingState) isFinal() bool { + return s.final.Load() +} + +func (s *samplingState) extendedStateForKey(key interface{}, initValue func() interface{}) interface{} { + if value, ok := s.extendedState.Load(key); ok { + return value + } + value := initValue() + value, _ = s.extendedState.LoadOrStore(key, value) + return value +} + +// ForeachBaggageItem implements ForeachBaggageItem() of opentracing.SpanContext +func (c SpanContext) ForeachBaggageItem(handler func(k, v string) bool) { + for k, v := range c.baggage { + if !handler(k, v) { + break + } + } +} + +// IsSampled returns whether this trace was chosen for permanent storage +// by the sampling mechanism of the tracer. +func (c SpanContext) IsSampled() bool { + return c.samplingState.isSampled() +} + +// IsDebug indicates whether sampling was explicitly requested by the service. +func (c SpanContext) IsDebug() bool { + return c.samplingState.isDebug() +} + +// IsSamplingFinalized indicates whether the sampling decision has been finalized. +func (c SpanContext) IsSamplingFinalized() bool { + return c.samplingState.isFinal() +} + +// IsFirehose indicates whether the firehose flag was set +func (c SpanContext) IsFirehose() bool { + return c.samplingState.isFirehose() +} + +// ExtendedSamplingState returns the custom state object for a given key. If the value for this key does not exist, +// it is initialized via initValue function. This state can be used by samplers (e.g. x.PrioritySampler). +func (c SpanContext) ExtendedSamplingState(key interface{}, initValue func() interface{}) interface{} { + return c.samplingState.extendedStateForKey(key, initValue) +} + +// IsValid indicates whether this context actually represents a valid trace. 
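With flagSampled = 1, flagDebug = 2, and flagFirehose = 8, each flag occupies one bit, and the CAS loop makes concurrent set/unset race-free without a mutex. The same pattern with the standard library's sync/atomic (the vendored code uses go.uber.org/atomic, but the logic is identical):

package main

import (
	"fmt"
	"sync/atomic"
)

const (
	flagSampled int32 = 1
	flagDebug   int32 = 2
)

func setFlag(state *int32, flag int32) {
	for {
		old := atomic.LoadInt32(state)
		if atomic.CompareAndSwapInt32(state, old, old|flag) {
			return
		}
	}
}

func unsetFlag(state *int32, flag int32) {
	for {
		old := atomic.LoadInt32(state)
		if atomic.CompareAndSwapInt32(state, old, old&^flag) {
			return
		}
	}
}

func main() {
	var flags int32
	setFlag(&flags, flagDebug|flagSampled) // setDebugAndSampled: flags == 3
	unsetFlag(&flags, flagSampled)         // unsetSampled: flags == 2, debug bit survives
	fmt.Println(flags)                     // 2
}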
+func (c SpanContext) IsValid() bool { + return c.traceID.IsValid() && c.spanID != 0 +} + +// SetFirehose enables firehose mode for this trace. +func (c SpanContext) SetFirehose() { + c.samplingState.setFirehose() +} + +func (c SpanContext) String() string { + var flags int32 + if c.samplingState != nil { + flags = c.samplingState.stateFlags.Load() + } + if c.traceID.High == 0 { + return fmt.Sprintf("%016x:%016x:%016x:%x", c.traceID.Low, uint64(c.spanID), uint64(c.parentID), flags) + } + return fmt.Sprintf("%016x%016x:%016x:%016x:%x", c.traceID.High, c.traceID.Low, uint64(c.spanID), uint64(c.parentID), flags) +} + +// ContextFromString reconstructs the Context encoded in a string +func ContextFromString(value string) (SpanContext, error) { + var context SpanContext + if value == "" { + return emptyContext, errEmptyTracerStateString + } + parts := strings.Split(value, ":") + if len(parts) != 4 { + return emptyContext, errMalformedTracerStateString + } + var err error + if context.traceID, err = TraceIDFromString(parts[0]); err != nil { + return emptyContext, err + } + if context.spanID, err = SpanIDFromString(parts[1]); err != nil { + return emptyContext, err + } + if context.parentID, err = SpanIDFromString(parts[2]); err != nil { + return emptyContext, err + } + flags, err := strconv.ParseUint(parts[3], 10, 8) + if err != nil { + return emptyContext, err + } + context.samplingState = &samplingState{} + context.samplingState.setFlags(byte(flags)) + return context, nil +} + +// TraceID returns the trace ID of this span context +func (c SpanContext) TraceID() TraceID { + return c.traceID +} + +// SpanID returns the span ID of this span context +func (c SpanContext) SpanID() SpanID { + return c.spanID +} + +// ParentID returns the parent span ID of this span context +func (c SpanContext) ParentID() SpanID { + return c.parentID +} + +// Flags returns the bitmap containing such bits as 'sampled' and 'debug'. +func (c SpanContext) Flags() byte { + return c.samplingState.flags() +} + +// Span can be written to if it is sampled or the sampling decision has not been finalized. +func (c SpanContext) isWriteable() bool { + state := c.samplingState + return !state.isFinal() || state.isSampled() +} + +func (c SpanContext) isSamplingFinalized() bool { + return c.samplingState.isFinal() +} + +// NewSpanContext creates a new instance of SpanContext +func NewSpanContext(traceID TraceID, spanID, parentID SpanID, sampled bool, baggage map[string]string) SpanContext { + samplingState := &samplingState{} + if sampled { + samplingState.setSampled() + } + + return SpanContext{ + traceID: traceID, + spanID: spanID, + parentID: parentID, + samplingState: samplingState, + baggage: baggage} +} + +// CopyFrom copies data from ctx into this context, including span identity and baggage. +// TODO This is only used by interop.go. Remove once TChannel Go supports OpenTracing. +func (c *SpanContext) CopyFrom(ctx *SpanContext) { + c.traceID = ctx.traceID + c.spanID = ctx.spanID + c.parentID = ctx.parentID + c.samplingState = ctx.samplingState + if l := len(ctx.baggage); l > 0 { + c.baggage = make(map[string]string, l) + for k, v := range ctx.baggage { + c.baggage[k] = v + } + } else { + c.baggage = nil + } +} + +// WithBaggageItem creates a new context with an extra baggage item. +// Delete a baggage item if provided blank value. +// +// The SpanContext is designed to be immutable and passed by value. As such, +// it cannot contain any locks, and should only hold immutable data, including baggage. 
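ContextFromString accepts the Jaeger wire format {trace-id}:{span-id}:{parent-id}:{flags}, with IDs in hex (trace IDs longer than 16 digits populate the High word); note that the flags field is parsed as decimal (ParseUint base 10) even though String() renders it in hex. A round-trip sketch:

package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go"
)

func main() {
	ctx, err := jaeger.ContextFromString("00000000000000ab:00000000000000cd:0000000000000000:1")
	if err != nil {
		panic(err)
	}
	// flags=1 sets the sampled bit on the reconstructed samplingState.
	fmt.Println(ctx.TraceID(), ctx.SpanID(), ctx.IsSampled())
	// Output: 00000000000000ab 00000000000000cd true
}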
+// Another reason for why baggage is immutable is when the span context is passed +// as a parent when starting a new span. The new span's baggage cannot affect the parent +// span's baggage, so the child span either needs to take a copy of the parent baggage +// (which is expensive and unnecessary since baggage rarely changes in the life span of +// a trace), or it needs to do a copy-on-write, which is the approach taken here. +func (c SpanContext) WithBaggageItem(key, value string) SpanContext { + var newBaggage map[string]string + // unset baggage item + if value == "" { + if _, ok := c.baggage[key]; !ok { + return c + } + newBaggage = make(map[string]string, len(c.baggage)) + for k, v := range c.baggage { + newBaggage[k] = v + } + delete(newBaggage, key) + return SpanContext{c.traceID, c.spanID, c.parentID, newBaggage, "", c.samplingState, c.remote} + } + if c.baggage == nil { + newBaggage = map[string]string{key: value} + } else { + newBaggage = make(map[string]string, len(c.baggage)+1) + for k, v := range c.baggage { + newBaggage[k] = v + } + newBaggage[key] = value + } + // Use positional parameters so the compiler will help catch new fields. + return SpanContext{c.traceID, c.spanID, c.parentID, newBaggage, "", c.samplingState, c.remote} +} + +// isDebugIDContainerOnly returns true when the instance of the context is only +// used to return the debug/correlation ID from extract() method. This happens +// in the situation when "jaeger-debug-id" header is passed in the carrier to +// the extract() method, but the request otherwise has no span context in it. +// Previously this would've returned opentracing.ErrSpanContextNotFound from the +// extract method, but now it returns a dummy context with only debugID filled in. +// +// See JaegerDebugHeader in constants.go +// See TextMapPropagator#Extract +func (c *SpanContext) isDebugIDContainerOnly() bool { + return !c.traceID.IsValid() && c.debugID != "" +} + +// ------- TraceID ------- + +func (t TraceID) String() string { + if t.High == 0 { + return fmt.Sprintf("%016x", t.Low) + } + return fmt.Sprintf("%016x%016x", t.High, t.Low) +} + +// TraceIDFromString creates a TraceID from a hexadecimal string +func TraceIDFromString(s string) (TraceID, error) { + var hi, lo uint64 + var err error + if len(s) > 32 { + return TraceID{}, fmt.Errorf("TraceID cannot be longer than 32 hex characters: %s", s) + } else if len(s) > 16 { + hiLen := len(s) - 16 + if hi, err = strconv.ParseUint(s[0:hiLen], 16, 64); err != nil { + return TraceID{}, err + } + if lo, err = strconv.ParseUint(s[hiLen:], 16, 64); err != nil { + return TraceID{}, err + } + } else { + if lo, err = strconv.ParseUint(s, 16, 64); err != nil { + return TraceID{}, err + } + } + return TraceID{High: hi, Low: lo}, nil +} + +// IsValid checks if the trace ID is valid, i.e. not zero. 
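Copy-on-write in practice: the parent context is never mutated, and deleting a key that is absent returns the receiver unchanged. A small sketch using the exported NewSpanContext constructor shown above:

package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go"
)

func main() {
	parent := jaeger.NewSpanContext(jaeger.TraceID{Low: 1}, jaeger.SpanID(2), jaeger.SpanID(0), true, nil)
	child := parent.WithBaggageItem("tenant", "acme") // fresh map; parent untouched
	child.ForeachBaggageItem(func(k, v string) bool {
		fmt.Println(k, v) // tenant acme
		return true
	})
	unchanged := parent.WithBaggageItem("tenant", "") // deleting an absent key is a no-op
	fmt.Println(unchanged.String() == parent.String())
}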
+func (t TraceID) IsValid() bool { + return t.High != 0 || t.Low != 0 +} + +// ------- SpanID ------- + +func (s SpanID) String() string { + return fmt.Sprintf("%016x", uint64(s)) +} + +// SpanIDFromString creates a SpanID from a hexadecimal string +func SpanIDFromString(s string) (SpanID, error) { + if len(s) > 16 { + return SpanID(0), fmt.Errorf("SpanID cannot be longer than 16 hex characters: %s", s) + } + id, err := strconv.ParseUint(s, 16, 64) + if err != nil { + return SpanID(0), err + } + return SpanID(id), nil +} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/GoUnusedProtection__.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/GoUnusedProtection__.go new file mode 100644 index 000000000..54cd3b086 --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/GoUnusedProtection__.go @@ -0,0 +1,6 @@ +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. + +package agent + +var GoUnusedProtection__ int; + diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent-consts.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent-consts.go new file mode 100644 index 000000000..a0df50779 --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent-consts.go @@ -0,0 +1,28 @@ +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. + +package agent + +import( + "bytes" + "context" + "fmt" + "time" + "github.com/uber/jaeger-client-go/thrift" + "github.com/uber/jaeger-client-go/thrift-gen/jaeger" + "github.com/uber/jaeger-client-go/thrift-gen/zipkincore" + +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = time.Now +var _ = bytes.Equal + +var _ = jaeger.GoUnusedProtection__ +var _ = zipkincore.GoUnusedProtection__ + +func init() { +} + diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go index a192cb6f4..6472e84e6 100644 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go @@ -1,410 +1,396 @@ -// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. package agent -import ( +import( "bytes" + "context" "fmt" - "github.com/apache/thrift/lib/go/thrift" + "time" + "github.com/uber/jaeger-client-go/thrift" "github.com/uber/jaeger-client-go/thrift-gen/jaeger" "github.com/uber/jaeger-client-go/thrift-gen/zipkincore" + ) // (needed to ensure safety because of naive import list construction.) 
var _ = thrift.ZERO var _ = fmt.Printf +var _ = context.Background +var _ = time.Now var _ = bytes.Equal +var _ = jaeger.GoUnusedProtection__ var _ = zipkincore.GoUnusedProtection__ - type Agent interface { - // Parameters: - // - Spans - EmitZipkinBatch(spans []*zipkincore.Span) (err error) - // Parameters: - // - Batch - EmitBatch(batch *jaeger.Batch) (err error) + // Parameters: + // - Spans + EmitZipkinBatch(ctx context.Context, spans []*zipkincore.Span) (_err error) + // Parameters: + // - Batch + EmitBatch(ctx context.Context, batch *jaeger.Batch) (_err error) } type AgentClient struct { - Transport thrift.TTransport - ProtocolFactory thrift.TProtocolFactory - InputProtocol thrift.TProtocol - OutputProtocol thrift.TProtocol - SeqId int32 + c thrift.TClient + meta thrift.ResponseMeta } func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient { - return &AgentClient{Transport: t, - ProtocolFactory: f, - InputProtocol: f.GetProtocol(t), - OutputProtocol: f.GetProtocol(t), - SeqId: 0, - } + return &AgentClient{ + c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), + } } func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient { - return &AgentClient{Transport: t, - ProtocolFactory: nil, - InputProtocol: iprot, - OutputProtocol: oprot, - SeqId: 0, - } + return &AgentClient{ + c: thrift.NewTStandardClient(iprot, oprot), + } +} + +func NewAgentClient(c thrift.TClient) *AgentClient { + return &AgentClient{ + c: c, + } +} + +func (p *AgentClient) Client_() thrift.TClient { + return p.c +} + +func (p *AgentClient) LastResponseMeta_() thrift.ResponseMeta { + return p.meta +} + +func (p *AgentClient) SetLastResponseMeta_(meta thrift.ResponseMeta) { + p.meta = meta } // Parameters: // - Spans -func (p *AgentClient) EmitZipkinBatch(spans []*zipkincore.Span) (err error) { - if err = p.sendEmitZipkinBatch(spans); err != nil { - return - } - return -} - -func (p *AgentClient) sendEmitZipkinBatch(spans []*zipkincore.Span) (err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("emitZipkinBatch", thrift.ONEWAY, p.SeqId); err != nil { - return - } - args := AgentEmitZipkinBatchArgs{ - Spans: spans, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush() +func (p *AgentClient) EmitZipkinBatch(ctx context.Context, spans []*zipkincore.Span) (_err error) { + var _args0 AgentEmitZipkinBatchArgs + _args0.Spans = spans + p.SetLastResponseMeta_(thrift.ResponseMeta{}) + if _, err := p.Client_().Call(ctx, "emitZipkinBatch", &_args0, nil); err != nil { + return err + } + return nil } // Parameters: // - Batch -func (p *AgentClient) EmitBatch(batch *jaeger.Batch) (err error) { - if err = p.sendEmitBatch(batch); err != nil { - return - } - return -} - -func (p *AgentClient) sendEmitBatch(batch *jaeger.Batch) (err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("emitBatch", thrift.ONEWAY, p.SeqId); err != nil { - return - } - args := AgentEmitBatchArgs{ - Batch: batch, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush() +func (p *AgentClient) EmitBatch(ctx 
context.Context, batch *jaeger.Batch) (_err error) { + var _args1 AgentEmitBatchArgs + _args1.Batch = batch + p.SetLastResponseMeta_(thrift.ResponseMeta{}) + if _, err := p.Client_().Call(ctx, "emitBatch", &_args1, nil); err != nil { + return err + } + return nil } type AgentProcessor struct { - processorMap map[string]thrift.TProcessorFunction - handler Agent + processorMap map[string]thrift.TProcessorFunction + handler Agent } func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor + p.processorMap[key] = processor } func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok + processor, ok = p.processorMap[key] + return processor, ok } func (p *AgentProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { - return p.processorMap + return p.processorMap } func NewAgentProcessor(handler Agent) *AgentProcessor { - self0 := &AgentProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - self0.processorMap["emitZipkinBatch"] = &agentProcessorEmitZipkinBatch{handler: handler} - self0.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler: handler} - return self0 + self2 := &AgentProcessor{handler:handler, processorMap:make(map[string]thrift.TProcessorFunction)} + self2.processorMap["emitZipkinBatch"] = &agentProcessorEmitZipkinBatch{handler:handler} + self2.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler:handler} +return self2 } -func (p *AgentProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - name, _, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return false, err - } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(seqId, iprot, oprot) - } - iprot.Skip(thrift.STRUCT) - iprot.ReadMessageEnd() - x1 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) - oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) - x1.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, x1 +func (p *AgentProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + name, _, seqId, err2 := iprot.ReadMessageBegin(ctx) + if err2 != nil { return false, thrift.WrapTException(err2) } + if processor, ok := p.GetProcessorFunction(name); ok { + return processor.Process(ctx, seqId, iprot, oprot) + } + iprot.Skip(ctx, thrift.STRUCT) + iprot.ReadMessageEnd(ctx) + x3 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name) + oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId) + x3.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, x3 } type agentProcessorEmitZipkinBatch struct { - handler Agent + handler Agent } -func (p *agentProcessorEmitZipkinBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := AgentEmitZipkinBatchArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - return false, err - } +func (p *agentProcessorEmitZipkinBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AgentEmitZipkinBatchArgs{} + var err2 error + if err2 = args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := 
func() {} + _ = tickerCancel - iprot.ReadMessageEnd() - var err2 error - if err2 = p.handler.EmitZipkinBatch(args.Spans); err2 != nil { - return true, err2 - } - return true, nil + if err2 = p.handler.EmitZipkinBatch(ctx, args.Spans); err2 != nil { + tickerCancel() + return true, thrift.WrapTException(err2) + } + tickerCancel() + return true, nil } type agentProcessorEmitBatch struct { - handler Agent + handler Agent } -func (p *agentProcessorEmitBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := AgentEmitBatchArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - return false, err - } +func (p *agentProcessorEmitBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AgentEmitBatchArgs{} + var err2 error + if err2 = args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) - iprot.ReadMessageEnd() - var err2 error - if err2 = p.handler.EmitBatch(args.Batch); err2 != nil { - return true, err2 - } - return true, nil + tickerCancel := func() {} + _ = tickerCancel + + if err2 = p.handler.EmitBatch(ctx, args.Batch); err2 != nil { + tickerCancel() + return true, thrift.WrapTException(err2) + } + tickerCancel() + return true, nil } + // HELPER FUNCTIONS AND STRUCTURES // Attributes: // - Spans type AgentEmitZipkinBatchArgs struct { - Spans []*zipkincore.Span `thrift:"spans,1" json:"spans"` + Spans []*zipkincore.Span `thrift:"spans,1" db:"spans" json:"spans"` } func NewAgentEmitZipkinBatchArgs() *AgentEmitZipkinBatchArgs { - return &AgentEmitZipkinBatchArgs{} + return &AgentEmitZipkinBatchArgs{} } + func (p *AgentEmitZipkinBatchArgs) GetSpans() []*zipkincore.Span { - return p.Spans -} -func (p *AgentEmitZipkinBatchArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *AgentEmitZipkinBatchArgs) readField1(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*zipkincore.Span, 0, size) - p.Spans = tSlice - for i := 0; i < size; i++ { - _elem2 := &zipkincore.Span{} - if err := _elem2.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err) - } - p.Spans = append(p.Spans, _elem2) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *AgentEmitZipkinBatchArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("emitZipkinBatch_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := 
p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *AgentEmitZipkinBatchArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("spans", thrift.LIST, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Spans)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Spans { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err) - } - return err + return p.Spans +} +func (p *AgentEmitZipkinBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AgentEmitZipkinBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*zipkincore.Span, 0, size) + p.Spans = tSlice + for i := 0; i < size; i ++ { + _elem4 := &zipkincore.Span{} + if err := _elem4.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err) + } + p.Spans = append(p.Spans, _elem4) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *AgentEmitZipkinBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "emitZipkinBatch_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *AgentEmitZipkinBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 1); err 
!= nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Spans { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err) } + return err } func (p *AgentEmitZipkinBatchArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("AgentEmitZipkinBatchArgs(%+v)", *p) + if p == nil { + return "" + } + return fmt.Sprintf("AgentEmitZipkinBatchArgs(%+v)", *p) } // Attributes: // - Batch type AgentEmitBatchArgs struct { - Batch *jaeger.Batch `thrift:"batch,1" json:"batch"` + Batch *jaeger.Batch `thrift:"batch,1" db:"batch" json:"batch"` } func NewAgentEmitBatchArgs() *AgentEmitBatchArgs { - return &AgentEmitBatchArgs{} + return &AgentEmitBatchArgs{} } var AgentEmitBatchArgs_Batch_DEFAULT *jaeger.Batch - func (p *AgentEmitBatchArgs) GetBatch() *jaeger.Batch { - if !p.IsSetBatch() { - return AgentEmitBatchArgs_Batch_DEFAULT - } - return p.Batch + if !p.IsSetBatch() { + return AgentEmitBatchArgs_Batch_DEFAULT + } +return p.Batch } func (p *AgentEmitBatchArgs) IsSetBatch() bool { - return p.Batch != nil -} - -func (p *AgentEmitBatchArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *AgentEmitBatchArgs) readField1(iprot thrift.TProtocol) error { - p.Batch = &jaeger.Batch{} - if err := p.Batch.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err) - } - return nil -} - -func (p *AgentEmitBatchArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("emitBatch_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *AgentEmitBatchArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("batch", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err) - } - if err := p.Batch.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err) - 
} - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err) - } - return err + return p.Batch != nil +} + +func (p *AgentEmitBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AgentEmitBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Batch = &jaeger.Batch{} + if err := p.Batch.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err) + } + return nil +} + +func (p *AgentEmitBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "emitBatch_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *AgentEmitBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "batch", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err) } + if err := p.Batch.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err) } + return err } func (p *AgentEmitBatchArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p) + if p == nil { + return "" + } + return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p) } + + diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/constants.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/constants.go deleted file mode 100644 index 369d016ea..000000000 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/constants.go +++ /dev/null @@ -1,21 +0,0 @@ -// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package agent - -import ( - "bytes" - "fmt" - "github.com/apache/thrift/lib/go/thrift" - "github.com/uber/jaeger-client-go/thrift-gen/zipkincore" -) - -// (needed to ensure safety because of naive import list construction.) 
-var _ = thrift.ZERO -var _ = fmt.Printf -var _ = bytes.Equal - -var _ = zipkincore.GoUnusedProtection__ - -func init() { -} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/ttypes.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/ttypes.go deleted file mode 100644 index f377c2276..000000000 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/ttypes.go +++ /dev/null @@ -1,19 +0,0 @@ -// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package agent - -import ( - "bytes" - "fmt" - "github.com/apache/thrift/lib/go/thrift" - "github.com/uber/jaeger-client-go/thrift-gen/zipkincore" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = bytes.Equal - -var _ = zipkincore.GoUnusedProtection__ -var GoUnusedProtection__ int diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/GoUnusedProtection__.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/GoUnusedProtection__.go new file mode 100644 index 000000000..712b6a9da --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/GoUnusedProtection__.go @@ -0,0 +1,6 @@ +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. + +package baggage + +var GoUnusedProtection__ int; + diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggage-consts.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggage-consts.go new file mode 100644 index 000000000..39b5a7ee7 --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggage-consts.go @@ -0,0 +1,23 @@ +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. + +package baggage + +import( + "bytes" + "context" + "fmt" + "time" + "github.com/uber/jaeger-client-go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = time.Now +var _ = bytes.Equal + + +func init() { +} + diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggage.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggage.go new file mode 100644 index 000000000..e4d89d5d5 --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggage.go @@ -0,0 +1,565 @@ +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. + +package baggage + +import( + "bytes" + "context" + "fmt" + "time" + "github.com/uber/jaeger-client-go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) 
+var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = time.Now +var _ = bytes.Equal + +// Attributes: +// - BaggageKey +// - MaxValueLength +type BaggageRestriction struct { + BaggageKey string `thrift:"baggageKey,1,required" db:"baggageKey" json:"baggageKey"` + MaxValueLength int32 `thrift:"maxValueLength,2,required" db:"maxValueLength" json:"maxValueLength"` +} + +func NewBaggageRestriction() *BaggageRestriction { + return &BaggageRestriction{} +} + + +func (p *BaggageRestriction) GetBaggageKey() string { + return p.BaggageKey +} + +func (p *BaggageRestriction) GetMaxValueLength() int32 { + return p.MaxValueLength +} +func (p *BaggageRestriction) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetBaggageKey bool = false; + var issetMaxValueLength bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetBaggageKey = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetMaxValueLength = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetBaggageKey{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field BaggageKey is not set")); + } + if !issetMaxValueLength{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxValueLength is not set")); + } + return nil +} + +func (p *BaggageRestriction) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.BaggageKey = v +} + return nil +} + +func (p *BaggageRestriction) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.MaxValueLength = v +} + return nil +} + +func (p *BaggageRestriction) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "BaggageRestriction"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *BaggageRestriction) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := 
oprot.WriteFieldBegin(ctx, "baggageKey", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:baggageKey: ", p), err) } + if err := oprot.WriteString(ctx, string(p.BaggageKey)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.baggageKey (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:baggageKey: ", p), err) } + return err +} + +func (p *BaggageRestriction) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "maxValueLength", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:maxValueLength: ", p), err) } + if err := oprot.WriteI32(ctx, int32(p.MaxValueLength)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.maxValueLength (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:maxValueLength: ", p), err) } + return err +} + +func (p *BaggageRestriction) Equals(other *BaggageRestriction) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.BaggageKey != other.BaggageKey { return false } + if p.MaxValueLength != other.MaxValueLength { return false } + return true +} + +func (p *BaggageRestriction) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BaggageRestriction(%+v)", *p) +} + +type BaggageRestrictionManager interface { + // getBaggageRestrictions retrieves the baggage restrictions for a specific service. + // Usually, baggageRestrictions apply to all services however there may be situations + // where a baggageKey might only be allowed to be set by a specific service. + // + // Parameters: + // - ServiceName + GetBaggageRestrictions(ctx context.Context, serviceName string) (_r []*BaggageRestriction, _err error) +} + +type BaggageRestrictionManagerClient struct { + c thrift.TClient + meta thrift.ResponseMeta +} + +func NewBaggageRestrictionManagerClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *BaggageRestrictionManagerClient { + return &BaggageRestrictionManagerClient{ + c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), + } +} + +func NewBaggageRestrictionManagerClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *BaggageRestrictionManagerClient { + return &BaggageRestrictionManagerClient{ + c: thrift.NewTStandardClient(iprot, oprot), + } +} + +func NewBaggageRestrictionManagerClient(c thrift.TClient) *BaggageRestrictionManagerClient { + return &BaggageRestrictionManagerClient{ + c: c, + } +} + +func (p *BaggageRestrictionManagerClient) Client_() thrift.TClient { + return p.c +} + +func (p *BaggageRestrictionManagerClient) LastResponseMeta_() thrift.ResponseMeta { + return p.meta +} + +func (p *BaggageRestrictionManagerClient) SetLastResponseMeta_(meta thrift.ResponseMeta) { + p.meta = meta +} + +// getBaggageRestrictions retrieves the baggage restrictions for a specific service. +// Usually, baggageRestrictions apply to all services however there may be situations +// where a baggageKey might only be allowed to be set by a specific service. 
+// +// Parameters: +// - ServiceName +func (p *BaggageRestrictionManagerClient) GetBaggageRestrictions(ctx context.Context, serviceName string) (_r []*BaggageRestriction, _err error) { + var _args0 BaggageRestrictionManagerGetBaggageRestrictionsArgs + _args0.ServiceName = serviceName + var _result2 BaggageRestrictionManagerGetBaggageRestrictionsResult + var _meta1 thrift.ResponseMeta + _meta1, _err = p.Client_().Call(ctx, "getBaggageRestrictions", &_args0, &_result2) + p.SetLastResponseMeta_(_meta1) + if _err != nil { + return + } + return _result2.GetSuccess(), nil +} + +type BaggageRestrictionManagerProcessor struct { + processorMap map[string]thrift.TProcessorFunction + handler BaggageRestrictionManager +} + +func (p *BaggageRestrictionManagerProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { + p.processorMap[key] = processor +} + +func (p *BaggageRestrictionManagerProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { + processor, ok = p.processorMap[key] + return processor, ok +} + +func (p *BaggageRestrictionManagerProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { + return p.processorMap +} + +func NewBaggageRestrictionManagerProcessor(handler BaggageRestrictionManager) *BaggageRestrictionManagerProcessor { + + self3 := &BaggageRestrictionManagerProcessor{handler:handler, processorMap:make(map[string]thrift.TProcessorFunction)} + self3.processorMap["getBaggageRestrictions"] = &baggageRestrictionManagerProcessorGetBaggageRestrictions{handler:handler} +return self3 +} + +func (p *BaggageRestrictionManagerProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + name, _, seqId, err2 := iprot.ReadMessageBegin(ctx) + if err2 != nil { return false, thrift.WrapTException(err2) } + if processor, ok := p.GetProcessorFunction(name); ok { + return processor.Process(ctx, seqId, iprot, oprot) + } + iprot.Skip(ctx, thrift.STRUCT) + iprot.ReadMessageEnd(ctx) + x4 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name) + oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId) + x4.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, x4 + +} + +type baggageRestrictionManagerProcessorGetBaggageRestrictions struct { + handler BaggageRestrictionManager +} + +func (p *baggageRestrictionManagerProcessorGetBaggageRestrictions) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := BaggageRestrictionManagerGetBaggageRestrictionsArgs{} + var err2 error + if err2 = args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) + oprot.WriteMessageBegin(ctx, "getBaggageRestrictions", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := func() {} + // Start a goroutine to do server side connectivity check. 
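
The hunk below is new in the 0.14-generated processor: when thrift.ServerConnectivityCheckInterval is positive, the processor spawns a goroutine that polls the transport on a ticker and cancels the handler's context as soon as the client connection closes, so a long-running handler can abandon work for a dead peer. A minimal sketch of the same ticker-plus-cancel shape, independent of Thrift and simplified to a single context (isOpen stands in for iprot.Transport().IsOpen()):

package main

import (
	"context"
	"fmt"
	"sync/atomic"
	"time"
)

// watchConn cancels ctx as soon as isOpen reports false, polling on the
// given interval - the same structure as the generated processor code.
func watchConn(ctx context.Context, cancel context.CancelFunc, interval time.Duration, isOpen func() bool) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			if !isOpen() {
				cancel()
				return
			}
		}
	}
}

func main() {
	var open atomic.Bool
	open.Store(true)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go watchConn(ctx, cancel, 10*time.Millisecond, open.Load)

	// Simulate the peer disconnecting while the handler is busy.
	time.AfterFunc(50*time.Millisecond, func() { open.Store(false) })

	<-ctx.Done()
	fmt.Println("handler context cancelled:", ctx.Err())
}
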
+ if thrift.ServerConnectivityCheckInterval > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(ctx) + defer cancel() + var tickerCtx context.Context + tickerCtx, tickerCancel = context.WithCancel(context.Background()) + defer tickerCancel() + go func(ctx context.Context, cancel context.CancelFunc) { + ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if !iprot.Transport().IsOpen() { + cancel() + return + } + } + } + }(tickerCtx, cancel) + } + + result := BaggageRestrictionManagerGetBaggageRestrictionsResult{} + var retval []*BaggageRestriction + if retval, err2 = p.handler.GetBaggageRestrictions(ctx, args.ServiceName); err2 != nil { + tickerCancel() + if err2 == thrift.ErrAbandonRequest { + return false, thrift.WrapTException(err2) + } + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getBaggageRestrictions: " + err2.Error()) + oprot.WriteMessageBegin(ctx, "getBaggageRestrictions", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return true, thrift.WrapTException(err2) + } else { + result.Success = retval + } + tickerCancel() + if err2 = oprot.WriteMessageBegin(ctx, "getBaggageRestrictions", thrift.REPLY, seqId); err2 != nil { + err = thrift.WrapTException(err2) + } + if err2 = result.Write(ctx, oprot); err == nil && err2 != nil { + err = thrift.WrapTException(err2) + } + if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil { + err = thrift.WrapTException(err2) + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = thrift.WrapTException(err2) + } + if err != nil { + return + } + return true, err +} + + +// HELPER FUNCTIONS AND STRUCTURES + +// Attributes: +// - ServiceName +type BaggageRestrictionManagerGetBaggageRestrictionsArgs struct { + ServiceName string `thrift:"serviceName,1" db:"serviceName" json:"serviceName"` +} + +func NewBaggageRestrictionManagerGetBaggageRestrictionsArgs() *BaggageRestrictionManagerGetBaggageRestrictionsArgs { + return &BaggageRestrictionManagerGetBaggageRestrictionsArgs{} +} + + +func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) GetServiceName() string { + return p.ServiceName +} +func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { 
+ p.ServiceName = v +} + return nil +} + +func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "getBaggageRestrictions_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "serviceName", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) } + if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) } + return err +} + +func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BaggageRestrictionManagerGetBaggageRestrictionsArgs(%+v)", *p) +} + +// Attributes: +// - Success +type BaggageRestrictionManagerGetBaggageRestrictionsResult struct { + Success []*BaggageRestriction `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewBaggageRestrictionManagerGetBaggageRestrictionsResult() *BaggageRestrictionManagerGetBaggageRestrictionsResult { + return &BaggageRestrictionManagerGetBaggageRestrictionsResult{} +} + +var BaggageRestrictionManagerGetBaggageRestrictionsResult_Success_DEFAULT []*BaggageRestriction + +func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) GetSuccess() []*BaggageRestriction { + return p.Success +} +func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 0: + if fieldTypeId == thrift.LIST { + if err := p.ReadField0(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*BaggageRestriction, 
0, size) + p.Success = tSlice + for i := 0; i < size; i ++ { + _elem5 := &BaggageRestriction{} + if err := _elem5.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem5), err) + } + p.Success = append(p.Success, _elem5) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "getBaggageRestrictions_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField0(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Success { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) } + } + return err +} + +func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BaggageRestrictionManagerGetBaggageRestrictionsResult(%+v)", *p) +} + + diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggagerestrictionmanager.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggagerestrictionmanager.go deleted file mode 100644 index 1931cd72c..000000000 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggagerestrictionmanager.go +++ /dev/null @@ -1,435 +0,0 @@ -// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package baggage - -import ( - "bytes" - "fmt" - "github.com/apache/thrift/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = bytes.Equal - -type BaggageRestrictionManager interface { - // getBaggageRestrictions retrieves the baggage restrictions for a specific service. - // Usually, baggageRestrictions apply to all services however there may be situations - // where a baggageKey might only be allowed to be set by a specific service. 
- // - // Parameters: - // - ServiceName - GetBaggageRestrictions(serviceName string) (r []*BaggageRestriction, err error) -} - -type BaggageRestrictionManagerClient struct { - Transport thrift.TTransport - ProtocolFactory thrift.TProtocolFactory - InputProtocol thrift.TProtocol - OutputProtocol thrift.TProtocol - SeqId int32 -} - -func NewBaggageRestrictionManagerClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *BaggageRestrictionManagerClient { - return &BaggageRestrictionManagerClient{Transport: t, - ProtocolFactory: f, - InputProtocol: f.GetProtocol(t), - OutputProtocol: f.GetProtocol(t), - SeqId: 0, - } -} - -func NewBaggageRestrictionManagerClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *BaggageRestrictionManagerClient { - return &BaggageRestrictionManagerClient{Transport: t, - ProtocolFactory: nil, - InputProtocol: iprot, - OutputProtocol: oprot, - SeqId: 0, - } -} - -// getBaggageRestrictions retrieves the baggage restrictions for a specific service. -// Usually, baggageRestrictions apply to all services however there may be situations -// where a baggageKey might only be allowed to be set by a specific service. -// -// Parameters: -// - ServiceName -func (p *BaggageRestrictionManagerClient) GetBaggageRestrictions(serviceName string) (r []*BaggageRestriction, err error) { - if err = p.sendGetBaggageRestrictions(serviceName); err != nil { - return - } - return p.recvGetBaggageRestrictions() -} - -func (p *BaggageRestrictionManagerClient) sendGetBaggageRestrictions(serviceName string) (err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("getBaggageRestrictions", thrift.CALL, p.SeqId); err != nil { - return - } - args := BaggageRestrictionManagerGetBaggageRestrictionsArgs{ - ServiceName: serviceName, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush() -} - -func (p *BaggageRestrictionManagerClient) recvGetBaggageRestrictions() (value []*BaggageRestriction, err error) { - iprot := p.InputProtocol - if iprot == nil { - iprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.InputProtocol = iprot - } - method, mTypeId, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return - } - if method != "getBaggageRestrictions" { - err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "getBaggageRestrictions failed: wrong method name") - return - } - if p.SeqId != seqId { - err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "getBaggageRestrictions failed: out of sequence response") - return - } - if mTypeId == thrift.EXCEPTION { - error0 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error1 error - error1, err = error0.Read(iprot) - if err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - err = error1 - return - } - if mTypeId != thrift.REPLY { - err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "getBaggageRestrictions failed: invalid message type") - return - } - result := BaggageRestrictionManagerGetBaggageRestrictionsResult{} - if err = result.Read(iprot); err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - value = result.GetSuccess() - return -} - -type BaggageRestrictionManagerProcessor struct { - processorMap 
map[string]thrift.TProcessorFunction - handler BaggageRestrictionManager -} - -func (p *BaggageRestrictionManagerProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor -} - -func (p *BaggageRestrictionManagerProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok -} - -func (p *BaggageRestrictionManagerProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { - return p.processorMap -} - -func NewBaggageRestrictionManagerProcessor(handler BaggageRestrictionManager) *BaggageRestrictionManagerProcessor { - - self2 := &BaggageRestrictionManagerProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - self2.processorMap["getBaggageRestrictions"] = &baggageRestrictionManagerProcessorGetBaggageRestrictions{handler: handler} - return self2 -} - -func (p *BaggageRestrictionManagerProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - name, _, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return false, err - } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(seqId, iprot, oprot) - } - iprot.Skip(thrift.STRUCT) - iprot.ReadMessageEnd() - x3 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) - oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) - x3.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, x3 - -} - -type baggageRestrictionManagerProcessorGetBaggageRestrictions struct { - handler BaggageRestrictionManager -} - -func (p *baggageRestrictionManagerProcessorGetBaggageRestrictions) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := BaggageRestrictionManagerGetBaggageRestrictionsArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("getBaggageRestrictions", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, err - } - - iprot.ReadMessageEnd() - result := BaggageRestrictionManagerGetBaggageRestrictionsResult{} - var retval []*BaggageRestriction - var err2 error - if retval, err2 = p.handler.GetBaggageRestrictions(args.ServiceName); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getBaggageRestrictions: "+err2.Error()) - oprot.WriteMessageBegin("getBaggageRestrictions", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("getBaggageRestrictions", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -// HELPER FUNCTIONS AND STRUCTURES - -// Attributes: -// - ServiceName -type BaggageRestrictionManagerGetBaggageRestrictionsArgs struct { - ServiceName string `thrift:"serviceName,1" json:"serviceName"` -} - -func NewBaggageRestrictionManagerGetBaggageRestrictionsArgs() *BaggageRestrictionManagerGetBaggageRestrictionsArgs { - return &BaggageRestrictionManagerGetBaggageRestrictionsArgs{} -} - -func 
(p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) GetServiceName() string { - return p.ServiceName -} -func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.ServiceName = v - } - return nil -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("getBaggageRestrictions_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("serviceName", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) - } - if err := oprot.WriteString(string(p.ServiceName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) - } - return err -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("BaggageRestrictionManagerGetBaggageRestrictionsArgs(%+v)", *p) -} - -// Attributes: -// - Success -type BaggageRestrictionManagerGetBaggageRestrictionsResult struct { - Success []*BaggageRestriction `thrift:"success,0" json:"success,omitempty"` -} - -func NewBaggageRestrictionManagerGetBaggageRestrictionsResult() *BaggageRestrictionManagerGetBaggageRestrictionsResult { - return &BaggageRestrictionManagerGetBaggageRestrictionsResult{} -} - -var BaggageRestrictionManagerGetBaggageRestrictionsResult_Success_DEFAULT []*BaggageRestriction - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) GetSuccess() []*BaggageRestriction { - return p.Success -} -func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", 
p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if err := p.readField0(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) readField0(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*BaggageRestriction, 0, size) - p.Success = tSlice - for i := 0; i < size; i++ { - _elem4 := &BaggageRestriction{} - if err := _elem4.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err) - } - p.Success = append(p.Success, _elem4) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("getBaggageRestrictions_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField0(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) writeField0(oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.LIST, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Success)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Success { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("BaggageRestrictionManagerGetBaggageRestrictionsResult(%+v)", *p) -} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/constants.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/constants.go deleted file mode 100644 index 6668424a5..000000000 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/constants.go +++ /dev/null @@ -1,18 +0,0 @@ -// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package baggage - -import ( - "bytes" - "fmt" - 
"github.com/apache/thrift/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = bytes.Equal - -func init() { -} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/ttypes.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/ttypes.go deleted file mode 100644 index be442fbd9..000000000 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/ttypes.go +++ /dev/null @@ -1,154 +0,0 @@ -// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package baggage - -import ( - "bytes" - "fmt" - "github.com/apache/thrift/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = bytes.Equal - -var GoUnusedProtection__ int - -// Attributes: -// - BaggageKey -// - MaxValueLength -type BaggageRestriction struct { - BaggageKey string `thrift:"baggageKey,1,required" json:"baggageKey"` - MaxValueLength int32 `thrift:"maxValueLength,2,required" json:"maxValueLength"` -} - -func NewBaggageRestriction() *BaggageRestriction { - return &BaggageRestriction{} -} - -func (p *BaggageRestriction) GetBaggageKey() string { - return p.BaggageKey -} - -func (p *BaggageRestriction) GetMaxValueLength() int32 { - return p.MaxValueLength -} -func (p *BaggageRestriction) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetBaggageKey bool = false - var issetMaxValueLength bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetBaggageKey = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - issetMaxValueLength = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetBaggageKey { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field BaggageKey is not set")) - } - if !issetMaxValueLength { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxValueLength is not set")) - } - return nil -} - -func (p *BaggageRestriction) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.BaggageKey = v - } - return nil -} - -func (p *BaggageRestriction) readField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.MaxValueLength = v - } - return nil -} - -func (p *BaggageRestriction) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("BaggageRestriction"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); 
err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *BaggageRestriction) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("baggageKey", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:baggageKey: ", p), err) - } - if err := oprot.WriteString(string(p.BaggageKey)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.baggageKey (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:baggageKey: ", p), err) - } - return err -} - -func (p *BaggageRestriction) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("maxValueLength", thrift.I32, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:maxValueLength: ", p), err) - } - if err := oprot.WriteI32(int32(p.MaxValueLength)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.maxValueLength (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:maxValueLength: ", p), err) - } - return err -} - -func (p *BaggageRestriction) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("BaggageRestriction(%+v)", *p) -} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/GoUnusedProtection__.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/GoUnusedProtection__.go new file mode 100644 index 000000000..fe45a9f9a --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/GoUnusedProtection__.go @@ -0,0 +1,6 @@ +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. + +package jaeger + +var GoUnusedProtection__ int; + diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/agent.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/agent.go deleted file mode 100644 index db6cac9fc..000000000 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/agent.go +++ /dev/null @@ -1,242 +0,0 @@ -// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package jaeger - -import ( - "bytes" - "fmt" - "github.com/apache/thrift/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) 
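
The deleted agent.go below shows the old 0.9.3 client shape: every generated client carried its own Transport, ProtocolFactory, input/output protocols, and SeqId counter, and emitBatch was written as a thrift.ONEWAY message with no reply read. In the 0.14 replacements earlier in this patch, that plumbing collapses into a single `c thrift.TClient` field built with thrift.NewTStandardClient, and every call takes a context. A rough sketch of that refactoring, using stand-in types rather than the real Thrift API:

package main

import (
	"context"
	"fmt"
)

// protocol abstracts message framing, loosely playing the role of
// thrift.TProtocol in this sketch.
type protocol interface {
	writeMessage(ctx context.Context, name string, seq int32, args string) error
}

type printProtocol struct{}

func (printProtocol) writeMessage(_ context.Context, name string, seq int32, args string) error {
	fmt.Printf("-> %s seq=%d args=%q\n", name, seq, args)
	return nil
}

// standardClient owns the sequence-number bookkeeping that the old
// generated clients (like the deleted AgentClient) each carried themselves.
type standardClient struct {
	p   protocol
	seq int32
}

func (c *standardClient) call(ctx context.Context, method, args string) error {
	c.seq++
	return c.p.writeMessage(ctx, method, c.seq, args)
}

// agentClient has the shape of a 0.14-style generated client: one field,
// all transport plumbing delegated.
type agentClient struct{ c *standardClient }

func (a *agentClient) EmitBatch(ctx context.Context, batch string) error {
	// Oneway call: write the message and return without awaiting a reply.
	return a.c.call(ctx, "emitBatch", batch)
}

func main() {
	a := &agentClient{c: &standardClient{p: printProtocol{}}}
	_ = a.EmitBatch(context.Background(), "span-batch-1")
	_ = a.EmitBatch(context.Background(), "span-batch-2")
}
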
-var _ = thrift.ZERO -var _ = fmt.Printf -var _ = bytes.Equal - -type Agent interface { - // Parameters: - // - Batch - EmitBatch(batch *Batch) (err error) -} - -type AgentClient struct { - Transport thrift.TTransport - ProtocolFactory thrift.TProtocolFactory - InputProtocol thrift.TProtocol - OutputProtocol thrift.TProtocol - SeqId int32 -} - -func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient { - return &AgentClient{Transport: t, - ProtocolFactory: f, - InputProtocol: f.GetProtocol(t), - OutputProtocol: f.GetProtocol(t), - SeqId: 0, - } -} - -func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient { - return &AgentClient{Transport: t, - ProtocolFactory: nil, - InputProtocol: iprot, - OutputProtocol: oprot, - SeqId: 0, - } -} - -// Parameters: -// - Batch -func (p *AgentClient) EmitBatch(batch *Batch) (err error) { - if err = p.sendEmitBatch(batch); err != nil { - return - } - return -} - -func (p *AgentClient) sendEmitBatch(batch *Batch) (err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("emitBatch", thrift.ONEWAY, p.SeqId); err != nil { - return - } - args := AgentEmitBatchArgs{ - Batch: batch, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush() -} - -type AgentProcessor struct { - processorMap map[string]thrift.TProcessorFunction - handler Agent -} - -func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor -} - -func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok -} - -func (p *AgentProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { - return p.processorMap -} - -func NewAgentProcessor(handler Agent) *AgentProcessor { - - self6 := &AgentProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - self6.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler: handler} - return self6 -} - -func (p *AgentProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - name, _, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return false, err - } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(seqId, iprot, oprot) - } - iprot.Skip(thrift.STRUCT) - iprot.ReadMessageEnd() - x7 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) - oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) - x7.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, x7 - -} - -type agentProcessorEmitBatch struct { - handler Agent -} - -func (p *agentProcessorEmitBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := AgentEmitBatchArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - return false, err - } - - iprot.ReadMessageEnd() - var err2 error - if err2 = p.handler.EmitBatch(args.Batch); err2 != nil { - return true, err2 - } - return true, nil -} - -// HELPER FUNCTIONS AND STRUCTURES - -// Attributes: -// - Batch -type AgentEmitBatchArgs struct { - Batch *Batch `thrift:"batch,1" json:"batch"` -} - -func NewAgentEmitBatchArgs() *AgentEmitBatchArgs { - return 
&AgentEmitBatchArgs{} -} - -var AgentEmitBatchArgs_Batch_DEFAULT *Batch - -func (p *AgentEmitBatchArgs) GetBatch() *Batch { - if !p.IsSetBatch() { - return AgentEmitBatchArgs_Batch_DEFAULT - } - return p.Batch -} -func (p *AgentEmitBatchArgs) IsSetBatch() bool { - return p.Batch != nil -} - -func (p *AgentEmitBatchArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *AgentEmitBatchArgs) readField1(iprot thrift.TProtocol) error { - p.Batch = &Batch{} - if err := p.Batch.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err) - } - return nil -} - -func (p *AgentEmitBatchArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("emitBatch_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *AgentEmitBatchArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("batch", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err) - } - if err := p.Batch.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err) - } - return err -} - -func (p *AgentEmitBatchArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p) -} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/constants.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/constants.go deleted file mode 100644 index 250474222..000000000 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/constants.go +++ /dev/null @@ -1,18 +0,0 @@ -// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package jaeger - -import ( - "bytes" - "fmt" - "github.com/apache/thrift/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) 
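
The replacement jaeger.go that follows opens with the TagType and SpanRefType enums, which the 0.14 compiler now equips with String/FromString helpers plus MarshalText/UnmarshalText, Scan, and Value methods, so the enums plug into encoding/json and database/sql directly. The sketch below shows the payoff with a made-up enum that carries the same method set: JSON round-trips the symbolic name instead of a raw integer.

package main

import (
	"encoding/json"
	"fmt"
)

// color mirrors the method set Thrift 0.14 generates for IDL enums
// such as TagType (illustrative only; not the generated package).
type color int64

const (
	colorRed  color = 0
	colorBlue color = 1
)

func (p color) String() string {
	switch p {
	case colorRed:
		return "RED"
	case colorBlue:
		return "BLUE"
	}
	return "<UNSET>"
}

func colorFromString(s string) (color, error) {
	switch s {
	case "RED":
		return colorRed, nil
	case "BLUE":
		return colorBlue, nil
	}
	return color(0), fmt.Errorf("not a valid color string")
}

// MarshalText/UnmarshalText make the enum a TextMarshaler, which
// encoding/json uses automatically for struct fields.
func (p color) MarshalText() ([]byte, error) { return []byte(p.String()), nil }

func (p *color) UnmarshalText(text []byte) error {
	q, err := colorFromString(string(text))
	if err != nil {
		return err
	}
	*p = q
	return nil
}

type tag struct {
	VType color `json:"vType"`
}

func main() {
	out, _ := json.Marshal(tag{VType: colorBlue})
	fmt.Println(string(out)) // {"vType":"BLUE"}

	var t tag
	_ = json.Unmarshal([]byte(`{"vType":"RED"}`), &t)
	fmt.Println(t.VType == colorRed) // true
}
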
-var _ = thrift.ZERO -var _ = fmt.Printf -var _ = bytes.Equal - -func init() { -} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger-consts.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger-consts.go new file mode 100644 index 000000000..b6ce85570 --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger-consts.go @@ -0,0 +1,23 @@ +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. + +package jaeger + +import( + "bytes" + "context" + "fmt" + "time" + "github.com/uber/jaeger-client-go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = time.Now +var _ = bytes.Equal + + +func init() { +} + diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger.go new file mode 100644 index 000000000..d55cca024 --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger.go @@ -0,0 +1,2698 @@ +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. + +package jaeger + +import( + "bytes" + "context" + "database/sql/driver" + "errors" + "fmt" + "time" + "github.com/uber/jaeger-client-go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = time.Now +var _ = bytes.Equal + +type TagType int64 +const ( + TagType_STRING TagType = 0 + TagType_DOUBLE TagType = 1 + TagType_BOOL TagType = 2 + TagType_LONG TagType = 3 + TagType_BINARY TagType = 4 +) + +func (p TagType) String() string { + switch p { + case TagType_STRING: return "STRING" + case TagType_DOUBLE: return "DOUBLE" + case TagType_BOOL: return "BOOL" + case TagType_LONG: return "LONG" + case TagType_BINARY: return "BINARY" + } + return "" +} + +func TagTypeFromString(s string) (TagType, error) { + switch s { + case "STRING": return TagType_STRING, nil + case "DOUBLE": return TagType_DOUBLE, nil + case "BOOL": return TagType_BOOL, nil + case "LONG": return TagType_LONG, nil + case "BINARY": return TagType_BINARY, nil + } + return TagType(0), fmt.Errorf("not a valid TagType string") +} + + +func TagTypePtr(v TagType) *TagType { return &v } + +func (p TagType) MarshalText() ([]byte, error) { +return []byte(p.String()), nil +} + +func (p *TagType) UnmarshalText(text []byte) error { +q, err := TagTypeFromString(string(text)) +if (err != nil) { +return err +} +*p = q +return nil +} + +func (p *TagType) Scan(value interface{}) error { +v, ok := value.(int64) +if !ok { +return errors.New("Scan value is not int64") +} +*p = TagType(v) +return nil +} + +func (p * TagType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } +return int64(*p), nil +} +type SpanRefType int64 +const ( + SpanRefType_CHILD_OF SpanRefType = 0 + SpanRefType_FOLLOWS_FROM SpanRefType = 1 +) + +func (p SpanRefType) String() string { + switch p { + case SpanRefType_CHILD_OF: return "CHILD_OF" + case SpanRefType_FOLLOWS_FROM: return "FOLLOWS_FROM" + } + return "" +} + +func SpanRefTypeFromString(s string) (SpanRefType, error) { + switch s { + case "CHILD_OF": return SpanRefType_CHILD_OF, nil + case "FOLLOWS_FROM": return SpanRefType_FOLLOWS_FROM, nil + } + return SpanRefType(0), fmt.Errorf("not a valid SpanRefType string") +} + + +func SpanRefTypePtr(v 
SpanRefType) *SpanRefType { return &v } + +func (p SpanRefType) MarshalText() ([]byte, error) { +return []byte(p.String()), nil +} + +func (p *SpanRefType) UnmarshalText(text []byte) error { +q, err := SpanRefTypeFromString(string(text)) +if (err != nil) { +return err +} +*p = q +return nil +} + +func (p *SpanRefType) Scan(value interface{}) error { +v, ok := value.(int64) +if !ok { +return errors.New("Scan value is not int64") +} +*p = SpanRefType(v) +return nil +} + +func (p * SpanRefType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } +return int64(*p), nil +} +// Attributes: +// - Key +// - VType +// - VStr +// - VDouble +// - VBool +// - VLong +// - VBinary +type Tag struct { + Key string `thrift:"key,1,required" db:"key" json:"key"` + VType TagType `thrift:"vType,2,required" db:"vType" json:"vType"` + VStr *string `thrift:"vStr,3" db:"vStr" json:"vStr,omitempty"` + VDouble *float64 `thrift:"vDouble,4" db:"vDouble" json:"vDouble,omitempty"` + VBool *bool `thrift:"vBool,5" db:"vBool" json:"vBool,omitempty"` + VLong *int64 `thrift:"vLong,6" db:"vLong" json:"vLong,omitempty"` + VBinary []byte `thrift:"vBinary,7" db:"vBinary" json:"vBinary,omitempty"` +} + +func NewTag() *Tag { + return &Tag{} +} + + +func (p *Tag) GetKey() string { + return p.Key +} + +func (p *Tag) GetVType() TagType { + return p.VType +} +var Tag_VStr_DEFAULT string +func (p *Tag) GetVStr() string { + if !p.IsSetVStr() { + return Tag_VStr_DEFAULT + } +return *p.VStr +} +var Tag_VDouble_DEFAULT float64 +func (p *Tag) GetVDouble() float64 { + if !p.IsSetVDouble() { + return Tag_VDouble_DEFAULT + } +return *p.VDouble +} +var Tag_VBool_DEFAULT bool +func (p *Tag) GetVBool() bool { + if !p.IsSetVBool() { + return Tag_VBool_DEFAULT + } +return *p.VBool +} +var Tag_VLong_DEFAULT int64 +func (p *Tag) GetVLong() int64 { + if !p.IsSetVLong() { + return Tag_VLong_DEFAULT + } +return *p.VLong +} +var Tag_VBinary_DEFAULT []byte + +func (p *Tag) GetVBinary() []byte { + return p.VBinary +} +func (p *Tag) IsSetVStr() bool { + return p.VStr != nil +} + +func (p *Tag) IsSetVDouble() bool { + return p.VDouble != nil +} + +func (p *Tag) IsSetVBool() bool { + return p.VBool != nil +} + +func (p *Tag) IsSetVLong() bool { + return p.VLong != nil +} + +func (p *Tag) IsSetVBinary() bool { + return p.VBinary != nil +} + +func (p *Tag) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetKey bool = false; + var issetVType bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetKey = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetVType = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.DOUBLE { + if err := p.ReadField4(ctx, 
iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField5(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.I64 { + if err := p.ReadField6(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.STRING { + if err := p.ReadField7(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetKey{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Key is not set")); + } + if !issetVType{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field VType is not set")); + } + return nil +} + +func (p *Tag) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.Key = v +} + return nil +} + +func (p *Tag) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + temp := TagType(v) + p.VType = temp +} + return nil +} + +func (p *Tag) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) +} else { + p.VStr = &v +} + return nil +} + +func (p *Tag) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadDouble(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) +} else { + p.VDouble = &v +} + return nil +} + +func (p *Tag) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 5: ", err) +} else { + p.VBool = &v +} + return nil +} + +func (p *Tag) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 6: ", err) +} else { + p.VLong = &v +} + return nil +} + +func (p *Tag) ReadField7(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 7: ", err) +} else { + p.VBinary = v +} + return nil +} + +func (p *Tag) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Tag"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + if err := p.writeField4(ctx, oprot); err != nil { return err } + if err := p.writeField5(ctx, oprot); err != nil { return err } + if err := 
p.writeField6(ctx, oprot); err != nil { return err } + if err := p.writeField7(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *Tag) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "key", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) } + if err := oprot.WriteString(ctx, string(p.Key)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) } + return err +} + +func (p *Tag) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "vType", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:vType: ", p), err) } + if err := oprot.WriteI32(ctx, int32(p.VType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.vType (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:vType: ", p), err) } + return err +} + +func (p *Tag) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetVStr() { + if err := oprot.WriteFieldBegin(ctx, "vStr", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:vStr: ", p), err) } + if err := oprot.WriteString(ctx, string(*p.VStr)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.vStr (3) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:vStr: ", p), err) } + } + return err +} + +func (p *Tag) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetVDouble() { + if err := oprot.WriteFieldBegin(ctx, "vDouble", thrift.DOUBLE, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:vDouble: ", p), err) } + if err := oprot.WriteDouble(ctx, float64(*p.VDouble)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.vDouble (4) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:vDouble: ", p), err) } + } + return err +} + +func (p *Tag) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetVBool() { + if err := oprot.WriteFieldBegin(ctx, "vBool", thrift.BOOL, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:vBool: ", p), err) } + if err := oprot.WriteBool(ctx, bool(*p.VBool)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.vBool (5) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:vBool: ", p), err) } + } + return err +} + +func (p *Tag) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetVLong() { + if err := oprot.WriteFieldBegin(ctx, "vLong", thrift.I64, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 
6:vLong: ", p), err) } + if err := oprot.WriteI64(ctx, int64(*p.VLong)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.vLong (6) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:vLong: ", p), err) } + } + return err +} + +func (p *Tag) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetVBinary() { + if err := oprot.WriteFieldBegin(ctx, "vBinary", thrift.STRING, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:vBinary: ", p), err) } + if err := oprot.WriteBinary(ctx, p.VBinary); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.vBinary (7) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:vBinary: ", p), err) } + } + return err +} + +func (p *Tag) Equals(other *Tag) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Key != other.Key { return false } + if p.VType != other.VType { return false } + if p.VStr != other.VStr { + if p.VStr == nil || other.VStr == nil { + return false + } + if (*p.VStr) != (*other.VStr) { return false } + } + if p.VDouble != other.VDouble { + if p.VDouble == nil || other.VDouble == nil { + return false + } + if (*p.VDouble) != (*other.VDouble) { return false } + } + if p.VBool != other.VBool { + if p.VBool == nil || other.VBool == nil { + return false + } + if (*p.VBool) != (*other.VBool) { return false } + } + if p.VLong != other.VLong { + if p.VLong == nil || other.VLong == nil { + return false + } + if (*p.VLong) != (*other.VLong) { return false } + } + if bytes.Compare(p.VBinary, other.VBinary) != 0 { return false } + return true +} + +func (p *Tag) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Tag(%+v)", *p) +} + +// Attributes: +// - Timestamp +// - Fields +type Log struct { + Timestamp int64 `thrift:"timestamp,1,required" db:"timestamp" json:"timestamp"` + Fields []*Tag `thrift:"fields,2,required" db:"fields" json:"fields"` +} + +func NewLog() *Log { + return &Log{} +} + + +func (p *Log) GetTimestamp() int64 { + return p.Timestamp +} + +func (p *Log) GetFields() []*Tag { + return p.Fields +} +func (p *Log) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetTimestamp bool = false; + var issetFields bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetTimestamp = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetFields = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct 
end error: ", p), err) + } + if !issetTimestamp{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Timestamp is not set")); + } + if !issetFields{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Fields is not set")); + } + return nil +} + +func (p *Log) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.Timestamp = v +} + return nil +} + +func (p *Log) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Tag, 0, size) + p.Fields = tSlice + for i := 0; i < size; i ++ { + _elem0 := &Tag{} + if err := _elem0.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) + } + p.Fields = append(p.Fields, _elem0) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Log) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Log"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *Log) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err) } + if err := oprot.WriteI64(ctx, int64(p.Timestamp)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) } + return err +} + +func (p *Log) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "fields", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:fields: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Fields)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Fields { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:fields: ", p), err) } + return err +} + +func (p *Log) Equals(other *Log) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Timestamp != other.Timestamp { return false } + if len(p.Fields) != len(other.Fields) { return false } + for i, _tgt := range p.Fields { + _src1 := other.Fields[i] + if 
!_tgt.Equals(_src1) { return false } + } + return true +} + +func (p *Log) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Log(%+v)", *p) +} + +// Attributes: +// - RefType +// - TraceIdLow +// - TraceIdHigh +// - SpanId +type SpanRef struct { + RefType SpanRefType `thrift:"refType,1,required" db:"refType" json:"refType"` + TraceIdLow int64 `thrift:"traceIdLow,2,required" db:"traceIdLow" json:"traceIdLow"` + TraceIdHigh int64 `thrift:"traceIdHigh,3,required" db:"traceIdHigh" json:"traceIdHigh"` + SpanId int64 `thrift:"spanId,4,required" db:"spanId" json:"spanId"` +} + +func NewSpanRef() *SpanRef { + return &SpanRef{} +} + + +func (p *SpanRef) GetRefType() SpanRefType { + return p.RefType +} + +func (p *SpanRef) GetTraceIdLow() int64 { + return p.TraceIdLow +} + +func (p *SpanRef) GetTraceIdHigh() int64 { + return p.TraceIdHigh +} + +func (p *SpanRef) GetSpanId() int64 { + return p.SpanId +} +func (p *SpanRef) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetRefType bool = false; + var issetTraceIdLow bool = false; + var issetTraceIdHigh bool = false; + var issetSpanId bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetRefType = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetTraceIdLow = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + issetTraceIdHigh = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I64 { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + issetSpanId = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetRefType{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field RefType is not set")); + } + if !issetTraceIdLow{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set")); + } + if !issetTraceIdHigh{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set")); + } + if !issetSpanId{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set")); + } + return nil +} + +func (p *SpanRef) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + temp := SpanRefType(v) + p.RefType = temp +} + return nil +} + 
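(Editorial aside; the example below is illustrative and not part of the vendored diff.) The generated Read/Write/Equals methods in this file are symmetric, so any of these structs can be round-tripped through a thrift.TProtocol. A minimal sketch, assuming Apache Thrift >= 0.14 (whose TProtocol methods take a context.Context, matching the signatures here) and the SpanRefType_CHILD_OF constant generated earlier in this package; roundTripSpanRef is a hypothetical helper name:

import (
	"context"
	"fmt"

	"github.com/apache/thrift/lib/go/thrift"
)

// roundTripSpanRef serializes a SpanRef into an in-memory buffer and reads
// it back, exercising the generated Write/Read/Equals methods above.
func roundTripSpanRef() error {
	ctx := context.Background()

	// TMemoryBuffer is a TTransport backed by a byte buffer; writes append
	// and reads consume, so a single protocol can handle both directions.
	buf := thrift.NewTMemoryBuffer()
	proto := thrift.NewTBinaryProtocolConf(buf, nil)

	in := NewSpanRef()
	in.RefType = SpanRefType_CHILD_OF
	in.TraceIdLow = 42
	in.SpanId = 7

	// Write emits the struct field-by-field via writeField1..writeField4;
	// required fields are always written, even when zero-valued.
	if err := in.Write(ctx, proto); err != nil {
		return err
	}

	// Read consumes the same bytes and enforces the required-field (isset)
	// checks seen in SpanRef.Read above.
	out := NewSpanRef()
	if err := out.Read(ctx, proto); err != nil {
		return err
	}
	fmt.Println(in.Equals(out)) // prints: true
	return nil
}

The same pattern applies to Tag, Log, Span, Process, and Batch; only the field lists differ.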
+func (p *SpanRef) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.TraceIdLow = v +} + return nil +} + +func (p *SpanRef) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) +} else { + p.TraceIdHigh = v +} + return nil +} + +func (p *SpanRef) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) +} else { + p.SpanId = v +} + return nil +} + +func (p *SpanRef) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "SpanRef"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + if err := p.writeField4(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *SpanRef) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "refType", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:refType: ", p), err) } + if err := oprot.WriteI32(ctx, int32(p.RefType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.refType (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:refType: ", p), err) } + return err +} + +func (p *SpanRef) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "traceIdLow", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdLow: ", p), err) } + if err := oprot.WriteI64(ctx, int64(p.TraceIdLow)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdLow: ", p), err) } + return err +} + +func (p *SpanRef) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "traceIdHigh", thrift.I64, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:traceIdHigh: ", p), err) } + if err := oprot.WriteI64(ctx, int64(p.TraceIdHigh)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (3) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:traceIdHigh: ", p), err) } + return err +} + +func (p *SpanRef) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "spanId", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:spanId: ", p), err) } + if err := oprot.WriteI64(ctx, 
int64(p.SpanId)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.spanId (4) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:spanId: ", p), err) } + return err +} + +func (p *SpanRef) Equals(other *SpanRef) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.RefType != other.RefType { return false } + if p.TraceIdLow != other.TraceIdLow { return false } + if p.TraceIdHigh != other.TraceIdHigh { return false } + if p.SpanId != other.SpanId { return false } + return true +} + +func (p *SpanRef) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("SpanRef(%+v)", *p) +} + +// Attributes: +// - TraceIdLow +// - TraceIdHigh +// - SpanId +// - ParentSpanId +// - OperationName +// - References +// - Flags +// - StartTime +// - Duration +// - Tags +// - Logs +type Span struct { + TraceIdLow int64 `thrift:"traceIdLow,1,required" db:"traceIdLow" json:"traceIdLow"` + TraceIdHigh int64 `thrift:"traceIdHigh,2,required" db:"traceIdHigh" json:"traceIdHigh"` + SpanId int64 `thrift:"spanId,3,required" db:"spanId" json:"spanId"` + ParentSpanId int64 `thrift:"parentSpanId,4,required" db:"parentSpanId" json:"parentSpanId"` + OperationName string `thrift:"operationName,5,required" db:"operationName" json:"operationName"` + References []*SpanRef `thrift:"references,6" db:"references" json:"references,omitempty"` + Flags int32 `thrift:"flags,7,required" db:"flags" json:"flags"` + StartTime int64 `thrift:"startTime,8,required" db:"startTime" json:"startTime"` + Duration int64 `thrift:"duration,9,required" db:"duration" json:"duration"` + Tags []*Tag `thrift:"tags,10" db:"tags" json:"tags,omitempty"` + Logs []*Log `thrift:"logs,11" db:"logs" json:"logs,omitempty"` +} + +func NewSpan() *Span { + return &Span{} +} + + +func (p *Span) GetTraceIdLow() int64 { + return p.TraceIdLow +} + +func (p *Span) GetTraceIdHigh() int64 { + return p.TraceIdHigh +} + +func (p *Span) GetSpanId() int64 { + return p.SpanId +} + +func (p *Span) GetParentSpanId() int64 { + return p.ParentSpanId +} + +func (p *Span) GetOperationName() string { + return p.OperationName +} +var Span_References_DEFAULT []*SpanRef + +func (p *Span) GetReferences() []*SpanRef { + return p.References +} + +func (p *Span) GetFlags() int32 { + return p.Flags +} + +func (p *Span) GetStartTime() int64 { + return p.StartTime +} + +func (p *Span) GetDuration() int64 { + return p.Duration +} +var Span_Tags_DEFAULT []*Tag + +func (p *Span) GetTags() []*Tag { + return p.Tags +} +var Span_Logs_DEFAULT []*Log + +func (p *Span) GetLogs() []*Log { + return p.Logs +} +func (p *Span) IsSetReferences() bool { + return p.References != nil +} + +func (p *Span) IsSetTags() bool { + return p.Tags != nil +} + +func (p *Span) IsSetLogs() bool { + return p.Logs != nil +} + +func (p *Span) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetTraceIdLow bool = false; + var issetTraceIdHigh bool = false; + var issetSpanId bool = false; + var issetParentSpanId bool = false; + var issetOperationName bool = false; + var issetFlags bool = false; + var issetStartTime bool = false; + var issetDuration bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: 
", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetTraceIdLow = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetTraceIdHigh = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + issetSpanId = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I64 { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + issetParentSpanId = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.STRING { + if err := p.ReadField5(ctx, iprot); err != nil { + return err + } + issetOperationName = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.LIST { + if err := p.ReadField6(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.I32 { + if err := p.ReadField7(ctx, iprot); err != nil { + return err + } + issetFlags = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.I64 { + if err := p.ReadField8(ctx, iprot); err != nil { + return err + } + issetStartTime = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 9: + if fieldTypeId == thrift.I64 { + if err := p.ReadField9(ctx, iprot); err != nil { + return err + } + issetDuration = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 10: + if fieldTypeId == thrift.LIST { + if err := p.ReadField10(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 11: + if fieldTypeId == thrift.LIST { + if err := p.ReadField11(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetTraceIdLow{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set")); + } + if !issetTraceIdHigh{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set")); + } + if !issetSpanId{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set")); + } + if !issetParentSpanId{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ParentSpanId is not set")); + } + if !issetOperationName{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field OperationName is not set")); + } + if !issetFlags{ + return 
thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Flags is not set")); + } + if !issetStartTime{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StartTime is not set")); + } + if !issetDuration{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Duration is not set")); + } + return nil +} + +func (p *Span) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.TraceIdLow = v +} + return nil +} + +func (p *Span) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.TraceIdHigh = v +} + return nil +} + +func (p *Span) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) +} else { + p.SpanId = v +} + return nil +} + +func (p *Span) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) +} else { + p.ParentSpanId = v +} + return nil +} + +func (p *Span) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 5: ", err) +} else { + p.OperationName = v +} + return nil +} + +func (p *Span) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*SpanRef, 0, size) + p.References = tSlice + for i := 0; i < size; i ++ { + _elem2 := &SpanRef{} + if err := _elem2.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err) + } + p.References = append(p.References, _elem2) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Span) ReadField7(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 7: ", err) +} else { + p.Flags = v +} + return nil +} + +func (p *Span) ReadField8(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 8: ", err) +} else { + p.StartTime = v +} + return nil +} + +func (p *Span) ReadField9(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 9: ", err) +} else { + p.Duration = v +} + return nil +} + +func (p *Span) ReadField10(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Tag, 0, size) + p.Tags = tSlice + for i := 0; i < size; i ++ { + _elem3 := &Tag{} + if err := _elem3.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem3), err) + } + p.Tags = append(p.Tags, _elem3) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading 
list end: ", err) + } + return nil +} + +func (p *Span) ReadField11(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Log, 0, size) + p.Logs = tSlice + for i := 0; i < size; i ++ { + _elem4 := &Log{} + if err := _elem4.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err) + } + p.Logs = append(p.Logs, _elem4) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Span) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Span"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + if err := p.writeField4(ctx, oprot); err != nil { return err } + if err := p.writeField5(ctx, oprot); err != nil { return err } + if err := p.writeField6(ctx, oprot); err != nil { return err } + if err := p.writeField7(ctx, oprot); err != nil { return err } + if err := p.writeField8(ctx, oprot); err != nil { return err } + if err := p.writeField9(ctx, oprot); err != nil { return err } + if err := p.writeField10(ctx, oprot); err != nil { return err } + if err := p.writeField11(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *Span) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "traceIdLow", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:traceIdLow: ", p), err) } + if err := oprot.WriteI64(ctx, int64(p.TraceIdLow)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:traceIdLow: ", p), err) } + return err +} + +func (p *Span) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "traceIdHigh", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdHigh: ", p), err) } + if err := oprot.WriteI64(ctx, int64(p.TraceIdHigh)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdHigh: ", p), err) } + return err +} + +func (p *Span) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "spanId", thrift.I64, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:spanId: ", p), err) } + if err := oprot.WriteI64(ctx, int64(p.SpanId)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.spanId (3) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field end error 3:spanId: ", p), err) } + return err +} + +func (p *Span) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "parentSpanId", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:parentSpanId: ", p), err) } + if err := oprot.WriteI64(ctx, int64(p.ParentSpanId)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.parentSpanId (4) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:parentSpanId: ", p), err) } + return err +} + +func (p *Span) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "operationName", thrift.STRING, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:operationName: ", p), err) } + if err := oprot.WriteString(ctx, string(p.OperationName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.operationName (5) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:operationName: ", p), err) } + return err +} + +func (p *Span) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetReferences() { + if err := oprot.WriteFieldBegin(ctx, "references", thrift.LIST, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:references: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.References)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.References { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:references: ", p), err) } + } + return err +} + +func (p *Span) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "flags", thrift.I32, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:flags: ", p), err) } + if err := oprot.WriteI32(ctx, int32(p.Flags)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.flags (7) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:flags: ", p), err) } + return err +} + +func (p *Span) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "startTime", thrift.I64, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:startTime: ", p), err) } + if err := oprot.WriteI64(ctx, int64(p.StartTime)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.startTime (8) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:startTime: ", p), err) } + return err +} + +func (p *Span) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "duration", thrift.I64, 9); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field begin error 9:duration: ", p), err) } + if err := oprot.WriteI64(ctx, int64(p.Duration)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.duration (9) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 9:duration: ", p), err) } + return err +} + +func (p *Span) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetTags() { + if err := oprot.WriteFieldBegin(ctx, "tags", thrift.LIST, 10); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:tags: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Tags)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Tags { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 10:tags: ", p), err) } + } + return err +} + +func (p *Span) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetLogs() { + if err := oprot.WriteFieldBegin(ctx, "logs", thrift.LIST, 11); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:logs: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Logs)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Logs { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 11:logs: ", p), err) } + } + return err +} + +func (p *Span) Equals(other *Span) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.TraceIdLow != other.TraceIdLow { return false } + if p.TraceIdHigh != other.TraceIdHigh { return false } + if p.SpanId != other.SpanId { return false } + if p.ParentSpanId != other.ParentSpanId { return false } + if p.OperationName != other.OperationName { return false } + if len(p.References) != len(other.References) { return false } + for i, _tgt := range p.References { + _src5 := other.References[i] + if !_tgt.Equals(_src5) { return false } + } + if p.Flags != other.Flags { return false } + if p.StartTime != other.StartTime { return false } + if p.Duration != other.Duration { return false } + if len(p.Tags) != len(other.Tags) { return false } + for i, _tgt := range p.Tags { + _src6 := other.Tags[i] + if !_tgt.Equals(_src6) { return false } + } + if len(p.Logs) != len(other.Logs) { return false } + for i, _tgt := range p.Logs { + _src7 := other.Logs[i] + if !_tgt.Equals(_src7) { return false } + } + return true +} + +func (p *Span) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Span(%+v)", *p) +} + +// Attributes: +// - ServiceName +// - Tags +type Process struct { + ServiceName string `thrift:"serviceName,1,required" db:"serviceName" json:"serviceName"` + Tags []*Tag `thrift:"tags,2" db:"tags" json:"tags,omitempty"` +} + +func 
NewProcess() *Process { + return &Process{} +} + + +func (p *Process) GetServiceName() string { + return p.ServiceName +} +var Process_Tags_DEFAULT []*Tag + +func (p *Process) GetTags() []*Tag { + return p.Tags +} +func (p *Process) IsSetTags() bool { + return p.Tags != nil +} + +func (p *Process) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetServiceName bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetServiceName = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetServiceName{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ServiceName is not set")); + } + return nil +} + +func (p *Process) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.ServiceName = v +} + return nil +} + +func (p *Process) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Tag, 0, size) + p.Tags = tSlice + for i := 0; i < size; i ++ { + _elem8 := &Tag{} + if err := _elem8.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem8), err) + } + p.Tags = append(p.Tags, _elem8) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Process) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Process"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *Process) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "serviceName", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) } + if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.serviceName 
(1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) } + return err +} + +func (p *Process) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetTags() { + if err := oprot.WriteFieldBegin(ctx, "tags", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tags: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Tags)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Tags { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tags: ", p), err) } + } + return err +} + +func (p *Process) Equals(other *Process) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.ServiceName != other.ServiceName { return false } + if len(p.Tags) != len(other.Tags) { return false } + for i, _tgt := range p.Tags { + _src9 := other.Tags[i] + if !_tgt.Equals(_src9) { return false } + } + return true +} + +func (p *Process) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Process(%+v)", *p) +} + +// Attributes: +// - FullQueueDroppedSpans +// - TooLargeDroppedSpans +// - FailedToEmitSpans +type ClientStats struct { + FullQueueDroppedSpans int64 `thrift:"fullQueueDroppedSpans,1,required" db:"fullQueueDroppedSpans" json:"fullQueueDroppedSpans"` + TooLargeDroppedSpans int64 `thrift:"tooLargeDroppedSpans,2,required" db:"tooLargeDroppedSpans" json:"tooLargeDroppedSpans"` + FailedToEmitSpans int64 `thrift:"failedToEmitSpans,3,required" db:"failedToEmitSpans" json:"failedToEmitSpans"` +} + +func NewClientStats() *ClientStats { + return &ClientStats{} +} + + +func (p *ClientStats) GetFullQueueDroppedSpans() int64 { + return p.FullQueueDroppedSpans +} + +func (p *ClientStats) GetTooLargeDroppedSpans() int64 { + return p.TooLargeDroppedSpans +} + +func (p *ClientStats) GetFailedToEmitSpans() int64 { + return p.FailedToEmitSpans +} +func (p *ClientStats) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetFullQueueDroppedSpans bool = false; + var issetTooLargeDroppedSpans bool = false; + var issetFailedToEmitSpans bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetFullQueueDroppedSpans = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetTooLargeDroppedSpans = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3(ctx, iprot); err != nil { + 
return err + } + issetFailedToEmitSpans = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetFullQueueDroppedSpans{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FullQueueDroppedSpans is not set")); + } + if !issetTooLargeDroppedSpans{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TooLargeDroppedSpans is not set")); + } + if !issetFailedToEmitSpans{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FailedToEmitSpans is not set")); + } + return nil +} + +func (p *ClientStats) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.FullQueueDroppedSpans = v +} + return nil +} + +func (p *ClientStats) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.TooLargeDroppedSpans = v +} + return nil +} + +func (p *ClientStats) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) +} else { + p.FailedToEmitSpans = v +} + return nil +} + +func (p *ClientStats) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "ClientStats"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *ClientStats) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "fullQueueDroppedSpans", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:fullQueueDroppedSpans: ", p), err) } + if err := oprot.WriteI64(ctx, int64(p.FullQueueDroppedSpans)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.fullQueueDroppedSpans (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:fullQueueDroppedSpans: ", p), err) } + return err +} + +func (p *ClientStats) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "tooLargeDroppedSpans", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tooLargeDroppedSpans: ", p), err) } + if err := oprot.WriteI64(ctx, int64(p.TooLargeDroppedSpans)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.tooLargeDroppedSpans (2) field write error: ", p), err) } + if err := 
oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tooLargeDroppedSpans: ", p), err) } + return err +} + +func (p *ClientStats) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "failedToEmitSpans", thrift.I64, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:failedToEmitSpans: ", p), err) } + if err := oprot.WriteI64(ctx, int64(p.FailedToEmitSpans)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.failedToEmitSpans (3) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:failedToEmitSpans: ", p), err) } + return err +} + +func (p *ClientStats) Equals(other *ClientStats) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.FullQueueDroppedSpans != other.FullQueueDroppedSpans { return false } + if p.TooLargeDroppedSpans != other.TooLargeDroppedSpans { return false } + if p.FailedToEmitSpans != other.FailedToEmitSpans { return false } + return true +} + +func (p *ClientStats) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ClientStats(%+v)", *p) +} + +// Attributes: +// - Process +// - Spans +// - SeqNo +// - Stats +type Batch struct { + Process *Process `thrift:"process,1,required" db:"process" json:"process"` + Spans []*Span `thrift:"spans,2,required" db:"spans" json:"spans"` + SeqNo *int64 `thrift:"seqNo,3" db:"seqNo" json:"seqNo,omitempty"` + Stats *ClientStats `thrift:"stats,4" db:"stats" json:"stats,omitempty"` +} + +func NewBatch() *Batch { + return &Batch{} +} + +var Batch_Process_DEFAULT *Process +func (p *Batch) GetProcess() *Process { + if !p.IsSetProcess() { + return Batch_Process_DEFAULT + } +return p.Process +} + +func (p *Batch) GetSpans() []*Span { + return p.Spans +} +var Batch_SeqNo_DEFAULT int64 +func (p *Batch) GetSeqNo() int64 { + if !p.IsSetSeqNo() { + return Batch_SeqNo_DEFAULT + } +return *p.SeqNo +} +var Batch_Stats_DEFAULT *ClientStats +func (p *Batch) GetStats() *ClientStats { + if !p.IsSetStats() { + return Batch_Stats_DEFAULT + } +return p.Stats +} +func (p *Batch) IsSetProcess() bool { + return p.Process != nil +} + +func (p *Batch) IsSetSeqNo() bool { + return p.SeqNo != nil +} + +func (p *Batch) IsSetStats() bool { + return p.Stats != nil +} + +func (p *Batch) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetProcess bool = false; + var issetSpans bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetProcess = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetSpans = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, 
fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetProcess{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Process is not set")); + } + if !issetSpans{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Spans is not set")); + } + return nil +} + +func (p *Batch) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Process = &Process{} + if err := p.Process.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Process), err) + } + return nil +} + +func (p *Batch) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Span, 0, size) + p.Spans = tSlice + for i := 0; i < size; i ++ { + _elem10 := &Span{} + if err := _elem10.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem10), err) + } + p.Spans = append(p.Spans, _elem10) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Batch) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) +} else { + p.SeqNo = &v +} + return nil +} + +func (p *Batch) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + p.Stats = &ClientStats{} + if err := p.Stats.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Stats), err) + } + return nil +} + +func (p *Batch) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Batch"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + if err := p.writeField4(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *Batch) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "process", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:process: ", p), err) } + if err := p.Process.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Process), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:process: ", p), err) } + return 
err +} + +func (p *Batch) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:spans: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Spans { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:spans: ", p), err) } + return err +} + +func (p *Batch) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSeqNo() { + if err := oprot.WriteFieldBegin(ctx, "seqNo", thrift.I64, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:seqNo: ", p), err) } + if err := oprot.WriteI64(ctx, int64(*p.SeqNo)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.seqNo (3) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:seqNo: ", p), err) } + } + return err +} + +func (p *Batch) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetStats() { + if err := oprot.WriteFieldBegin(ctx, "stats", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:stats: ", p), err) } + if err := p.Stats.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Stats), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:stats: ", p), err) } + } + return err +} + +func (p *Batch) Equals(other *Batch) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.Process.Equals(other.Process) { return false } + if len(p.Spans) != len(other.Spans) { return false } + for i, _tgt := range p.Spans { + _src11 := other.Spans[i] + if !_tgt.Equals(_src11) { return false } + } + if p.SeqNo != other.SeqNo { + if p.SeqNo == nil || other.SeqNo == nil { + return false + } + if (*p.SeqNo) != (*other.SeqNo) { return false } + } + if !p.Stats.Equals(other.Stats) { return false } + return true +} + +func (p *Batch) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Batch(%+v)", *p) +} + +// Attributes: +// - Ok +type BatchSubmitResponse struct { + Ok bool `thrift:"ok,1,required" db:"ok" json:"ok"` +} + +func NewBatchSubmitResponse() *BatchSubmitResponse { + return &BatchSubmitResponse{} +} + + +func (p *BatchSubmitResponse) GetOk() bool { + return p.Ok +} +func (p *BatchSubmitResponse) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetOk bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField1(ctx, 
iprot); err != nil { + return err + } + issetOk = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetOk{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set")); + } + return nil +} + +func (p *BatchSubmitResponse) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.Ok = v +} + return nil +} + +func (p *BatchSubmitResponse) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "BatchSubmitResponse"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *BatchSubmitResponse) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "ok", thrift.BOOL, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err) } + if err := oprot.WriteBool(ctx, bool(p.Ok)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err) } + return err +} + +func (p *BatchSubmitResponse) Equals(other *BatchSubmitResponse) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Ok != other.Ok { return false } + return true +} + +func (p *BatchSubmitResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BatchSubmitResponse(%+v)", *p) +} + +type Collector interface { + // Parameters: + // - Batches + SubmitBatches(ctx context.Context, batches []*Batch) (_r []*BatchSubmitResponse, _err error) +} + +type CollectorClient struct { + c thrift.TClient + meta thrift.ResponseMeta +} + +func NewCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *CollectorClient { + return &CollectorClient{ + c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), + } +} + +func NewCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *CollectorClient { + return &CollectorClient{ + c: thrift.NewTStandardClient(iprot, oprot), + } +} + +func NewCollectorClient(c thrift.TClient) *CollectorClient { + return &CollectorClient{ + c: c, + } +} + +func (p *CollectorClient) Client_() thrift.TClient { + return p.c +} + +func (p *CollectorClient) LastResponseMeta_() thrift.ResponseMeta { + return p.meta +} + +func (p *CollectorClient) SetLastResponseMeta_(meta thrift.ResponseMeta) { + p.meta = meta +} + +// Parameters: +// - Batches +func (p *CollectorClient) SubmitBatches(ctx context.Context, batches []*Batch) (_r []*BatchSubmitResponse, _err error) { + var _args12 CollectorSubmitBatchesArgs + 
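+ // (Editorial comment, not part of the generated file:) the client packs its parameters into a CollectorSubmitBatchesArgs value, issues one TClient.Call for "submitBatches", and unwraps the CollectorSubmitBatchesResult; the numbered temporaries (_args12, _meta13, _result14) are simply names minted by the Thrift generator.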
_args12.Batches = batches + var _result14 CollectorSubmitBatchesResult + var _meta13 thrift.ResponseMeta + _meta13, _err = p.Client_().Call(ctx, "submitBatches", &_args12, &_result14) + p.SetLastResponseMeta_(_meta13) + if _err != nil { + return + } + return _result14.GetSuccess(), nil +} + +type CollectorProcessor struct { + processorMap map[string]thrift.TProcessorFunction + handler Collector +} + +func (p *CollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { + p.processorMap[key] = processor +} + +func (p *CollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { + processor, ok = p.processorMap[key] + return processor, ok +} + +func (p *CollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { + return p.processorMap +} + +func NewCollectorProcessor(handler Collector) *CollectorProcessor { + + self15 := &CollectorProcessor{handler:handler, processorMap:make(map[string]thrift.TProcessorFunction)} + self15.processorMap["submitBatches"] = &collectorProcessorSubmitBatches{handler:handler} +return self15 +} + +func (p *CollectorProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + name, _, seqId, err2 := iprot.ReadMessageBegin(ctx) + if err2 != nil { return false, thrift.WrapTException(err2) } + if processor, ok := p.GetProcessorFunction(name); ok { + return processor.Process(ctx, seqId, iprot, oprot) + } + iprot.Skip(ctx, thrift.STRUCT) + iprot.ReadMessageEnd(ctx) + x16 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name) + oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId) + x16.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, x16 + +} + +type collectorProcessorSubmitBatches struct { + handler Collector +} + +func (p *collectorProcessorSubmitBatches) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := CollectorSubmitBatchesArgs{} + var err2 error + if err2 = args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) + oprot.WriteMessageBegin(ctx, "submitBatches", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := func() {} + // Start a goroutine to do server side connectivity check. 
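Reviewer note: the generated handler wrapper below guards long-running calls with a server-side connectivity check. When `thrift.ServerConnectivityCheckInterval` is positive, a ticker goroutine polls the input transport and cancels the handler's context as soon as the peer disconnects. A minimal, self-contained sketch of the same pattern, with `isOpen` standing in for `iprot.Transport().IsOpen()`:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// watchConnection mirrors the ticker goroutine in the generated Process
// method: poll isOpen at the given interval and cancel the handler's
// context once the connection is gone.
func watchConnection(ctx context.Context, cancel context.CancelFunc, interval time.Duration, isOpen func() bool) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return // handler finished (or was already cancelled); stop polling
		case <-ticker.C:
			if !isOpen() {
				cancel() // client went away; abandon the in-flight call
				return
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Simulate a transport that closes after 50ms.
	closed := make(chan struct{})
	time.AfterFunc(50*time.Millisecond, func() { close(closed) })
	isOpen := func() bool {
		select {
		case <-closed:
			return false
		default:
			return true
		}
	}

	go watchConnection(ctx, cancel, 10*time.Millisecond, isOpen)

	<-ctx.Done() // a slow SubmitBatches handler would observe this cancellation
	fmt.Println("handler context cancelled:", ctx.Err())
}
```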
+ if thrift.ServerConnectivityCheckInterval > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(ctx) + defer cancel() + var tickerCtx context.Context + tickerCtx, tickerCancel = context.WithCancel(context.Background()) + defer tickerCancel() + go func(ctx context.Context, cancel context.CancelFunc) { + ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if !iprot.Transport().IsOpen() { + cancel() + return + } + } + } + }(tickerCtx, cancel) + } + + result := CollectorSubmitBatchesResult{} + var retval []*BatchSubmitResponse + if retval, err2 = p.handler.SubmitBatches(ctx, args.Batches); err2 != nil { + tickerCancel() + if err2 == thrift.ErrAbandonRequest { + return false, thrift.WrapTException(err2) + } + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitBatches: " + err2.Error()) + oprot.WriteMessageBegin(ctx, "submitBatches", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return true, thrift.WrapTException(err2) + } else { + result.Success = retval + } + tickerCancel() + if err2 = oprot.WriteMessageBegin(ctx, "submitBatches", thrift.REPLY, seqId); err2 != nil { + err = thrift.WrapTException(err2) + } + if err2 = result.Write(ctx, oprot); err == nil && err2 != nil { + err = thrift.WrapTException(err2) + } + if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil { + err = thrift.WrapTException(err2) + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = thrift.WrapTException(err2) + } + if err != nil { + return + } + return true, err +} + + +// HELPER FUNCTIONS AND STRUCTURES + +// Attributes: +// - Batches +type CollectorSubmitBatchesArgs struct { + Batches []*Batch `thrift:"batches,1" db:"batches" json:"batches"` +} + +func NewCollectorSubmitBatchesArgs() *CollectorSubmitBatchesArgs { + return &CollectorSubmitBatchesArgs{} +} + + +func (p *CollectorSubmitBatchesArgs) GetBatches() []*Batch { + return p.Batches +} +func (p *CollectorSubmitBatchesArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *CollectorSubmitBatchesArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Batch, 0, size) + p.Batches = tSlice + for i := 0; i < size; i ++ { + _elem17 := &Batch{} + if err := _elem17.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem17), err) + } + 
p.Batches = append(p.Batches, _elem17) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *CollectorSubmitBatchesArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "submitBatches_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *CollectorSubmitBatchesArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "batches", thrift.LIST, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batches: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Batches)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Batches { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batches: ", p), err) } + return err +} + +func (p *CollectorSubmitBatchesArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("CollectorSubmitBatchesArgs(%+v)", *p) +} + +// Attributes: +// - Success +type CollectorSubmitBatchesResult struct { + Success []*BatchSubmitResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewCollectorSubmitBatchesResult() *CollectorSubmitBatchesResult { + return &CollectorSubmitBatchesResult{} +} + +var CollectorSubmitBatchesResult_Success_DEFAULT []*BatchSubmitResponse + +func (p *CollectorSubmitBatchesResult) GetSuccess() []*BatchSubmitResponse { + return p.Success +} +func (p *CollectorSubmitBatchesResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *CollectorSubmitBatchesResult) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 0: + if fieldTypeId == thrift.LIST { + if err := p.ReadField0(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *CollectorSubmitBatchesResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list 
begin: ", err) + } + tSlice := make([]*BatchSubmitResponse, 0, size) + p.Success = tSlice + for i := 0; i < size; i ++ { + _elem18 := &BatchSubmitResponse{} + if err := _elem18.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem18), err) + } + p.Success = append(p.Success, _elem18) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *CollectorSubmitBatchesResult) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "submitBatches_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField0(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *CollectorSubmitBatchesResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Success { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) } + } + return err +} + +func (p *CollectorSubmitBatchesResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("CollectorSubmitBatchesResult(%+v)", *p) +} + + diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/ttypes.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/ttypes.go deleted file mode 100644 index b5ddfa645..000000000 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/ttypes.go +++ /dev/null @@ -1,1838 +0,0 @@ -// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package jaeger - -import ( - "bytes" - "fmt" - "github.com/apache/thrift/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) 
-var _ = thrift.ZERO -var _ = fmt.Printf -var _ = bytes.Equal - -var GoUnusedProtection__ int - -type TagType int64 - -const ( - TagType_STRING TagType = 0 - TagType_DOUBLE TagType = 1 - TagType_BOOL TagType = 2 - TagType_LONG TagType = 3 - TagType_BINARY TagType = 4 -) - -func (p TagType) String() string { - switch p { - case TagType_STRING: - return "STRING" - case TagType_DOUBLE: - return "DOUBLE" - case TagType_BOOL: - return "BOOL" - case TagType_LONG: - return "LONG" - case TagType_BINARY: - return "BINARY" - } - return "" -} - -func TagTypeFromString(s string) (TagType, error) { - switch s { - case "STRING": - return TagType_STRING, nil - case "DOUBLE": - return TagType_DOUBLE, nil - case "BOOL": - return TagType_BOOL, nil - case "LONG": - return TagType_LONG, nil - case "BINARY": - return TagType_BINARY, nil - } - return TagType(0), fmt.Errorf("not a valid TagType string") -} - -func TagTypePtr(v TagType) *TagType { return &v } - -func (p TagType) MarshalText() ([]byte, error) { - return []byte(p.String()), nil -} - -func (p *TagType) UnmarshalText(text []byte) error { - q, err := TagTypeFromString(string(text)) - if err != nil { - return err - } - *p = q - return nil -} - -type SpanRefType int64 - -const ( - SpanRefType_CHILD_OF SpanRefType = 0 - SpanRefType_FOLLOWS_FROM SpanRefType = 1 -) - -func (p SpanRefType) String() string { - switch p { - case SpanRefType_CHILD_OF: - return "CHILD_OF" - case SpanRefType_FOLLOWS_FROM: - return "FOLLOWS_FROM" - } - return "" -} - -func SpanRefTypeFromString(s string) (SpanRefType, error) { - switch s { - case "CHILD_OF": - return SpanRefType_CHILD_OF, nil - case "FOLLOWS_FROM": - return SpanRefType_FOLLOWS_FROM, nil - } - return SpanRefType(0), fmt.Errorf("not a valid SpanRefType string") -} - -func SpanRefTypePtr(v SpanRefType) *SpanRefType { return &v } - -func (p SpanRefType) MarshalText() ([]byte, error) { - return []byte(p.String()), nil -} - -func (p *SpanRefType) UnmarshalText(text []byte) error { - q, err := SpanRefTypeFromString(string(text)) - if err != nil { - return err - } - *p = q - return nil -} - -// Attributes: -// - Key -// - VType -// - VStr -// - VDouble -// - VBool -// - VLong -// - VBinary -type Tag struct { - Key string `thrift:"key,1,required" json:"key"` - VType TagType `thrift:"vType,2,required" json:"vType"` - VStr *string `thrift:"vStr,3" json:"vStr,omitempty"` - VDouble *float64 `thrift:"vDouble,4" json:"vDouble,omitempty"` - VBool *bool `thrift:"vBool,5" json:"vBool,omitempty"` - VLong *int64 `thrift:"vLong,6" json:"vLong,omitempty"` - VBinary []byte `thrift:"vBinary,7" json:"vBinary,omitempty"` -} - -func NewTag() *Tag { - return &Tag{} -} - -func (p *Tag) GetKey() string { - return p.Key -} - -func (p *Tag) GetVType() TagType { - return p.VType -} - -var Tag_VStr_DEFAULT string - -func (p *Tag) GetVStr() string { - if !p.IsSetVStr() { - return Tag_VStr_DEFAULT - } - return *p.VStr -} - -var Tag_VDouble_DEFAULT float64 - -func (p *Tag) GetVDouble() float64 { - if !p.IsSetVDouble() { - return Tag_VDouble_DEFAULT - } - return *p.VDouble -} - -var Tag_VBool_DEFAULT bool - -func (p *Tag) GetVBool() bool { - if !p.IsSetVBool() { - return Tag_VBool_DEFAULT - } - return *p.VBool -} - -var Tag_VLong_DEFAULT int64 - -func (p *Tag) GetVLong() int64 { - if !p.IsSetVLong() { - return Tag_VLong_DEFAULT - } - return *p.VLong -} - -var Tag_VBinary_DEFAULT []byte - -func (p *Tag) GetVBinary() []byte { - return p.VBinary -} -func (p *Tag) IsSetVStr() bool { - return p.VStr != nil -} - -func (p *Tag) IsSetVDouble() bool { - 
return p.VDouble != nil -} - -func (p *Tag) IsSetVBool() bool { - return p.VBool != nil -} - -func (p *Tag) IsSetVLong() bool { - return p.VLong != nil -} - -func (p *Tag) IsSetVBinary() bool { - return p.VBinary != nil -} - -func (p *Tag) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetKey bool = false - var issetVType bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetKey = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - issetVType = true - case 3: - if err := p.readField3(iprot); err != nil { - return err - } - case 4: - if err := p.readField4(iprot); err != nil { - return err - } - case 5: - if err := p.readField5(iprot); err != nil { - return err - } - case 6: - if err := p.readField6(iprot); err != nil { - return err - } - case 7: - if err := p.readField7(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetKey { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Key is not set")) - } - if !issetVType { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field VType is not set")) - } - return nil -} - -func (p *Tag) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Key = v - } - return nil -} - -func (p *Tag) readField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - temp := TagType(v) - p.VType = temp - } - return nil -} - -func (p *Tag) readField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.VStr = &v - } - return nil -} - -func (p *Tag) readField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadDouble(); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.VDouble = &v - } - return nil -} - -func (p *Tag) readField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 5: ", err) - } else { - p.VBool = &v - } - return nil -} - -func (p *Tag) readField6(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 6: ", err) - } else { - p.VLong = &v - } - return nil -} - -func (p *Tag) readField7(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 7: ", err) - } else { - p.VBinary = v - } - return nil -} - -func (p *Tag) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("Tag"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := 
p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - if err := p.writeField5(oprot); err != nil { - return err - } - if err := p.writeField6(oprot); err != nil { - return err - } - if err := p.writeField7(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Tag) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("key", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) - } - if err := oprot.WriteString(string(p.Key)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) - } - return err -} - -func (p *Tag) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("vType", thrift.I32, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:vType: ", p), err) - } - if err := oprot.WriteI32(int32(p.VType)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.vType (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:vType: ", p), err) - } - return err -} - -func (p *Tag) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetVStr() { - if err := oprot.WriteFieldBegin("vStr", thrift.STRING, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:vStr: ", p), err) - } - if err := oprot.WriteString(string(*p.VStr)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.vStr (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:vStr: ", p), err) - } - } - return err -} - -func (p *Tag) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetVDouble() { - if err := oprot.WriteFieldBegin("vDouble", thrift.DOUBLE, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:vDouble: ", p), err) - } - if err := oprot.WriteDouble(float64(*p.VDouble)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.vDouble (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:vDouble: ", p), err) - } - } - return err -} - -func (p *Tag) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetVBool() { - if err := oprot.WriteFieldBegin("vBool", thrift.BOOL, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:vBool: ", p), err) - } - if err := oprot.WriteBool(bool(*p.VBool)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.vBool (5) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:vBool: ", p), err) - } - } - return err -} - -func (p *Tag) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetVLong() { - 
if err := oprot.WriteFieldBegin("vLong", thrift.I64, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:vLong: ", p), err) - } - if err := oprot.WriteI64(int64(*p.VLong)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.vLong (6) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:vLong: ", p), err) - } - } - return err -} - -func (p *Tag) writeField7(oprot thrift.TProtocol) (err error) { - if p.IsSetVBinary() { - if err := oprot.WriteFieldBegin("vBinary", thrift.STRING, 7); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:vBinary: ", p), err) - } - if err := oprot.WriteBinary(p.VBinary); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.vBinary (7) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 7:vBinary: ", p), err) - } - } - return err -} - -func (p *Tag) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Tag(%+v)", *p) -} - -// Attributes: -// - Timestamp -// - Fields -type Log struct { - Timestamp int64 `thrift:"timestamp,1,required" json:"timestamp"` - Fields []*Tag `thrift:"fields,2,required" json:"fields"` -} - -func NewLog() *Log { - return &Log{} -} - -func (p *Log) GetTimestamp() int64 { - return p.Timestamp -} - -func (p *Log) GetFields() []*Tag { - return p.Fields -} -func (p *Log) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetTimestamp bool = false - var issetFields bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetTimestamp = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - issetFields = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetTimestamp { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Timestamp is not set")) - } - if !issetFields { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Fields is not set")) - } - return nil -} - -func (p *Log) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Timestamp = v - } - return nil -} - -func (p *Log) readField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Tag, 0, size) - p.Fields = tSlice - for i := 0; i < size; i++ { - _elem0 := &Tag{} - if err := _elem0.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) - } - p.Fields = append(p.Fields, _elem0) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", 
err) - } - return nil -} - -func (p *Log) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("Log"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Log) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err) - } - if err := oprot.WriteI64(int64(p.Timestamp)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) - } - return err -} - -func (p *Log) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("fields", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:fields: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Fields)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Fields { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:fields: ", p), err) - } - return err -} - -func (p *Log) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Log(%+v)", *p) -} - -// Attributes: -// - RefType -// - TraceIdLow -// - TraceIdHigh -// - SpanId -type SpanRef struct { - RefType SpanRefType `thrift:"refType,1,required" json:"refType"` - TraceIdLow int64 `thrift:"traceIdLow,2,required" json:"traceIdLow"` - TraceIdHigh int64 `thrift:"traceIdHigh,3,required" json:"traceIdHigh"` - SpanId int64 `thrift:"spanId,4,required" json:"spanId"` -} - -func NewSpanRef() *SpanRef { - return &SpanRef{} -} - -func (p *SpanRef) GetRefType() SpanRefType { - return p.RefType -} - -func (p *SpanRef) GetTraceIdLow() int64 { - return p.TraceIdLow -} - -func (p *SpanRef) GetTraceIdHigh() int64 { - return p.TraceIdHigh -} - -func (p *SpanRef) GetSpanId() int64 { - return p.SpanId -} -func (p *SpanRef) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetRefType bool = false - var issetTraceIdLow bool = false - var issetTraceIdHigh bool = false - var issetSpanId bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetRefType = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - issetTraceIdLow = true - case 3: - if err := 
p.readField3(iprot); err != nil { - return err - } - issetTraceIdHigh = true - case 4: - if err := p.readField4(iprot); err != nil { - return err - } - issetSpanId = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetRefType { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field RefType is not set")) - } - if !issetTraceIdLow { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set")) - } - if !issetTraceIdHigh { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set")) - } - if !issetSpanId { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set")) - } - return nil -} - -func (p *SpanRef) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - temp := SpanRefType(v) - p.RefType = temp - } - return nil -} - -func (p *SpanRef) readField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.TraceIdLow = v - } - return nil -} - -func (p *SpanRef) readField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.TraceIdHigh = v - } - return nil -} - -func (p *SpanRef) readField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.SpanId = v - } - return nil -} - -func (p *SpanRef) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("SpanRef"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *SpanRef) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("refType", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:refType: ", p), err) - } - if err := oprot.WriteI32(int32(p.RefType)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.refType (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:refType: ", p), err) - } - return err -} - -func (p *SpanRef) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("traceIdLow", thrift.I64, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdLow: ", p), err) - } - if err := oprot.WriteI64(int64(p.TraceIdLow)); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T.traceIdLow (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdLow: ", p), err) - } - return err -} - -func (p *SpanRef) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("traceIdHigh", thrift.I64, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:traceIdHigh: ", p), err) - } - if err := oprot.WriteI64(int64(p.TraceIdHigh)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:traceIdHigh: ", p), err) - } - return err -} - -func (p *SpanRef) writeField4(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("spanId", thrift.I64, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:spanId: ", p), err) - } - if err := oprot.WriteI64(int64(p.SpanId)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.spanId (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:spanId: ", p), err) - } - return err -} - -func (p *SpanRef) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("SpanRef(%+v)", *p) -} - -// Attributes: -// - TraceIdLow -// - TraceIdHigh -// - SpanId -// - ParentSpanId -// - OperationName -// - References -// - Flags -// - StartTime -// - Duration -// - Tags -// - Logs -type Span struct { - TraceIdLow int64 `thrift:"traceIdLow,1,required" json:"traceIdLow"` - TraceIdHigh int64 `thrift:"traceIdHigh,2,required" json:"traceIdHigh"` - SpanId int64 `thrift:"spanId,3,required" json:"spanId"` - ParentSpanId int64 `thrift:"parentSpanId,4,required" json:"parentSpanId"` - OperationName string `thrift:"operationName,5,required" json:"operationName"` - References []*SpanRef `thrift:"references,6" json:"references,omitempty"` - Flags int32 `thrift:"flags,7,required" json:"flags"` - StartTime int64 `thrift:"startTime,8,required" json:"startTime"` - Duration int64 `thrift:"duration,9,required" json:"duration"` - Tags []*Tag `thrift:"tags,10" json:"tags,omitempty"` - Logs []*Log `thrift:"logs,11" json:"logs,omitempty"` -} - -func NewSpan() *Span { - return &Span{} -} - -func (p *Span) GetTraceIdLow() int64 { - return p.TraceIdLow -} - -func (p *Span) GetTraceIdHigh() int64 { - return p.TraceIdHigh -} - -func (p *Span) GetSpanId() int64 { - return p.SpanId -} - -func (p *Span) GetParentSpanId() int64 { - return p.ParentSpanId -} - -func (p *Span) GetOperationName() string { - return p.OperationName -} - -var Span_References_DEFAULT []*SpanRef - -func (p *Span) GetReferences() []*SpanRef { - return p.References -} - -func (p *Span) GetFlags() int32 { - return p.Flags -} - -func (p *Span) GetStartTime() int64 { - return p.StartTime -} - -func (p *Span) GetDuration() int64 { - return p.Duration -} - -var Span_Tags_DEFAULT []*Tag - -func (p *Span) GetTags() []*Tag { - return p.Tags -} - -var Span_Logs_DEFAULT []*Log - -func (p *Span) GetLogs() []*Log { - return p.Logs -} -func (p *Span) IsSetReferences() bool { - return p.References != nil -} - -func (p *Span) IsSetTags() bool { - return p.Tags != nil -} - -func (p *Span) IsSetLogs() bool { - return p.Logs != nil -} - -func (p *Span) Read(iprot thrift.TProtocol) error { - if _, err := 
iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetTraceIdLow bool = false - var issetTraceIdHigh bool = false - var issetSpanId bool = false - var issetParentSpanId bool = false - var issetOperationName bool = false - var issetFlags bool = false - var issetStartTime bool = false - var issetDuration bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetTraceIdLow = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - issetTraceIdHigh = true - case 3: - if err := p.readField3(iprot); err != nil { - return err - } - issetSpanId = true - case 4: - if err := p.readField4(iprot); err != nil { - return err - } - issetParentSpanId = true - case 5: - if err := p.readField5(iprot); err != nil { - return err - } - issetOperationName = true - case 6: - if err := p.readField6(iprot); err != nil { - return err - } - case 7: - if err := p.readField7(iprot); err != nil { - return err - } - issetFlags = true - case 8: - if err := p.readField8(iprot); err != nil { - return err - } - issetStartTime = true - case 9: - if err := p.readField9(iprot); err != nil { - return err - } - issetDuration = true - case 10: - if err := p.readField10(iprot); err != nil { - return err - } - case 11: - if err := p.readField11(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetTraceIdLow { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set")) - } - if !issetTraceIdHigh { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set")) - } - if !issetSpanId { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set")) - } - if !issetParentSpanId { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ParentSpanId is not set")) - } - if !issetOperationName { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field OperationName is not set")) - } - if !issetFlags { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Flags is not set")) - } - if !issetStartTime { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StartTime is not set")) - } - if !issetDuration { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Duration is not set")) - } - return nil -} - -func (p *Span) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.TraceIdLow = v - } - return nil -} - -func (p *Span) readField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.TraceIdHigh = v - } - return nil -} - -func (p *Span) 
readField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.SpanId = v - } - return nil -} - -func (p *Span) readField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.ParentSpanId = v - } - return nil -} - -func (p *Span) readField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 5: ", err) - } else { - p.OperationName = v - } - return nil -} - -func (p *Span) readField6(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*SpanRef, 0, size) - p.References = tSlice - for i := 0; i < size; i++ { - _elem1 := &SpanRef{} - if err := _elem1.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err) - } - p.References = append(p.References, _elem1) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Span) readField7(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 7: ", err) - } else { - p.Flags = v - } - return nil -} - -func (p *Span) readField8(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 8: ", err) - } else { - p.StartTime = v - } - return nil -} - -func (p *Span) readField9(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 9: ", err) - } else { - p.Duration = v - } - return nil -} - -func (p *Span) readField10(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Tag, 0, size) - p.Tags = tSlice - for i := 0; i < size; i++ { - _elem2 := &Tag{} - if err := _elem2.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err) - } - p.Tags = append(p.Tags, _elem2) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Span) readField11(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Log, 0, size) - p.Logs = tSlice - for i := 0; i < size; i++ { - _elem3 := &Log{} - if err := _elem3.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem3), err) - } - p.Logs = append(p.Logs, _elem3) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Span) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("Span"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - if err := 
p.writeField5(oprot); err != nil { - return err - } - if err := p.writeField6(oprot); err != nil { - return err - } - if err := p.writeField7(oprot); err != nil { - return err - } - if err := p.writeField8(oprot); err != nil { - return err - } - if err := p.writeField9(oprot); err != nil { - return err - } - if err := p.writeField10(oprot); err != nil { - return err - } - if err := p.writeField11(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Span) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("traceIdLow", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:traceIdLow: ", p), err) - } - if err := oprot.WriteI64(int64(p.TraceIdLow)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:traceIdLow: ", p), err) - } - return err -} - -func (p *Span) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("traceIdHigh", thrift.I64, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdHigh: ", p), err) - } - if err := oprot.WriteI64(int64(p.TraceIdHigh)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdHigh: ", p), err) - } - return err -} - -func (p *Span) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("spanId", thrift.I64, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:spanId: ", p), err) - } - if err := oprot.WriteI64(int64(p.SpanId)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.spanId (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:spanId: ", p), err) - } - return err -} - -func (p *Span) writeField4(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("parentSpanId", thrift.I64, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:parentSpanId: ", p), err) - } - if err := oprot.WriteI64(int64(p.ParentSpanId)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.parentSpanId (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:parentSpanId: ", p), err) - } - return err -} - -func (p *Span) writeField5(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("operationName", thrift.STRING, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:operationName: ", p), err) - } - if err := oprot.WriteString(string(p.OperationName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.operationName (5) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:operationName: ", p), err) - } - return err -} - -func (p *Span) writeField6(oprot 
thrift.TProtocol) (err error) { - if p.IsSetReferences() { - if err := oprot.WriteFieldBegin("references", thrift.LIST, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:references: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.References)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.References { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:references: ", p), err) - } - } - return err -} - -func (p *Span) writeField7(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("flags", thrift.I32, 7); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:flags: ", p), err) - } - if err := oprot.WriteI32(int32(p.Flags)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.flags (7) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 7:flags: ", p), err) - } - return err -} - -func (p *Span) writeField8(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("startTime", thrift.I64, 8); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:startTime: ", p), err) - } - if err := oprot.WriteI64(int64(p.StartTime)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.startTime (8) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 8:startTime: ", p), err) - } - return err -} - -func (p *Span) writeField9(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("duration", thrift.I64, 9); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:duration: ", p), err) - } - if err := oprot.WriteI64(int64(p.Duration)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.duration (9) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 9:duration: ", p), err) - } - return err -} - -func (p *Span) writeField10(oprot thrift.TProtocol) (err error) { - if p.IsSetTags() { - if err := oprot.WriteFieldBegin("tags", thrift.LIST, 10); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:tags: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tags)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Tags { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 10:tags: ", p), err) - } - } - return err -} - -func (p *Span) writeField11(oprot thrift.TProtocol) (err error) { - if p.IsSetLogs() { - if err := oprot.WriteFieldBegin("logs", thrift.LIST, 11); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:logs: ", p), err) 
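Reviewer note: the `IsSet*`-gated writers around this point illustrate the generated optional-field convention, which is the same in the old and new output: required fields are plain values and are always written, while optional fields are pointers (or nilable slices) that are skipped on the wire when unset. A small hedged sketch using the `Tag` type from this file:

```go
package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
)

func main() {
	v := "v1.2.3"
	tag := &jaeger.Tag{
		Key:   "version",             // required: plain value, always serialized
		VType: jaeger.TagType_STRING, // required: plain value, always serialized
		VStr:  &v,                    // optional: pointer; nil means "absent"
	}
	// writeFieldN gates optional fields on IsSetX, so unset ones never
	// reach the wire; the getters fall back to the field's default.
	fmt.Println(tag.IsSetVStr(), tag.GetVStr()) // true v1.2.3
	fmt.Println(tag.IsSetVDouble())             // false
}
```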
- } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Logs)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Logs { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 11:logs: ", p), err) - } - } - return err -} - -func (p *Span) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Span(%+v)", *p) -} - -// Attributes: -// - ServiceName -// - Tags -type Process struct { - ServiceName string `thrift:"serviceName,1,required" json:"serviceName"` - Tags []*Tag `thrift:"tags,2" json:"tags,omitempty"` -} - -func NewProcess() *Process { - return &Process{} -} - -func (p *Process) GetServiceName() string { - return p.ServiceName -} - -var Process_Tags_DEFAULT []*Tag - -func (p *Process) GetTags() []*Tag { - return p.Tags -} -func (p *Process) IsSetTags() bool { - return p.Tags != nil -} - -func (p *Process) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetServiceName bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetServiceName = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetServiceName { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ServiceName is not set")) - } - return nil -} - -func (p *Process) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.ServiceName = v - } - return nil -} - -func (p *Process) readField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Tag, 0, size) - p.Tags = tSlice - for i := 0; i < size; i++ { - _elem4 := &Tag{} - if err := _elem4.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err) - } - p.Tags = append(p.Tags, _elem4) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Process) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("Process"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); 
err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Process) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("serviceName", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) - } - if err := oprot.WriteString(string(p.ServiceName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) - } - return err -} - -func (p *Process) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetTags() { - if err := oprot.WriteFieldBegin("tags", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tags: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tags)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Tags { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tags: ", p), err) - } - } - return err -} - -func (p *Process) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Process(%+v)", *p) -} - -// Attributes: -// - Process -// - Spans -type Batch struct { - Process *Process `thrift:"process,1,required" json:"process"` - Spans []*Span `thrift:"spans,2,required" json:"spans"` -} - -func NewBatch() *Batch { - return &Batch{} -} - -var Batch_Process_DEFAULT *Process - -func (p *Batch) GetProcess() *Process { - if !p.IsSetProcess() { - return Batch_Process_DEFAULT - } - return p.Process -} - -func (p *Batch) GetSpans() []*Span { - return p.Spans -} -func (p *Batch) IsSetProcess() bool { - return p.Process != nil -} - -func (p *Batch) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetProcess bool = false - var issetSpans bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetProcess = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - issetSpans = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetProcess { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Process is not set")) - } - if !issetSpans { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Spans is not set")) - } - return nil -} - -func (p *Batch) readField1(iprot thrift.TProtocol) error { - p.Process = &Process{} - if err := p.Process.Read(iprot); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Process), err) - } - return nil -} - -func (p *Batch) readField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Span, 0, size) - p.Spans = tSlice - for i := 0; i < size; i++ { - _elem5 := &Span{} - if err := _elem5.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem5), err) - } - p.Spans = append(p.Spans, _elem5) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Batch) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("Batch"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Batch) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("process", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:process: ", p), err) - } - if err := p.Process.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Process), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:process: ", p), err) - } - return err -} - -func (p *Batch) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("spans", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:spans: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Spans)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Spans { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:spans: ", p), err) - } - return err -} - -func (p *Batch) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Batch(%+v)", *p) -} - -// Attributes: -// - Ok -type BatchSubmitResponse struct { - Ok bool `thrift:"ok,1,required" json:"ok"` -} - -func NewBatchSubmitResponse() *BatchSubmitResponse { - return &BatchSubmitResponse{} -} - -func (p *BatchSubmitResponse) GetOk() bool { - return p.Ok -} -func (p *BatchSubmitResponse) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetOk bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetOk = true - 
default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetOk { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set")) - } - return nil -} - -func (p *BatchSubmitResponse) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Ok = v - } - return nil -} - -func (p *BatchSubmitResponse) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("BatchSubmitResponse"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *BatchSubmitResponse) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("ok", thrift.BOOL, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err) - } - if err := oprot.WriteBool(bool(p.Ok)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err) - } - return err -} - -func (p *BatchSubmitResponse) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("BatchSubmitResponse(%+v)", *p) -} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/GoUnusedProtection__.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/GoUnusedProtection__.go new file mode 100644 index 000000000..015ad4b06 --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/GoUnusedProtection__.go @@ -0,0 +1,6 @@ +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. + +package sampling + +var GoUnusedProtection__ int; + diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/constants.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/constants.go deleted file mode 100644 index 728988b83..000000000 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/constants.go +++ /dev/null @@ -1,18 +0,0 @@ -// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package sampling - -import ( - "bytes" - "fmt" - "github.com/apache/thrift/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) 
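[Reviewer note] The jaeger structs deleted above (Span, Process, Batch, BatchSubmitResponse) follow the context-free Read/Write pattern that Thrift 0.9.3 emitted, including the isset* bookkeeping that rejects a struct whose required fields never arrived. As a minimal sketch of that pre-upgrade surface — assuming the old vendored packages github.com/apache/thrift/lib/go/thrift and github.com/uber/jaeger-client-go/thrift-gen/jaeger are still importable — a Process can be round-tripped through an in-memory transport like this:

package main

import (
	"fmt"

	"github.com/apache/thrift/lib/go/thrift"
	"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
)

func main() {
	// In-memory transport: bytes written by Write are read back by Read.
	buf := thrift.NewTMemoryBuffer()

	// Old context-free write: p.Write(oprot), no context.Context.
	in := jaeger.NewProcess()
	in.ServiceName = "frontend" // required field; Read enforces it via issetServiceName
	if err := in.Write(thrift.NewTBinaryProtocolTransport(buf)); err != nil {
		panic(err)
	}

	// Read it back; a payload missing serviceName would fail here with
	// an INVALID_DATA protocol exception, per the generated isset check.
	out := jaeger.NewProcess()
	if err := out.Read(thrift.NewTBinaryProtocolTransport(buf)); err != nil {
		panic(err)
	}
	fmt.Println(out.GetServiceName()) // frontend
}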
-var _ = thrift.ZERO -var _ = fmt.Printf -var _ = bytes.Equal - -func init() { -} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/sampling-consts.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/sampling-consts.go new file mode 100644 index 000000000..5cc762824 --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/sampling-consts.go @@ -0,0 +1,23 @@ +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. + +package sampling + +import( + "bytes" + "context" + "fmt" + "time" + "github.com/uber/jaeger-client-go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = time.Now +var _ = bytes.Equal + + +func init() { +} + diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/sampling.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/sampling.go new file mode 100644 index 000000000..3bffa5b8e --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/sampling.go @@ -0,0 +1,1323 @@ +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. + +package sampling + +import( + "bytes" + "context" + "database/sql/driver" + "errors" + "fmt" + "time" + "github.com/uber/jaeger-client-go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = time.Now +var _ = bytes.Equal + +type SamplingStrategyType int64 +const ( + SamplingStrategyType_PROBABILISTIC SamplingStrategyType = 0 + SamplingStrategyType_RATE_LIMITING SamplingStrategyType = 1 +) + +func (p SamplingStrategyType) String() string { + switch p { + case SamplingStrategyType_PROBABILISTIC: return "PROBABILISTIC" + case SamplingStrategyType_RATE_LIMITING: return "RATE_LIMITING" + } + return "" +} + +func SamplingStrategyTypeFromString(s string) (SamplingStrategyType, error) { + switch s { + case "PROBABILISTIC": return SamplingStrategyType_PROBABILISTIC, nil + case "RATE_LIMITING": return SamplingStrategyType_RATE_LIMITING, nil + } + return SamplingStrategyType(0), fmt.Errorf("not a valid SamplingStrategyType string") +} + + +func SamplingStrategyTypePtr(v SamplingStrategyType) *SamplingStrategyType { return &v } + +func (p SamplingStrategyType) MarshalText() ([]byte, error) { +return []byte(p.String()), nil +} + +func (p *SamplingStrategyType) UnmarshalText(text []byte) error { +q, err := SamplingStrategyTypeFromString(string(text)) +if (err != nil) { +return err +} +*p = q +return nil +} + +func (p *SamplingStrategyType) Scan(value interface{}) error { +v, ok := value.(int64) +if !ok { +return errors.New("Scan value is not int64") +} +*p = SamplingStrategyType(v) +return nil +} + +func (p * SamplingStrategyType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } +return int64(*p), nil +} +// Attributes: +// - SamplingRate +type ProbabilisticSamplingStrategy struct { + SamplingRate float64 `thrift:"samplingRate,1,required" db:"samplingRate" json:"samplingRate"` +} + +func NewProbabilisticSamplingStrategy() *ProbabilisticSamplingStrategy { + return &ProbabilisticSamplingStrategy{} +} + + +func (p *ProbabilisticSamplingStrategy) GetSamplingRate() float64 { + return p.SamplingRate +} +func (p *ProbabilisticSamplingStrategy) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := 
iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetSamplingRate bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.DOUBLE { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetSamplingRate = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetSamplingRate{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SamplingRate is not set")); + } + return nil +} + +func (p *ProbabilisticSamplingStrategy) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadDouble(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.SamplingRate = v +} + return nil +} + +func (p *ProbabilisticSamplingStrategy) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "ProbabilisticSamplingStrategy"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *ProbabilisticSamplingStrategy) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "samplingRate", thrift.DOUBLE, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:samplingRate: ", p), err) } + if err := oprot.WriteDouble(ctx, float64(p.SamplingRate)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.samplingRate (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:samplingRate: ", p), err) } + return err +} + +func (p *ProbabilisticSamplingStrategy) Equals(other *ProbabilisticSamplingStrategy) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.SamplingRate != other.SamplingRate { return false } + return true +} + +func (p *ProbabilisticSamplingStrategy) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ProbabilisticSamplingStrategy(%+v)", *p) +} + +// Attributes: +// - MaxTracesPerSecond +type RateLimitingSamplingStrategy struct { + MaxTracesPerSecond int16 `thrift:"maxTracesPerSecond,1,required" db:"maxTracesPerSecond" json:"maxTracesPerSecond"` +} + +func NewRateLimitingSamplingStrategy() *RateLimitingSamplingStrategy { + return &RateLimitingSamplingStrategy{} +} + + +func (p *RateLimitingSamplingStrategy) GetMaxTracesPerSecond() int16 { + return p.MaxTracesPerSecond +} +func (p *RateLimitingSamplingStrategy) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, 
err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetMaxTracesPerSecond bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.I16 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetMaxTracesPerSecond = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetMaxTracesPerSecond{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxTracesPerSecond is not set")); + } + return nil +} + +func (p *RateLimitingSamplingStrategy) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI16(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.MaxTracesPerSecond = v +} + return nil +} + +func (p *RateLimitingSamplingStrategy) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "RateLimitingSamplingStrategy"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *RateLimitingSamplingStrategy) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "maxTracesPerSecond", thrift.I16, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:maxTracesPerSecond: ", p), err) } + if err := oprot.WriteI16(ctx, int16(p.MaxTracesPerSecond)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.maxTracesPerSecond (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:maxTracesPerSecond: ", p), err) } + return err +} + +func (p *RateLimitingSamplingStrategy) Equals(other *RateLimitingSamplingStrategy) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.MaxTracesPerSecond != other.MaxTracesPerSecond { return false } + return true +} + +func (p *RateLimitingSamplingStrategy) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RateLimitingSamplingStrategy(%+v)", *p) +} + +// Attributes: +// - Operation +// - ProbabilisticSampling +type OperationSamplingStrategy struct { + Operation string `thrift:"operation,1,required" db:"operation" json:"operation"` + ProbabilisticSampling *ProbabilisticSamplingStrategy `thrift:"probabilisticSampling,2,required" db:"probabilisticSampling" json:"probabilisticSampling"` +} + +func NewOperationSamplingStrategy() *OperationSamplingStrategy { + return &OperationSamplingStrategy{} +} + + +func (p 
*OperationSamplingStrategy) GetOperation() string { + return p.Operation +} +var OperationSamplingStrategy_ProbabilisticSampling_DEFAULT *ProbabilisticSamplingStrategy +func (p *OperationSamplingStrategy) GetProbabilisticSampling() *ProbabilisticSamplingStrategy { + if !p.IsSetProbabilisticSampling() { + return OperationSamplingStrategy_ProbabilisticSampling_DEFAULT + } +return p.ProbabilisticSampling +} +func (p *OperationSamplingStrategy) IsSetProbabilisticSampling() bool { + return p.ProbabilisticSampling != nil +} + +func (p *OperationSamplingStrategy) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetOperation bool = false; + var issetProbabilisticSampling bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetOperation = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetProbabilisticSampling = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetOperation{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Operation is not set")); + } + if !issetProbabilisticSampling{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ProbabilisticSampling is not set")); + } + return nil +} + +func (p *OperationSamplingStrategy) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.Operation = v +} + return nil +} + +func (p *OperationSamplingStrategy) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + p.ProbabilisticSampling = &ProbabilisticSamplingStrategy{} + if err := p.ProbabilisticSampling.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ProbabilisticSampling), err) + } + return nil +} + +func (p *OperationSamplingStrategy) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "OperationSamplingStrategy"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *OperationSamplingStrategy) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if 
err := oprot.WriteFieldBegin(ctx, "operation", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:operation: ", p), err) } + if err := oprot.WriteString(ctx, string(p.Operation)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.operation (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:operation: ", p), err) } + return err +} + +func (p *OperationSamplingStrategy) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "probabilisticSampling", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:probabilisticSampling: ", p), err) } + if err := p.ProbabilisticSampling.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ProbabilisticSampling), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:probabilisticSampling: ", p), err) } + return err +} + +func (p *OperationSamplingStrategy) Equals(other *OperationSamplingStrategy) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Operation != other.Operation { return false } + if !p.ProbabilisticSampling.Equals(other.ProbabilisticSampling) { return false } + return true +} + +func (p *OperationSamplingStrategy) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("OperationSamplingStrategy(%+v)", *p) +} + +// Attributes: +// - DefaultSamplingProbability +// - DefaultLowerBoundTracesPerSecond +// - PerOperationStrategies +// - DefaultUpperBoundTracesPerSecond +type PerOperationSamplingStrategies struct { + DefaultSamplingProbability float64 `thrift:"defaultSamplingProbability,1,required" db:"defaultSamplingProbability" json:"defaultSamplingProbability"` + DefaultLowerBoundTracesPerSecond float64 `thrift:"defaultLowerBoundTracesPerSecond,2,required" db:"defaultLowerBoundTracesPerSecond" json:"defaultLowerBoundTracesPerSecond"` + PerOperationStrategies []*OperationSamplingStrategy `thrift:"perOperationStrategies,3,required" db:"perOperationStrategies" json:"perOperationStrategies"` + DefaultUpperBoundTracesPerSecond *float64 `thrift:"defaultUpperBoundTracesPerSecond,4" db:"defaultUpperBoundTracesPerSecond" json:"defaultUpperBoundTracesPerSecond,omitempty"` +} + +func NewPerOperationSamplingStrategies() *PerOperationSamplingStrategies { + return &PerOperationSamplingStrategies{} +} + + +func (p *PerOperationSamplingStrategies) GetDefaultSamplingProbability() float64 { + return p.DefaultSamplingProbability +} + +func (p *PerOperationSamplingStrategies) GetDefaultLowerBoundTracesPerSecond() float64 { + return p.DefaultLowerBoundTracesPerSecond +} + +func (p *PerOperationSamplingStrategies) GetPerOperationStrategies() []*OperationSamplingStrategy { + return p.PerOperationStrategies +} +var PerOperationSamplingStrategies_DefaultUpperBoundTracesPerSecond_DEFAULT float64 +func (p *PerOperationSamplingStrategies) GetDefaultUpperBoundTracesPerSecond() float64 { + if !p.IsSetDefaultUpperBoundTracesPerSecond() { + return PerOperationSamplingStrategies_DefaultUpperBoundTracesPerSecond_DEFAULT + } +return *p.DefaultUpperBoundTracesPerSecond +} +func (p *PerOperationSamplingStrategies) IsSetDefaultUpperBoundTracesPerSecond() bool { + return p.DefaultUpperBoundTracesPerSecond != nil +} + +func (p 
*PerOperationSamplingStrategies) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetDefaultSamplingProbability bool = false; + var issetDefaultLowerBoundTracesPerSecond bool = false; + var issetPerOperationStrategies bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.DOUBLE { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetDefaultSamplingProbability = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.DOUBLE { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetDefaultLowerBoundTracesPerSecond = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.LIST { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + issetPerOperationStrategies = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.DOUBLE { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetDefaultSamplingProbability{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefaultSamplingProbability is not set")); + } + if !issetDefaultLowerBoundTracesPerSecond{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefaultLowerBoundTracesPerSecond is not set")); + } + if !issetPerOperationStrategies{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field PerOperationStrategies is not set")); + } + return nil +} + +func (p *PerOperationSamplingStrategies) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadDouble(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.DefaultSamplingProbability = v +} + return nil +} + +func (p *PerOperationSamplingStrategies) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadDouble(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.DefaultLowerBoundTracesPerSecond = v +} + return nil +} + +func (p *PerOperationSamplingStrategies) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*OperationSamplingStrategy, 0, size) + p.PerOperationStrategies = tSlice + for i := 0; i < size; i ++ { + _elem0 := &OperationSamplingStrategy{} + if err := _elem0.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) + } + p.PerOperationStrategies = 
append(p.PerOperationStrategies, _elem0) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *PerOperationSamplingStrategies) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadDouble(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) +} else { + p.DefaultUpperBoundTracesPerSecond = &v +} + return nil +} + +func (p *PerOperationSamplingStrategies) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "PerOperationSamplingStrategies"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + if err := p.writeField4(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *PerOperationSamplingStrategies) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "defaultSamplingProbability", thrift.DOUBLE, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:defaultSamplingProbability: ", p), err) } + if err := oprot.WriteDouble(ctx, float64(p.DefaultSamplingProbability)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.defaultSamplingProbability (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:defaultSamplingProbability: ", p), err) } + return err +} + +func (p *PerOperationSamplingStrategies) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "defaultLowerBoundTracesPerSecond", thrift.DOUBLE, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:defaultLowerBoundTracesPerSecond: ", p), err) } + if err := oprot.WriteDouble(ctx, float64(p.DefaultLowerBoundTracesPerSecond)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.defaultLowerBoundTracesPerSecond (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:defaultLowerBoundTracesPerSecond: ", p), err) } + return err +} + +func (p *PerOperationSamplingStrategies) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "perOperationStrategies", thrift.LIST, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:perOperationStrategies: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.PerOperationStrategies)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.PerOperationStrategies { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field end error 3:perOperationStrategies: ", p), err) } + return err +} + +func (p *PerOperationSamplingStrategies) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetDefaultUpperBoundTracesPerSecond() { + if err := oprot.WriteFieldBegin(ctx, "defaultUpperBoundTracesPerSecond", thrift.DOUBLE, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:defaultUpperBoundTracesPerSecond: ", p), err) } + if err := oprot.WriteDouble(ctx, float64(*p.DefaultUpperBoundTracesPerSecond)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.defaultUpperBoundTracesPerSecond (4) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:defaultUpperBoundTracesPerSecond: ", p), err) } + } + return err +} + +func (p *PerOperationSamplingStrategies) Equals(other *PerOperationSamplingStrategies) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.DefaultSamplingProbability != other.DefaultSamplingProbability { return false } + if p.DefaultLowerBoundTracesPerSecond != other.DefaultLowerBoundTracesPerSecond { return false } + if len(p.PerOperationStrategies) != len(other.PerOperationStrategies) { return false } + for i, _tgt := range p.PerOperationStrategies { + _src1 := other.PerOperationStrategies[i] + if !_tgt.Equals(_src1) { return false } + } + if p.DefaultUpperBoundTracesPerSecond != other.DefaultUpperBoundTracesPerSecond { + if p.DefaultUpperBoundTracesPerSecond == nil || other.DefaultUpperBoundTracesPerSecond == nil { + return false + } + if (*p.DefaultUpperBoundTracesPerSecond) != (*other.DefaultUpperBoundTracesPerSecond) { return false } + } + return true +} + +func (p *PerOperationSamplingStrategies) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("PerOperationSamplingStrategies(%+v)", *p) +} + +// Attributes: +// - StrategyType +// - ProbabilisticSampling +// - RateLimitingSampling +// - OperationSampling +type SamplingStrategyResponse struct { + StrategyType SamplingStrategyType `thrift:"strategyType,1,required" db:"strategyType" json:"strategyType"` + ProbabilisticSampling *ProbabilisticSamplingStrategy `thrift:"probabilisticSampling,2" db:"probabilisticSampling" json:"probabilisticSampling,omitempty"` + RateLimitingSampling *RateLimitingSamplingStrategy `thrift:"rateLimitingSampling,3" db:"rateLimitingSampling" json:"rateLimitingSampling,omitempty"` + OperationSampling *PerOperationSamplingStrategies `thrift:"operationSampling,4" db:"operationSampling" json:"operationSampling,omitempty"` +} + +func NewSamplingStrategyResponse() *SamplingStrategyResponse { + return &SamplingStrategyResponse{} +} + + +func (p *SamplingStrategyResponse) GetStrategyType() SamplingStrategyType { + return p.StrategyType +} +var SamplingStrategyResponse_ProbabilisticSampling_DEFAULT *ProbabilisticSamplingStrategy +func (p *SamplingStrategyResponse) GetProbabilisticSampling() *ProbabilisticSamplingStrategy { + if !p.IsSetProbabilisticSampling() { + return SamplingStrategyResponse_ProbabilisticSampling_DEFAULT + } +return p.ProbabilisticSampling +} +var SamplingStrategyResponse_RateLimitingSampling_DEFAULT *RateLimitingSamplingStrategy +func (p *SamplingStrategyResponse) GetRateLimitingSampling() *RateLimitingSamplingStrategy { + if !p.IsSetRateLimitingSampling() { + return SamplingStrategyResponse_RateLimitingSampling_DEFAULT + } +return 
p.RateLimitingSampling +} +var SamplingStrategyResponse_OperationSampling_DEFAULT *PerOperationSamplingStrategies +func (p *SamplingStrategyResponse) GetOperationSampling() *PerOperationSamplingStrategies { + if !p.IsSetOperationSampling() { + return SamplingStrategyResponse_OperationSampling_DEFAULT + } +return p.OperationSampling +} +func (p *SamplingStrategyResponse) IsSetProbabilisticSampling() bool { + return p.ProbabilisticSampling != nil +} + +func (p *SamplingStrategyResponse) IsSetRateLimitingSampling() bool { + return p.RateLimitingSampling != nil +} + +func (p *SamplingStrategyResponse) IsSetOperationSampling() bool { + return p.OperationSampling != nil +} + +func (p *SamplingStrategyResponse) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetStrategyType bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetStrategyType = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetStrategyType{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StrategyType is not set")); + } + return nil +} + +func (p *SamplingStrategyResponse) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + temp := SamplingStrategyType(v) + p.StrategyType = temp +} + return nil +} + +func (p *SamplingStrategyResponse) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + p.ProbabilisticSampling = &ProbabilisticSamplingStrategy{} + if err := p.ProbabilisticSampling.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ProbabilisticSampling), err) + } + return nil +} + +func (p *SamplingStrategyResponse) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + p.RateLimitingSampling = &RateLimitingSamplingStrategy{} + if err := p.RateLimitingSampling.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.RateLimitingSampling), err) + } + return nil +} + +func (p *SamplingStrategyResponse) ReadField4(ctx context.Context, iprot 
thrift.TProtocol) error { + p.OperationSampling = &PerOperationSamplingStrategies{} + if err := p.OperationSampling.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationSampling), err) + } + return nil +} + +func (p *SamplingStrategyResponse) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "SamplingStrategyResponse"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + if err := p.writeField4(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *SamplingStrategyResponse) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "strategyType", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:strategyType: ", p), err) } + if err := oprot.WriteI32(ctx, int32(p.StrategyType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.strategyType (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:strategyType: ", p), err) } + return err +} + +func (p *SamplingStrategyResponse) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetProbabilisticSampling() { + if err := oprot.WriteFieldBegin(ctx, "probabilisticSampling", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:probabilisticSampling: ", p), err) } + if err := p.ProbabilisticSampling.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ProbabilisticSampling), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:probabilisticSampling: ", p), err) } + } + return err +} + +func (p *SamplingStrategyResponse) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetRateLimitingSampling() { + if err := oprot.WriteFieldBegin(ctx, "rateLimitingSampling", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:rateLimitingSampling: ", p), err) } + if err := p.RateLimitingSampling.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.RateLimitingSampling), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:rateLimitingSampling: ", p), err) } + } + return err +} + +func (p *SamplingStrategyResponse) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetOperationSampling() { + if err := oprot.WriteFieldBegin(ctx, "operationSampling", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:operationSampling: ", p), err) } + if err := p.OperationSampling.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", 
p.OperationSampling), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:operationSampling: ", p), err) } + } + return err +} + +func (p *SamplingStrategyResponse) Equals(other *SamplingStrategyResponse) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.StrategyType != other.StrategyType { return false } + if !p.ProbabilisticSampling.Equals(other.ProbabilisticSampling) { return false } + if !p.RateLimitingSampling.Equals(other.RateLimitingSampling) { return false } + if !p.OperationSampling.Equals(other.OperationSampling) { return false } + return true +} + +func (p *SamplingStrategyResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("SamplingStrategyResponse(%+v)", *p) +} + +type SamplingManager interface { + // Parameters: + // - ServiceName + GetSamplingStrategy(ctx context.Context, serviceName string) (_r *SamplingStrategyResponse, _err error) +} + +type SamplingManagerClient struct { + c thrift.TClient + meta thrift.ResponseMeta +} + +func NewSamplingManagerClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *SamplingManagerClient { + return &SamplingManagerClient{ + c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), + } +} + +func NewSamplingManagerClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *SamplingManagerClient { + return &SamplingManagerClient{ + c: thrift.NewTStandardClient(iprot, oprot), + } +} + +func NewSamplingManagerClient(c thrift.TClient) *SamplingManagerClient { + return &SamplingManagerClient{ + c: c, + } +} + +func (p *SamplingManagerClient) Client_() thrift.TClient { + return p.c +} + +func (p *SamplingManagerClient) LastResponseMeta_() thrift.ResponseMeta { + return p.meta +} + +func (p *SamplingManagerClient) SetLastResponseMeta_(meta thrift.ResponseMeta) { + p.meta = meta +} + +// Parameters: +// - ServiceName +func (p *SamplingManagerClient) GetSamplingStrategy(ctx context.Context, serviceName string) (_r *SamplingStrategyResponse, _err error) { + var _args2 SamplingManagerGetSamplingStrategyArgs + _args2.ServiceName = serviceName + var _result4 SamplingManagerGetSamplingStrategyResult + var _meta3 thrift.ResponseMeta + _meta3, _err = p.Client_().Call(ctx, "getSamplingStrategy", &_args2, &_result4) + p.SetLastResponseMeta_(_meta3) + if _err != nil { + return + } + return _result4.GetSuccess(), nil +} + +type SamplingManagerProcessor struct { + processorMap map[string]thrift.TProcessorFunction + handler SamplingManager +} + +func (p *SamplingManagerProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { + p.processorMap[key] = processor +} + +func (p *SamplingManagerProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { + processor, ok = p.processorMap[key] + return processor, ok +} + +func (p *SamplingManagerProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { + return p.processorMap +} + +func NewSamplingManagerProcessor(handler SamplingManager) *SamplingManagerProcessor { + + self5 := &SamplingManagerProcessor{handler:handler, processorMap:make(map[string]thrift.TProcessorFunction)} + self5.processorMap["getSamplingStrategy"] = &samplingManagerProcessorGetSamplingStrategy{handler:handler} +return self5 +} + +func (p *SamplingManagerProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + name, _, seqId, err2 := 
iprot.ReadMessageBegin(ctx) + if err2 != nil { return false, thrift.WrapTException(err2) } + if processor, ok := p.GetProcessorFunction(name); ok { + return processor.Process(ctx, seqId, iprot, oprot) + } + iprot.Skip(ctx, thrift.STRUCT) + iprot.ReadMessageEnd(ctx) + x6 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name) + oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId) + x6.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, x6 + +} + +type samplingManagerProcessorGetSamplingStrategy struct { + handler SamplingManager +} + +func (p *samplingManagerProcessorGetSamplingStrategy) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := SamplingManagerGetSamplingStrategyArgs{} + var err2 error + if err2 = args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) + oprot.WriteMessageBegin(ctx, "getSamplingStrategy", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := func() {} + // Start a goroutine to do server side connectivity check. + if thrift.ServerConnectivityCheckInterval > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(ctx) + defer cancel() + var tickerCtx context.Context + tickerCtx, tickerCancel = context.WithCancel(context.Background()) + defer tickerCancel() + go func(ctx context.Context, cancel context.CancelFunc) { + ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if !iprot.Transport().IsOpen() { + cancel() + return + } + } + } + }(tickerCtx, cancel) + } + + result := SamplingManagerGetSamplingStrategyResult{} + var retval *SamplingStrategyResponse + if retval, err2 = p.handler.GetSamplingStrategy(ctx, args.ServiceName); err2 != nil { + tickerCancel() + if err2 == thrift.ErrAbandonRequest { + return false, thrift.WrapTException(err2) + } + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getSamplingStrategy: " + err2.Error()) + oprot.WriteMessageBegin(ctx, "getSamplingStrategy", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return true, thrift.WrapTException(err2) + } else { + result.Success = retval + } + tickerCancel() + if err2 = oprot.WriteMessageBegin(ctx, "getSamplingStrategy", thrift.REPLY, seqId); err2 != nil { + err = thrift.WrapTException(err2) + } + if err2 = result.Write(ctx, oprot); err == nil && err2 != nil { + err = thrift.WrapTException(err2) + } + if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil { + err = thrift.WrapTException(err2) + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = thrift.WrapTException(err2) + } + if err != nil { + return + } + return true, err +} + + +// HELPER FUNCTIONS AND STRUCTURES + +// Attributes: +// - ServiceName +type SamplingManagerGetSamplingStrategyArgs struct { + ServiceName string `thrift:"serviceName,1" db:"serviceName" json:"serviceName"` +} + +func NewSamplingManagerGetSamplingStrategyArgs() *SamplingManagerGetSamplingStrategyArgs { + return &SamplingManagerGetSamplingStrategyArgs{} +} + + +func (p *SamplingManagerGetSamplingStrategyArgs) GetServiceName() string { + return p.ServiceName +} +func (p 
*SamplingManagerGetSamplingStrategyArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *SamplingManagerGetSamplingStrategyArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.ServiceName = v +} + return nil +} + +func (p *SamplingManagerGetSamplingStrategyArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "getSamplingStrategy_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *SamplingManagerGetSamplingStrategyArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "serviceName", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) } + if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) } + return err +} + +func (p *SamplingManagerGetSamplingStrategyArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("SamplingManagerGetSamplingStrategyArgs(%+v)", *p) +} + +// Attributes: +// - Success +type SamplingManagerGetSamplingStrategyResult struct { + Success *SamplingStrategyResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewSamplingManagerGetSamplingStrategyResult() *SamplingManagerGetSamplingStrategyResult { + return &SamplingManagerGetSamplingStrategyResult{} +} + +var SamplingManagerGetSamplingStrategyResult_Success_DEFAULT *SamplingStrategyResponse +func (p *SamplingManagerGetSamplingStrategyResult) GetSuccess() *SamplingStrategyResponse { + if !p.IsSetSuccess() { + return SamplingManagerGetSamplingStrategyResult_Success_DEFAULT + } +return p.Success +} +func (p *SamplingManagerGetSamplingStrategyResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *SamplingManagerGetSamplingStrategyResult) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := 
iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *SamplingManagerGetSamplingStrategyResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { + p.Success = &SamplingStrategyResponse{} + if err := p.Success.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *SamplingManagerGetSamplingStrategyResult) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "getSamplingStrategy_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField0(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *SamplingManagerGetSamplingStrategyResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin(ctx, "success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) } + if err := p.Success.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) } + } + return err +} + +func (p *SamplingManagerGetSamplingStrategyResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("SamplingManagerGetSamplingStrategyResult(%+v)", *p) +} + + diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/samplingmanager.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/samplingmanager.go deleted file mode 100644 index 563e2b4c9..000000000 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/samplingmanager.go +++ /dev/null @@ -1,410 +0,0 @@ -// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package sampling - -import ( - "bytes" - "fmt" - "github.com/apache/thrift/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) 
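[Reviewer note] Note the shape change: the regenerated interface above takes a context.Context and returns (*SamplingStrategyResponse, error), while the deleted samplingmanager.go that follows exposes GetSamplingStrategy(serviceName string) on a client that owns its transport and sequence IDs. A minimal sketch of the new interface, satisfied here by a hypothetical in-process stub (not part of this change) rather than a real jaeger-agent:

package main

import (
	"context"
	"fmt"

	"github.com/uber/jaeger-client-go/thrift-gen/sampling"
)

// fixedManager is an illustrative SamplingManager that always returns
// the same probabilistic strategy.
type fixedManager struct{ rate float64 }

func (m *fixedManager) GetSamplingStrategy(ctx context.Context, serviceName string) (*sampling.SamplingStrategyResponse, error) {
	return &sampling.SamplingStrategyResponse{
		StrategyType: sampling.SamplingStrategyType_PROBABILISTIC,
		ProbabilisticSampling: &sampling.ProbabilisticSamplingStrategy{
			SamplingRate: m.rate,
		},
	}, nil
}

func main() {
	// The stub satisfies the context-aware interface generated by 0.14.1.
	var mgr sampling.SamplingManager = &fixedManager{rate: 0.01}
	resp, err := mgr.GetSamplingStrategy(context.Background(), "frontend")
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.GetStrategyType(), resp.GetProbabilisticSampling().GetSamplingRate()) // PROBABILISTIC 0.01
}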
-var _ = thrift.ZERO -var _ = fmt.Printf -var _ = bytes.Equal - -type SamplingManager interface { - // Parameters: - // - ServiceName - GetSamplingStrategy(serviceName string) (r *SamplingStrategyResponse, err error) -} - -type SamplingManagerClient struct { - Transport thrift.TTransport - ProtocolFactory thrift.TProtocolFactory - InputProtocol thrift.TProtocol - OutputProtocol thrift.TProtocol - SeqId int32 -} - -func NewSamplingManagerClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *SamplingManagerClient { - return &SamplingManagerClient{Transport: t, - ProtocolFactory: f, - InputProtocol: f.GetProtocol(t), - OutputProtocol: f.GetProtocol(t), - SeqId: 0, - } -} - -func NewSamplingManagerClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *SamplingManagerClient { - return &SamplingManagerClient{Transport: t, - ProtocolFactory: nil, - InputProtocol: iprot, - OutputProtocol: oprot, - SeqId: 0, - } -} - -// Parameters: -// - ServiceName -func (p *SamplingManagerClient) GetSamplingStrategy(serviceName string) (r *SamplingStrategyResponse, err error) { - if err = p.sendGetSamplingStrategy(serviceName); err != nil { - return - } - return p.recvGetSamplingStrategy() -} - -func (p *SamplingManagerClient) sendGetSamplingStrategy(serviceName string) (err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("getSamplingStrategy", thrift.CALL, p.SeqId); err != nil { - return - } - args := SamplingManagerGetSamplingStrategyArgs{ - ServiceName: serviceName, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush() -} - -func (p *SamplingManagerClient) recvGetSamplingStrategy() (value *SamplingStrategyResponse, err error) { - iprot := p.InputProtocol - if iprot == nil { - iprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.InputProtocol = iprot - } - method, mTypeId, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return - } - if method != "getSamplingStrategy" { - err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "getSamplingStrategy failed: wrong method name") - return - } - if p.SeqId != seqId { - err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "getSamplingStrategy failed: out of sequence response") - return - } - if mTypeId == thrift.EXCEPTION { - error1 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error2 error - error2, err = error1.Read(iprot) - if err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - err = error2 - return - } - if mTypeId != thrift.REPLY { - err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "getSamplingStrategy failed: invalid message type") - return - } - result := SamplingManagerGetSamplingStrategyResult{} - if err = result.Read(iprot); err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - value = result.GetSuccess() - return -} - -type SamplingManagerProcessor struct { - processorMap map[string]thrift.TProcessorFunction - handler SamplingManager -} - -func (p *SamplingManagerProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor -} - -func (p *SamplingManagerProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { - processor, ok 
= p.processorMap[key] - return processor, ok -} - -func (p *SamplingManagerProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { - return p.processorMap -} - -func NewSamplingManagerProcessor(handler SamplingManager) *SamplingManagerProcessor { - - self3 := &SamplingManagerProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - self3.processorMap["getSamplingStrategy"] = &samplingManagerProcessorGetSamplingStrategy{handler: handler} - return self3 -} - -func (p *SamplingManagerProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - name, _, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return false, err - } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(seqId, iprot, oprot) - } - iprot.Skip(thrift.STRUCT) - iprot.ReadMessageEnd() - x4 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) - oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) - x4.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, x4 - -} - -type samplingManagerProcessorGetSamplingStrategy struct { - handler SamplingManager -} - -func (p *samplingManagerProcessorGetSamplingStrategy) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := SamplingManagerGetSamplingStrategyArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("getSamplingStrategy", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, err - } - - iprot.ReadMessageEnd() - result := SamplingManagerGetSamplingStrategyResult{} - var retval *SamplingStrategyResponse - var err2 error - if retval, err2 = p.handler.GetSamplingStrategy(args.ServiceName); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getSamplingStrategy: "+err2.Error()) - oprot.WriteMessageBegin("getSamplingStrategy", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("getSamplingStrategy", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -// HELPER FUNCTIONS AND STRUCTURES - -// Attributes: -// - ServiceName -type SamplingManagerGetSamplingStrategyArgs struct { - ServiceName string `thrift:"serviceName,1" json:"serviceName"` -} - -func NewSamplingManagerGetSamplingStrategyArgs() *SamplingManagerGetSamplingStrategyArgs { - return &SamplingManagerGetSamplingStrategyArgs{} -} - -func (p *SamplingManagerGetSamplingStrategyArgs) GetServiceName() string { - return p.ServiceName -} -func (p *SamplingManagerGetSamplingStrategyArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := 
p.readField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *SamplingManagerGetSamplingStrategyArgs) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.ServiceName = v - } - return nil -} - -func (p *SamplingManagerGetSamplingStrategyArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("getSamplingStrategy_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *SamplingManagerGetSamplingStrategyArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("serviceName", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) - } - if err := oprot.WriteString(string(p.ServiceName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) - } - return err -} - -func (p *SamplingManagerGetSamplingStrategyArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("SamplingManagerGetSamplingStrategyArgs(%+v)", *p) -} - -// Attributes: -// - Success -type SamplingManagerGetSamplingStrategyResult struct { - Success *SamplingStrategyResponse `thrift:"success,0" json:"success,omitempty"` -} - -func NewSamplingManagerGetSamplingStrategyResult() *SamplingManagerGetSamplingStrategyResult { - return &SamplingManagerGetSamplingStrategyResult{} -} - -var SamplingManagerGetSamplingStrategyResult_Success_DEFAULT *SamplingStrategyResponse - -func (p *SamplingManagerGetSamplingStrategyResult) GetSuccess() *SamplingStrategyResponse { - if !p.IsSetSuccess() { - return SamplingManagerGetSamplingStrategyResult_Success_DEFAULT - } - return p.Success -} -func (p *SamplingManagerGetSamplingStrategyResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *SamplingManagerGetSamplingStrategyResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if err := p.readField0(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p 
*SamplingManagerGetSamplingStrategyResult) readField0(iprot thrift.TProtocol) error { - p.Success = &SamplingStrategyResponse{} - if err := p.Success.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) - } - return nil -} - -func (p *SamplingManagerGetSamplingStrategyResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("getSamplingStrategy_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField0(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *SamplingManagerGetSamplingStrategyResult) writeField0(oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := p.Success.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *SamplingManagerGetSamplingStrategyResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("SamplingManagerGetSamplingStrategyResult(%+v)", *p) -} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/ttypes.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/ttypes.go deleted file mode 100644 index 3e831af4a..000000000 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/ttypes.go +++ /dev/null @@ -1,873 +0,0 @@ -// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package sampling - -import ( - "bytes" - "fmt" - "github.com/apache/thrift/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) 
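
The samplingmanager.go deletion above removes the Thrift-0.9-generated client/processor pair for Jaeger's remote sampling RPC: the client stamps each getSamplingStrategy call with an incrementing sequence id and checks method name, sequence id, and message type on the reply, while the processor dispatches incoming calls by method name to a SamplingManager handler. A minimal sketch of such a handler, using only types removed in this diff — the type name staticSamplingManager and the rate value are invented for illustration:

```go
// Hypothetical in-memory SamplingManager handler (not part of the diff):
// it answers every service with the same fixed probabilistic strategy.
type staticSamplingManager struct {
	rate float64
}

func (m *staticSamplingManager) GetSamplingStrategy(serviceName string) (*SamplingStrategyResponse, error) {
	return &SamplingStrategyResponse{
		StrategyType:          SamplingStrategyType_PROBABILISTIC,
		ProbabilisticSampling: &ProbabilisticSamplingStrategy{SamplingRate: m.rate},
	}, nil
}
```

Wired behind the generated dispatcher, this would be registered as NewSamplingManagerProcessor(&staticSamplingManager{rate: 0.001}), after which Process routes each "getSamplingStrategy" call to it.
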
-var _ = thrift.ZERO -var _ = fmt.Printf -var _ = bytes.Equal - -var GoUnusedProtection__ int - -type SamplingStrategyType int64 - -const ( - SamplingStrategyType_PROBABILISTIC SamplingStrategyType = 0 - SamplingStrategyType_RATE_LIMITING SamplingStrategyType = 1 -) - -func (p SamplingStrategyType) String() string { - switch p { - case SamplingStrategyType_PROBABILISTIC: - return "PROBABILISTIC" - case SamplingStrategyType_RATE_LIMITING: - return "RATE_LIMITING" - } - return "" -} - -func SamplingStrategyTypeFromString(s string) (SamplingStrategyType, error) { - switch s { - case "PROBABILISTIC": - return SamplingStrategyType_PROBABILISTIC, nil - case "RATE_LIMITING": - return SamplingStrategyType_RATE_LIMITING, nil - } - return SamplingStrategyType(0), fmt.Errorf("not a valid SamplingStrategyType string") -} - -func SamplingStrategyTypePtr(v SamplingStrategyType) *SamplingStrategyType { return &v } - -func (p SamplingStrategyType) MarshalText() ([]byte, error) { - return []byte(p.String()), nil -} - -func (p *SamplingStrategyType) UnmarshalText(text []byte) error { - q, err := SamplingStrategyTypeFromString(string(text)) - if err != nil { - return err - } - *p = q - return nil -} - -// Attributes: -// - SamplingRate -type ProbabilisticSamplingStrategy struct { - SamplingRate float64 `thrift:"samplingRate,1,required" json:"samplingRate"` -} - -func NewProbabilisticSamplingStrategy() *ProbabilisticSamplingStrategy { - return &ProbabilisticSamplingStrategy{} -} - -func (p *ProbabilisticSamplingStrategy) GetSamplingRate() float64 { - return p.SamplingRate -} -func (p *ProbabilisticSamplingStrategy) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetSamplingRate bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetSamplingRate = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetSamplingRate { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SamplingRate is not set")) - } - return nil -} - -func (p *ProbabilisticSamplingStrategy) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadDouble(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.SamplingRate = v - } - return nil -} - -func (p *ProbabilisticSamplingStrategy) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("ProbabilisticSamplingStrategy"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *ProbabilisticSamplingStrategy) writeField1(oprot thrift.TProtocol) (err error) { - if err := 
oprot.WriteFieldBegin("samplingRate", thrift.DOUBLE, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:samplingRate: ", p), err) - } - if err := oprot.WriteDouble(float64(p.SamplingRate)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.samplingRate (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:samplingRate: ", p), err) - } - return err -} - -func (p *ProbabilisticSamplingStrategy) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("ProbabilisticSamplingStrategy(%+v)", *p) -} - -// Attributes: -// - MaxTracesPerSecond -type RateLimitingSamplingStrategy struct { - MaxTracesPerSecond int16 `thrift:"maxTracesPerSecond,1,required" json:"maxTracesPerSecond"` -} - -func NewRateLimitingSamplingStrategy() *RateLimitingSamplingStrategy { - return &RateLimitingSamplingStrategy{} -} - -func (p *RateLimitingSamplingStrategy) GetMaxTracesPerSecond() int16 { - return p.MaxTracesPerSecond -} -func (p *RateLimitingSamplingStrategy) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetMaxTracesPerSecond bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetMaxTracesPerSecond = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetMaxTracesPerSecond { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxTracesPerSecond is not set")) - } - return nil -} - -func (p *RateLimitingSamplingStrategy) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI16(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.MaxTracesPerSecond = v - } - return nil -} - -func (p *RateLimitingSamplingStrategy) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("RateLimitingSamplingStrategy"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *RateLimitingSamplingStrategy) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("maxTracesPerSecond", thrift.I16, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:maxTracesPerSecond: ", p), err) - } - if err := oprot.WriteI16(int16(p.MaxTracesPerSecond)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.maxTracesPerSecond (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:maxTracesPerSecond: ", p), err) - } - return err 
-} - -func (p *RateLimitingSamplingStrategy) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("RateLimitingSamplingStrategy(%+v)", *p) -} - -// Attributes: -// - Operation -// - ProbabilisticSampling -type OperationSamplingStrategy struct { - Operation string `thrift:"operation,1,required" json:"operation"` - ProbabilisticSampling *ProbabilisticSamplingStrategy `thrift:"probabilisticSampling,2,required" json:"probabilisticSampling"` -} - -func NewOperationSamplingStrategy() *OperationSamplingStrategy { - return &OperationSamplingStrategy{} -} - -func (p *OperationSamplingStrategy) GetOperation() string { - return p.Operation -} - -var OperationSamplingStrategy_ProbabilisticSampling_DEFAULT *ProbabilisticSamplingStrategy - -func (p *OperationSamplingStrategy) GetProbabilisticSampling() *ProbabilisticSamplingStrategy { - if !p.IsSetProbabilisticSampling() { - return OperationSamplingStrategy_ProbabilisticSampling_DEFAULT - } - return p.ProbabilisticSampling -} -func (p *OperationSamplingStrategy) IsSetProbabilisticSampling() bool { - return p.ProbabilisticSampling != nil -} - -func (p *OperationSamplingStrategy) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetOperation bool = false - var issetProbabilisticSampling bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetOperation = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - issetProbabilisticSampling = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetOperation { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Operation is not set")) - } - if !issetProbabilisticSampling { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ProbabilisticSampling is not set")) - } - return nil -} - -func (p *OperationSamplingStrategy) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Operation = v - } - return nil -} - -func (p *OperationSamplingStrategy) readField2(iprot thrift.TProtocol) error { - p.ProbabilisticSampling = &ProbabilisticSamplingStrategy{} - if err := p.ProbabilisticSampling.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ProbabilisticSampling), err) - } - return nil -} - -func (p *OperationSamplingStrategy) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("OperationSamplingStrategy"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { 
- return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *OperationSamplingStrategy) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("operation", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:operation: ", p), err) - } - if err := oprot.WriteString(string(p.Operation)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.operation (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:operation: ", p), err) - } - return err -} - -func (p *OperationSamplingStrategy) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("probabilisticSampling", thrift.STRUCT, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:probabilisticSampling: ", p), err) - } - if err := p.ProbabilisticSampling.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ProbabilisticSampling), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:probabilisticSampling: ", p), err) - } - return err -} - -func (p *OperationSamplingStrategy) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("OperationSamplingStrategy(%+v)", *p) -} - -// Attributes: -// - DefaultSamplingProbability -// - DefaultLowerBoundTracesPerSecond -// - PerOperationStrategies -// - DefaultUpperBoundTracesPerSecond -type PerOperationSamplingStrategies struct { - DefaultSamplingProbability float64 `thrift:"defaultSamplingProbability,1,required" json:"defaultSamplingProbability"` - DefaultLowerBoundTracesPerSecond float64 `thrift:"defaultLowerBoundTracesPerSecond,2,required" json:"defaultLowerBoundTracesPerSecond"` - PerOperationStrategies []*OperationSamplingStrategy `thrift:"perOperationStrategies,3,required" json:"perOperationStrategies"` - DefaultUpperBoundTracesPerSecond *float64 `thrift:"defaultUpperBoundTracesPerSecond,4" json:"defaultUpperBoundTracesPerSecond,omitempty"` -} - -func NewPerOperationSamplingStrategies() *PerOperationSamplingStrategies { - return &PerOperationSamplingStrategies{} -} - -func (p *PerOperationSamplingStrategies) GetDefaultSamplingProbability() float64 { - return p.DefaultSamplingProbability -} - -func (p *PerOperationSamplingStrategies) GetDefaultLowerBoundTracesPerSecond() float64 { - return p.DefaultLowerBoundTracesPerSecond -} - -func (p *PerOperationSamplingStrategies) GetPerOperationStrategies() []*OperationSamplingStrategy { - return p.PerOperationStrategies -} - -var PerOperationSamplingStrategies_DefaultUpperBoundTracesPerSecond_DEFAULT float64 - -func (p *PerOperationSamplingStrategies) GetDefaultUpperBoundTracesPerSecond() float64 { - if !p.IsSetDefaultUpperBoundTracesPerSecond() { - return PerOperationSamplingStrategies_DefaultUpperBoundTracesPerSecond_DEFAULT - } - return *p.DefaultUpperBoundTracesPerSecond -} -func (p *PerOperationSamplingStrategies) IsSetDefaultUpperBoundTracesPerSecond() bool { - return p.DefaultUpperBoundTracesPerSecond != nil -} - -func (p *PerOperationSamplingStrategies) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetDefaultSamplingProbability bool = false - var issetDefaultLowerBoundTracesPerSecond bool = false - var 
issetPerOperationStrategies bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetDefaultSamplingProbability = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - issetDefaultLowerBoundTracesPerSecond = true - case 3: - if err := p.readField3(iprot); err != nil { - return err - } - issetPerOperationStrategies = true - case 4: - if err := p.readField4(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetDefaultSamplingProbability { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefaultSamplingProbability is not set")) - } - if !issetDefaultLowerBoundTracesPerSecond { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefaultLowerBoundTracesPerSecond is not set")) - } - if !issetPerOperationStrategies { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field PerOperationStrategies is not set")) - } - return nil -} - -func (p *PerOperationSamplingStrategies) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadDouble(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.DefaultSamplingProbability = v - } - return nil -} - -func (p *PerOperationSamplingStrategies) readField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadDouble(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.DefaultLowerBoundTracesPerSecond = v - } - return nil -} - -func (p *PerOperationSamplingStrategies) readField3(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*OperationSamplingStrategy, 0, size) - p.PerOperationStrategies = tSlice - for i := 0; i < size; i++ { - _elem0 := &OperationSamplingStrategy{} - if err := _elem0.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) - } - p.PerOperationStrategies = append(p.PerOperationStrategies, _elem0) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *PerOperationSamplingStrategies) readField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadDouble(); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.DefaultUpperBoundTracesPerSecond = &v - } - return nil -} - -func (p *PerOperationSamplingStrategies) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("PerOperationSamplingStrategies"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - 
} - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *PerOperationSamplingStrategies) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("defaultSamplingProbability", thrift.DOUBLE, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:defaultSamplingProbability: ", p), err) - } - if err := oprot.WriteDouble(float64(p.DefaultSamplingProbability)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.defaultSamplingProbability (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:defaultSamplingProbability: ", p), err) - } - return err -} - -func (p *PerOperationSamplingStrategies) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("defaultLowerBoundTracesPerSecond", thrift.DOUBLE, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:defaultLowerBoundTracesPerSecond: ", p), err) - } - if err := oprot.WriteDouble(float64(p.DefaultLowerBoundTracesPerSecond)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.defaultLowerBoundTracesPerSecond (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:defaultLowerBoundTracesPerSecond: ", p), err) - } - return err -} - -func (p *PerOperationSamplingStrategies) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("perOperationStrategies", thrift.LIST, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:perOperationStrategies: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.PerOperationStrategies)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.PerOperationStrategies { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:perOperationStrategies: ", p), err) - } - return err -} - -func (p *PerOperationSamplingStrategies) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetDefaultUpperBoundTracesPerSecond() { - if err := oprot.WriteFieldBegin("defaultUpperBoundTracesPerSecond", thrift.DOUBLE, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:defaultUpperBoundTracesPerSecond: ", p), err) - } - if err := oprot.WriteDouble(float64(*p.DefaultUpperBoundTracesPerSecond)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.defaultUpperBoundTracesPerSecond (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:defaultUpperBoundTracesPerSecond: ", p), err) - } - } - return err -} - -func (p *PerOperationSamplingStrategies) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("PerOperationSamplingStrategies(%+v)", *p) -} - -// Attributes: -// - StrategyType -// - ProbabilisticSampling -// - 
RateLimitingSampling -// - OperationSampling -type SamplingStrategyResponse struct { - StrategyType SamplingStrategyType `thrift:"strategyType,1,required" json:"strategyType"` - ProbabilisticSampling *ProbabilisticSamplingStrategy `thrift:"probabilisticSampling,2" json:"probabilisticSampling,omitempty"` - RateLimitingSampling *RateLimitingSamplingStrategy `thrift:"rateLimitingSampling,3" json:"rateLimitingSampling,omitempty"` - OperationSampling *PerOperationSamplingStrategies `thrift:"operationSampling,4" json:"operationSampling,omitempty"` -} - -func NewSamplingStrategyResponse() *SamplingStrategyResponse { - return &SamplingStrategyResponse{} -} - -func (p *SamplingStrategyResponse) GetStrategyType() SamplingStrategyType { - return p.StrategyType -} - -var SamplingStrategyResponse_ProbabilisticSampling_DEFAULT *ProbabilisticSamplingStrategy - -func (p *SamplingStrategyResponse) GetProbabilisticSampling() *ProbabilisticSamplingStrategy { - if !p.IsSetProbabilisticSampling() { - return SamplingStrategyResponse_ProbabilisticSampling_DEFAULT - } - return p.ProbabilisticSampling -} - -var SamplingStrategyResponse_RateLimitingSampling_DEFAULT *RateLimitingSamplingStrategy - -func (p *SamplingStrategyResponse) GetRateLimitingSampling() *RateLimitingSamplingStrategy { - if !p.IsSetRateLimitingSampling() { - return SamplingStrategyResponse_RateLimitingSampling_DEFAULT - } - return p.RateLimitingSampling -} - -var SamplingStrategyResponse_OperationSampling_DEFAULT *PerOperationSamplingStrategies - -func (p *SamplingStrategyResponse) GetOperationSampling() *PerOperationSamplingStrategies { - if !p.IsSetOperationSampling() { - return SamplingStrategyResponse_OperationSampling_DEFAULT - } - return p.OperationSampling -} -func (p *SamplingStrategyResponse) IsSetProbabilisticSampling() bool { - return p.ProbabilisticSampling != nil -} - -func (p *SamplingStrategyResponse) IsSetRateLimitingSampling() bool { - return p.RateLimitingSampling != nil -} - -func (p *SamplingStrategyResponse) IsSetOperationSampling() bool { - return p.OperationSampling != nil -} - -func (p *SamplingStrategyResponse) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetStrategyType bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetStrategyType = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - case 3: - if err := p.readField3(iprot); err != nil { - return err - } - case 4: - if err := p.readField4(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetStrategyType { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StrategyType is not set")) - } - return nil -} - -func (p *SamplingStrategyResponse) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - temp := 
SamplingStrategyType(v) - p.StrategyType = temp - } - return nil -} - -func (p *SamplingStrategyResponse) readField2(iprot thrift.TProtocol) error { - p.ProbabilisticSampling = &ProbabilisticSamplingStrategy{} - if err := p.ProbabilisticSampling.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ProbabilisticSampling), err) - } - return nil -} - -func (p *SamplingStrategyResponse) readField3(iprot thrift.TProtocol) error { - p.RateLimitingSampling = &RateLimitingSamplingStrategy{} - if err := p.RateLimitingSampling.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.RateLimitingSampling), err) - } - return nil -} - -func (p *SamplingStrategyResponse) readField4(iprot thrift.TProtocol) error { - p.OperationSampling = &PerOperationSamplingStrategies{} - if err := p.OperationSampling.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationSampling), err) - } - return nil -} - -func (p *SamplingStrategyResponse) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("SamplingStrategyResponse"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *SamplingStrategyResponse) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("strategyType", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:strategyType: ", p), err) - } - if err := oprot.WriteI32(int32(p.StrategyType)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.strategyType (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:strategyType: ", p), err) - } - return err -} - -func (p *SamplingStrategyResponse) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetProbabilisticSampling() { - if err := oprot.WriteFieldBegin("probabilisticSampling", thrift.STRUCT, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:probabilisticSampling: ", p), err) - } - if err := p.ProbabilisticSampling.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ProbabilisticSampling), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:probabilisticSampling: ", p), err) - } - } - return err -} - -func (p *SamplingStrategyResponse) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetRateLimitingSampling() { - if err := oprot.WriteFieldBegin("rateLimitingSampling", thrift.STRUCT, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:rateLimitingSampling: ", p), err) - } - if err := p.RateLimitingSampling.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.RateLimitingSampling), err) - } - if err := 
oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:rateLimitingSampling: ", p), err) - } - } - return err -} - -func (p *SamplingStrategyResponse) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetOperationSampling() { - if err := oprot.WriteFieldBegin("operationSampling", thrift.STRUCT, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:operationSampling: ", p), err) - } - if err := p.OperationSampling.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationSampling), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:operationSampling: ", p), err) - } - } - return err -} - -func (p *SamplingStrategyResponse) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("SamplingStrategyResponse(%+v)", *p) -} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/GoUnusedProtection__.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/GoUnusedProtection__.go new file mode 100644 index 000000000..ebf43018f --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/GoUnusedProtection__.go @@ -0,0 +1,6 @@ +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. + +package zipkincore + +var GoUnusedProtection__ int; + diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/constants.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/constants.go deleted file mode 100644 index dafd2b8e9..000000000 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/constants.go +++ /dev/null @@ -1,32 +0,0 @@ -// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package zipkincore - -import ( - "bytes" - "fmt" - "github.com/apache/thrift/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = bytes.Equal - -const CLIENT_SEND = "cs" -const CLIENT_RECV = "cr" -const SERVER_SEND = "ss" -const SERVER_RECV = "sr" -const WIRE_SEND = "ws" -const WIRE_RECV = "wr" -const CLIENT_SEND_FRAGMENT = "csf" -const CLIENT_RECV_FRAGMENT = "crf" -const SERVER_SEND_FRAGMENT = "ssf" -const SERVER_RECV_FRAGMENT = "srf" -const LOCAL_COMPONENT = "lc" -const CLIENT_ADDR = "ca" -const SERVER_ADDR = "sa" - -func init() { -} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go deleted file mode 100644 index 50c7a9944..000000000 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/ttypes.go +++ /dev/null @@ -1,1247 +0,0 @@ -// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package zipkincore - -import ( - "bytes" - "fmt" - "github.com/apache/thrift/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) 
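
The zipkincore deletions mirror the sampling ones: constants.go (removed above) held the core annotation values — "cs"/"cr" for client send/receive, "ss"/"sr" for server send/receive, plus wire, fragment, and address variants — and ttypes.go (removed below) defines the Endpoint, Annotation, BinaryAnnotation, and Span types that carry them. A sketch, assuming those deleted types, of the duration derivation the Span comments describe (SERVER_SEND.timestamp - SERVER_RECV.timestamp); the helper name serverDurationMicros is made up here:

```go
// Sketch against the deleted zipkincore types: derive a server-side span
// duration in microseconds from the core annotations, as the Span.Duration
// comment suggests (SERVER_SEND.timestamp - SERVER_RECV.timestamp).
func serverDurationMicros(annotations []*Annotation) (int64, bool) {
	var recv, send *Annotation
	for _, a := range annotations {
		switch a.Value {
		case SERVER_RECV: // "sr"
			recv = a
		case SERVER_SEND: // "ss"
			send = a
		}
	}
	if recv == nil || send == nil {
		return 0, false // one side missing: duration stays unset
	}
	return send.Timestamp - recv.Timestamp, true
}
```
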
-var _ = thrift.ZERO -var _ = fmt.Printf -var _ = bytes.Equal - -var GoUnusedProtection__ int - -type AnnotationType int64 - -const ( - AnnotationType_BOOL AnnotationType = 0 - AnnotationType_BYTES AnnotationType = 1 - AnnotationType_I16 AnnotationType = 2 - AnnotationType_I32 AnnotationType = 3 - AnnotationType_I64 AnnotationType = 4 - AnnotationType_DOUBLE AnnotationType = 5 - AnnotationType_STRING AnnotationType = 6 -) - -func (p AnnotationType) String() string { - switch p { - case AnnotationType_BOOL: - return "BOOL" - case AnnotationType_BYTES: - return "BYTES" - case AnnotationType_I16: - return "I16" - case AnnotationType_I32: - return "I32" - case AnnotationType_I64: - return "I64" - case AnnotationType_DOUBLE: - return "DOUBLE" - case AnnotationType_STRING: - return "STRING" - } - return "" -} - -func AnnotationTypeFromString(s string) (AnnotationType, error) { - switch s { - case "BOOL": - return AnnotationType_BOOL, nil - case "BYTES": - return AnnotationType_BYTES, nil - case "I16": - return AnnotationType_I16, nil - case "I32": - return AnnotationType_I32, nil - case "I64": - return AnnotationType_I64, nil - case "DOUBLE": - return AnnotationType_DOUBLE, nil - case "STRING": - return AnnotationType_STRING, nil - } - return AnnotationType(0), fmt.Errorf("not a valid AnnotationType string") -} - -func AnnotationTypePtr(v AnnotationType) *AnnotationType { return &v } - -func (p AnnotationType) MarshalText() ([]byte, error) { - return []byte(p.String()), nil -} - -func (p *AnnotationType) UnmarshalText(text []byte) error { - q, err := AnnotationTypeFromString(string(text)) - if err != nil { - return err - } - *p = q - return nil -} - -// Indicates the network context of a service recording an annotation with two -// exceptions. -// -// When a BinaryAnnotation, and key is CLIENT_ADDR or SERVER_ADDR, -// the endpoint indicates the source or destination of an RPC. This exception -// allows zipkin to display network context of uninstrumented services, or -// clients such as web browsers. -// -// Attributes: -// - Ipv4: IPv4 host address packed into 4 bytes. -// -// Ex for the ip 1.2.3.4, it would be (1 << 24) | (2 << 16) | (3 << 8) | 4 -// - Port: IPv4 port -// -// Note: this is to be treated as an unsigned integer, so watch for negatives. -// -// Conventionally, when the port isn't known, port = 0. -// - ServiceName: Service name in lowercase, such as "memcache" or "zipkin-web" -// -// Conventionally, when the service name isn't known, service_name = "unknown". 
-type Endpoint struct { - Ipv4 int32 `thrift:"ipv4,1" json:"ipv4"` - Port int16 `thrift:"port,2" json:"port"` - ServiceName string `thrift:"service_name,3" json:"service_name"` -} - -func NewEndpoint() *Endpoint { - return &Endpoint{} -} - -func (p *Endpoint) GetIpv4() int32 { - return p.Ipv4 -} - -func (p *Endpoint) GetPort() int16 { - return p.Port -} - -func (p *Endpoint) GetServiceName() string { - return p.ServiceName -} -func (p *Endpoint) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - case 3: - if err := p.readField3(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *Endpoint) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Ipv4 = v - } - return nil -} - -func (p *Endpoint) readField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI16(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.Port = v - } - return nil -} - -func (p *Endpoint) readField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.ServiceName = v - } - return nil -} - -func (p *Endpoint) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("Endpoint"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Endpoint) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("ipv4", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ipv4: ", p), err) - } - if err := oprot.WriteI32(int32(p.Ipv4)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.ipv4 (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ipv4: ", p), err) - } - return err -} - -func (p *Endpoint) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("port", thrift.I16, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:port: ", p), err) - } - if err := oprot.WriteI16(int16(p.Port)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.port (2) 
field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:port: ", p), err) - } - return err -} - -func (p *Endpoint) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("service_name", thrift.STRING, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:service_name: ", p), err) - } - if err := oprot.WriteString(string(p.ServiceName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.service_name (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:service_name: ", p), err) - } - return err -} - -func (p *Endpoint) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Endpoint(%+v)", *p) -} - -// An annotation is similar to a log statement. It includes a host field which -// allows these events to be attributed properly, and also aggregatable. -// -// Attributes: -// - Timestamp: Microseconds from epoch. -// -// This value should use the most precise value possible. For example, -// gettimeofday or syncing nanoTime against a tick of currentTimeMillis. -// - Value -// - Host: Always the host that recorded the event. By specifying the host you allow -// rollup of all events (such as client requests to a service) by IP address. -type Annotation struct { - Timestamp int64 `thrift:"timestamp,1" json:"timestamp"` - Value string `thrift:"value,2" json:"value"` - Host *Endpoint `thrift:"host,3" json:"host,omitempty"` -} - -func NewAnnotation() *Annotation { - return &Annotation{} -} - -func (p *Annotation) GetTimestamp() int64 { - return p.Timestamp -} - -func (p *Annotation) GetValue() string { - return p.Value -} - -var Annotation_Host_DEFAULT *Endpoint - -func (p *Annotation) GetHost() *Endpoint { - if !p.IsSetHost() { - return Annotation_Host_DEFAULT - } - return p.Host -} -func (p *Annotation) IsSetHost() bool { - return p.Host != nil -} - -func (p *Annotation) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - case 3: - if err := p.readField3(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *Annotation) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Timestamp = v - } - return nil -} - -func (p *Annotation) readField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.Value = v - } - return nil -} - -func (p *Annotation) readField3(iprot thrift.TProtocol) error { - p.Host = &Endpoint{} - if err := 
p.Host.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err) - } - return nil -} - -func (p *Annotation) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("Annotation"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Annotation) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err) - } - if err := oprot.WriteI64(int64(p.Timestamp)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) - } - return err -} - -func (p *Annotation) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("value", thrift.STRING, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) - } - if err := oprot.WriteString(string(p.Value)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) - } - return err -} - -func (p *Annotation) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetHost() { - if err := oprot.WriteFieldBegin("host", thrift.STRUCT, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:host: ", p), err) - } - if err := p.Host.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:host: ", p), err) - } - } - return err -} - -func (p *Annotation) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Annotation(%+v)", *p) -} - -// Binary annotations are tags applied to a Span to give it context. For -// example, a binary annotation of "http.uri" could the path to a resource in a -// RPC call. -// -// Binary annotations of type STRING are always queryable, though more a -// historical implementation detail than a structural concern. -// -// Binary annotations can repeat, and vary on the host. Similar to Annotation, -// the host indicates who logged the event. This allows you to tell the -// difference between the client and server side of the same key. For example, -// the key "http.uri" might be different on the client and server side due to -// rewriting, like "/api/v1/myresource" vs "/myresource. Via the host field, -// you can see the different points of view, which often help in debugging. -// -// Attributes: -// - Key -// - Value -// - AnnotationType -// - Host: The host that recorded tag, which allows you to differentiate between -// multiple tags with the same key. 
There are two exceptions to this. -// -// When the key is CLIENT_ADDR or SERVER_ADDR, host indicates the source or -// destination of an RPC. This exception allows zipkin to display network -// context of uninstrumented services, or clients such as web browsers. -type BinaryAnnotation struct { - Key string `thrift:"key,1" json:"key"` - Value []byte `thrift:"value,2" json:"value"` - AnnotationType AnnotationType `thrift:"annotation_type,3" json:"annotation_type"` - Host *Endpoint `thrift:"host,4" json:"host,omitempty"` -} - -func NewBinaryAnnotation() *BinaryAnnotation { - return &BinaryAnnotation{} -} - -func (p *BinaryAnnotation) GetKey() string { - return p.Key -} - -func (p *BinaryAnnotation) GetValue() []byte { - return p.Value -} - -func (p *BinaryAnnotation) GetAnnotationType() AnnotationType { - return p.AnnotationType -} - -var BinaryAnnotation_Host_DEFAULT *Endpoint - -func (p *BinaryAnnotation) GetHost() *Endpoint { - if !p.IsSetHost() { - return BinaryAnnotation_Host_DEFAULT - } - return p.Host -} -func (p *BinaryAnnotation) IsSetHost() bool { - return p.Host != nil -} - -func (p *BinaryAnnotation) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - case 3: - if err := p.readField3(iprot); err != nil { - return err - } - case 4: - if err := p.readField4(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *BinaryAnnotation) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Key = v - } - return nil -} - -func (p *BinaryAnnotation) readField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.Value = v - } - return nil -} - -func (p *BinaryAnnotation) readField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - temp := AnnotationType(v) - p.AnnotationType = temp - } - return nil -} - -func (p *BinaryAnnotation) readField4(iprot thrift.TProtocol) error { - p.Host = &Endpoint{} - if err := p.Host.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err) - } - return nil -} - -func (p *BinaryAnnotation) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("BinaryAnnotation"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return 
err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *BinaryAnnotation) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("key", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) - } - if err := oprot.WriteString(string(p.Key)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) - } - return err -} - -func (p *BinaryAnnotation) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("value", thrift.STRING, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) - } - if err := oprot.WriteBinary(p.Value); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) - } - return err -} - -func (p *BinaryAnnotation) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("annotation_type", thrift.I32, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:annotation_type: ", p), err) - } - if err := oprot.WriteI32(int32(p.AnnotationType)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.annotation_type (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:annotation_type: ", p), err) - } - return err -} - -func (p *BinaryAnnotation) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetHost() { - if err := oprot.WriteFieldBegin("host", thrift.STRUCT, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:host: ", p), err) - } - if err := p.Host.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:host: ", p), err) - } - } - return err -} - -func (p *BinaryAnnotation) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("BinaryAnnotation(%+v)", *p) -} - -// A trace is a series of spans (often RPC calls) which form a latency tree. -// -// The root span is where trace_id = id and parent_id = Nil. The root span is -// usually the longest interval in the trace, starting with a SERVER_RECV -// annotation and ending with a SERVER_SEND. -// -// Attributes: -// - TraceID -// - Name: Span name in lowercase, rpc method for example -// -// Conventionally, when the span name isn't known, name = "unknown". -// - ID -// - ParentID -// - Annotations -// - BinaryAnnotations -// - Debug -// - Timestamp: Microseconds from epoch of the creation of this span. -// -// This value should be set directly by instrumentation, using the most -// precise value possible. For example, gettimeofday or syncing nanoTime -// against a tick of currentTimeMillis. 
-// -// For compatibilty with instrumentation that precede this field, collectors -// or span stores can derive this via Annotation.timestamp. -// For example, SERVER_RECV.timestamp or CLIENT_SEND.timestamp. -// -// This field is optional for compatibility with old data: first-party span -// stores are expected to support this at time of introduction. -// - Duration: Measurement of duration in microseconds, used to support queries. -// -// This value should be set directly, where possible. Doing so encourages -// precise measurement decoupled from problems of clocks, such as skew or NTP -// updates causing time to move backwards. -// -// For compatibilty with instrumentation that precede this field, collectors -// or span stores can derive this by subtracting Annotation.timestamp. -// For example, SERVER_SEND.timestamp - SERVER_RECV.timestamp. -// -// If this field is persisted as unset, zipkin will continue to work, except -// duration query support will be implementation-specific. Similarly, setting -// this field non-atomically is implementation-specific. -// -// This field is i64 vs i32 to support spans longer than 35 minutes. -type Span struct { - TraceID int64 `thrift:"trace_id,1" json:"trace_id"` - // unused field # 2 - Name string `thrift:"name,3" json:"name"` - ID int64 `thrift:"id,4" json:"id"` - ParentID *int64 `thrift:"parent_id,5" json:"parent_id,omitempty"` - Annotations []*Annotation `thrift:"annotations,6" json:"annotations"` - // unused field # 7 - BinaryAnnotations []*BinaryAnnotation `thrift:"binary_annotations,8" json:"binary_annotations"` - Debug bool `thrift:"debug,9" json:"debug,omitempty"` - Timestamp *int64 `thrift:"timestamp,10" json:"timestamp,omitempty"` - Duration *int64 `thrift:"duration,11" json:"duration,omitempty"` -} - -func NewSpan() *Span { - return &Span{} -} - -func (p *Span) GetTraceID() int64 { - return p.TraceID -} - -func (p *Span) GetName() string { - return p.Name -} - -func (p *Span) GetID() int64 { - return p.ID -} - -var Span_ParentID_DEFAULT int64 - -func (p *Span) GetParentID() int64 { - if !p.IsSetParentID() { - return Span_ParentID_DEFAULT - } - return *p.ParentID -} - -func (p *Span) GetAnnotations() []*Annotation { - return p.Annotations -} - -func (p *Span) GetBinaryAnnotations() []*BinaryAnnotation { - return p.BinaryAnnotations -} - -var Span_Debug_DEFAULT bool = false - -func (p *Span) GetDebug() bool { - return p.Debug -} - -var Span_Timestamp_DEFAULT int64 - -func (p *Span) GetTimestamp() int64 { - if !p.IsSetTimestamp() { - return Span_Timestamp_DEFAULT - } - return *p.Timestamp -} - -var Span_Duration_DEFAULT int64 - -func (p *Span) GetDuration() int64 { - if !p.IsSetDuration() { - return Span_Duration_DEFAULT - } - return *p.Duration -} -func (p *Span) IsSetParentID() bool { - return p.ParentID != nil -} - -func (p *Span) IsSetDebug() bool { - return p.Debug != Span_Debug_DEFAULT -} - -func (p *Span) IsSetTimestamp() bool { - return p.Timestamp != nil -} - -func (p *Span) IsSetDuration() bool { - return p.Duration != nil -} - -func (p *Span) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - case 3: - if err 
:= p.readField3(iprot); err != nil { - return err - } - case 4: - if err := p.readField4(iprot); err != nil { - return err - } - case 5: - if err := p.readField5(iprot); err != nil { - return err - } - case 6: - if err := p.readField6(iprot); err != nil { - return err - } - case 8: - if err := p.readField8(iprot); err != nil { - return err - } - case 9: - if err := p.readField9(iprot); err != nil { - return err - } - case 10: - if err := p.readField10(iprot); err != nil { - return err - } - case 11: - if err := p.readField11(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *Span) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.TraceID = v - } - return nil -} - -func (p *Span) readField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.Name = v - } - return nil -} - -func (p *Span) readField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.ID = v - } - return nil -} - -func (p *Span) readField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 5: ", err) - } else { - p.ParentID = &v - } - return nil -} - -func (p *Span) readField6(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Annotation, 0, size) - p.Annotations = tSlice - for i := 0; i < size; i++ { - _elem0 := &Annotation{} - if err := _elem0.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) - } - p.Annotations = append(p.Annotations, _elem0) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Span) readField8(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*BinaryAnnotation, 0, size) - p.BinaryAnnotations = tSlice - for i := 0; i < size; i++ { - _elem1 := &BinaryAnnotation{} - if err := _elem1.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err) - } - p.BinaryAnnotations = append(p.BinaryAnnotations, _elem1) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Span) readField9(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 9: ", err) - } else { - p.Debug = v - } - return nil -} - -func (p *Span) readField10(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 10: ", err) - } else { - p.Timestamp = &v - } - return nil -} - -func (p *Span) readField11(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return 
thrift.PrependError("error reading field 11: ", err) - } else { - p.Duration = &v - } - return nil -} - -func (p *Span) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("Span"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - if err := p.writeField5(oprot); err != nil { - return err - } - if err := p.writeField6(oprot); err != nil { - return err - } - if err := p.writeField8(oprot); err != nil { - return err - } - if err := p.writeField9(oprot); err != nil { - return err - } - if err := p.writeField10(oprot); err != nil { - return err - } - if err := p.writeField11(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Span) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("trace_id", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:trace_id: ", p), err) - } - if err := oprot.WriteI64(int64(p.TraceID)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.trace_id (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:trace_id: ", p), err) - } - return err -} - -func (p *Span) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("name", thrift.STRING, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:name: ", p), err) - } - if err := oprot.WriteString(string(p.Name)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.name (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:name: ", p), err) - } - return err -} - -func (p *Span) writeField4(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("id", thrift.I64, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:id: ", p), err) - } - if err := oprot.WriteI64(int64(p.ID)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.id (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:id: ", p), err) - } - return err -} - -func (p *Span) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetParentID() { - if err := oprot.WriteFieldBegin("parent_id", thrift.I64, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:parent_id: ", p), err) - } - if err := oprot.WriteI64(int64(*p.ParentID)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.parent_id (5) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:parent_id: ", p), err) - } - } - return err -} - -func (p *Span) writeField6(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("annotations", thrift.LIST, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 
6:annotations: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Annotations)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Annotations { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:annotations: ", p), err) - } - return err -} - -func (p *Span) writeField8(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("binary_annotations", thrift.LIST, 8); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:binary_annotations: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.BinaryAnnotations)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.BinaryAnnotations { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 8:binary_annotations: ", p), err) - } - return err -} - -func (p *Span) writeField9(oprot thrift.TProtocol) (err error) { - if p.IsSetDebug() { - if err := oprot.WriteFieldBegin("debug", thrift.BOOL, 9); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:debug: ", p), err) - } - if err := oprot.WriteBool(bool(p.Debug)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.debug (9) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 9:debug: ", p), err) - } - } - return err -} - -func (p *Span) writeField10(oprot thrift.TProtocol) (err error) { - if p.IsSetTimestamp() { - if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 10); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:timestamp: ", p), err) - } - if err := oprot.WriteI64(int64(*p.Timestamp)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.timestamp (10) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 10:timestamp: ", p), err) - } - } - return err -} - -func (p *Span) writeField11(oprot thrift.TProtocol) (err error) { - if p.IsSetDuration() { - if err := oprot.WriteFieldBegin("duration", thrift.I64, 11); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:duration: ", p), err) - } - if err := oprot.WriteI64(int64(*p.Duration)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.duration (11) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 11:duration: ", p), err) - } - } - return err -} - -func (p *Span) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Span(%+v)", *p) -} - -// Attributes: -// - Ok -type Response struct { - Ok bool `thrift:"ok,1,required" json:"ok"` -} - -func NewResponse() *Response { - return &Response{} -} - -func (p *Response) GetOk() bool { - return p.Ok -} 
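
[Editor's note, for orientation while reading the deleted generated code: Response carries a single required field, and the generated Read that follows rejects payloads where it is missing. Below is a minimal, hypothetical round-trip sketch against the pre-context thrift 0.9.x API this vendored file was generated for; the in-memory transport choice is an assumption, not part of the diff.]

// Hypothetical sketch: round-trip a Response through an in-memory buffer,
// using the context-free 0.9.x signatures shown in the deleted code.
package main

import (
	"fmt"

	"github.com/apache/thrift/lib/go/thrift"
	"github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
)

func main() {
	buf := thrift.NewTMemoryBuffer()
	proto := thrift.NewTBinaryProtocolTransport(buf)

	in := &zipkincore.Response{Ok: true}
	if err := in.Write(proto); err != nil { // old signature: no context argument
		panic(err)
	}

	out := zipkincore.NewResponse()
	// Read returns an INVALID_DATA protocol exception if the required
	// field "ok" is absent from the payload.
	if err := out.Read(proto); err != nil {
		panic(err)
	}
	fmt.Println(out.GetOk()) // true
}
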
-func (p *Response) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetOk bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetOk = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetOk { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set")) - } - return nil -} - -func (p *Response) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Ok = v - } - return nil -} - -func (p *Response) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("Response"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Response) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("ok", thrift.BOOL, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err) - } - if err := oprot.WriteBool(bool(p.Ok)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err) - } - return err -} - -func (p *Response) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Response(%+v)", *p) -} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincollector.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincollector.go deleted file mode 100644 index bcc54f36e..000000000 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincollector.go +++ /dev/null @@ -1,446 +0,0 @@ -// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package zipkincore - -import ( - "bytes" - "fmt" - "github.com/apache/thrift/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) 
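
[Editor's note: the deleted file continues below with the ZipkinCollector interface, client, and processor. As a hedged sketch only, this is how such a generated client was conventionally wired under the pre-context thrift 0.9.x API; the socket address is a placeholder and the helper function is hypothetical, not part of this repository.]

// Hypothetical wiring of the generated collector client defined just below.
package main

import (
	"github.com/apache/thrift/lib/go/thrift"
	"github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
)

func submit(spans []*zipkincore.Span) ([]*zipkincore.Response, error) {
	sock, err := thrift.NewTSocket("collector.example:9410") // placeholder address
	if err != nil {
		return nil, err
	}
	trans := thrift.NewTFramedTransport(sock)
	if err := trans.Open(); err != nil {
		return nil, err
	}
	defer trans.Close()

	client := zipkincore.NewZipkinCollectorClientFactory(trans, thrift.NewTBinaryProtocolFactoryDefault())
	// One synchronous request/response pair, matching the interface below.
	return client.SubmitZipkinBatch(spans)
}
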
-var _ = thrift.ZERO -var _ = fmt.Printf -var _ = bytes.Equal - -type ZipkinCollector interface { - // Parameters: - // - Spans - SubmitZipkinBatch(spans []*Span) (r []*Response, err error) -} - -type ZipkinCollectorClient struct { - Transport thrift.TTransport - ProtocolFactory thrift.TProtocolFactory - InputProtocol thrift.TProtocol - OutputProtocol thrift.TProtocol - SeqId int32 -} - -func NewZipkinCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *ZipkinCollectorClient { - return &ZipkinCollectorClient{Transport: t, - ProtocolFactory: f, - InputProtocol: f.GetProtocol(t), - OutputProtocol: f.GetProtocol(t), - SeqId: 0, - } -} - -func NewZipkinCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *ZipkinCollectorClient { - return &ZipkinCollectorClient{Transport: t, - ProtocolFactory: nil, - InputProtocol: iprot, - OutputProtocol: oprot, - SeqId: 0, - } -} - -// Parameters: -// - Spans -func (p *ZipkinCollectorClient) SubmitZipkinBatch(spans []*Span) (r []*Response, err error) { - if err = p.sendSubmitZipkinBatch(spans); err != nil { - return - } - return p.recvSubmitZipkinBatch() -} - -func (p *ZipkinCollectorClient) sendSubmitZipkinBatch(spans []*Span) (err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("submitZipkinBatch", thrift.CALL, p.SeqId); err != nil { - return - } - args := ZipkinCollectorSubmitZipkinBatchArgs{ - Spans: spans, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush() -} - -func (p *ZipkinCollectorClient) recvSubmitZipkinBatch() (value []*Response, err error) { - iprot := p.InputProtocol - if iprot == nil { - iprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.InputProtocol = iprot - } - method, mTypeId, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return - } - if method != "submitZipkinBatch" { - err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "submitZipkinBatch failed: wrong method name") - return - } - if p.SeqId != seqId { - err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "submitZipkinBatch failed: out of sequence response") - return - } - if mTypeId == thrift.EXCEPTION { - error2 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error3 error - error3, err = error2.Read(iprot) - if err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - err = error3 - return - } - if mTypeId != thrift.REPLY { - err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "submitZipkinBatch failed: invalid message type") - return - } - result := ZipkinCollectorSubmitZipkinBatchResult{} - if err = result.Read(iprot); err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - value = result.GetSuccess() - return -} - -type ZipkinCollectorProcessor struct { - processorMap map[string]thrift.TProcessorFunction - handler ZipkinCollector -} - -func (p *ZipkinCollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor -} - -func (p *ZipkinCollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok -} - -func (p *ZipkinCollectorProcessor) ProcessorMap() 
map[string]thrift.TProcessorFunction { - return p.processorMap -} - -func NewZipkinCollectorProcessor(handler ZipkinCollector) *ZipkinCollectorProcessor { - - self4 := &ZipkinCollectorProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - self4.processorMap["submitZipkinBatch"] = &zipkinCollectorProcessorSubmitZipkinBatch{handler: handler} - return self4 -} - -func (p *ZipkinCollectorProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - name, _, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return false, err - } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(seqId, iprot, oprot) - } - iprot.Skip(thrift.STRUCT) - iprot.ReadMessageEnd() - x5 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) - oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) - x5.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, x5 - -} - -type zipkinCollectorProcessorSubmitZipkinBatch struct { - handler ZipkinCollector -} - -func (p *zipkinCollectorProcessorSubmitZipkinBatch) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := ZipkinCollectorSubmitZipkinBatchArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("submitZipkinBatch", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, err - } - - iprot.ReadMessageEnd() - result := ZipkinCollectorSubmitZipkinBatchResult{} - var retval []*Response - var err2 error - if retval, err2 = p.handler.SubmitZipkinBatch(args.Spans); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitZipkinBatch: "+err2.Error()) - oprot.WriteMessageBegin("submitZipkinBatch", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("submitZipkinBatch", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -// HELPER FUNCTIONS AND STRUCTURES - -// Attributes: -// - Spans -type ZipkinCollectorSubmitZipkinBatchArgs struct { - Spans []*Span `thrift:"spans,1" json:"spans"` -} - -func NewZipkinCollectorSubmitZipkinBatchArgs() *ZipkinCollectorSubmitZipkinBatchArgs { - return &ZipkinCollectorSubmitZipkinBatchArgs{} -} - -func (p *ZipkinCollectorSubmitZipkinBatchArgs) GetSpans() []*Span { - return p.Spans -} -func (p *ZipkinCollectorSubmitZipkinBatchArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err 
- } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchArgs) readField1(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Span, 0, size) - p.Spans = tSlice - for i := 0; i < size; i++ { - _elem6 := &Span{} - if err := _elem6.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem6), err) - } - p.Spans = append(p.Spans, _elem6) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("submitZipkinBatch_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("spans", thrift.LIST, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Spans)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Spans { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err) - } - return err -} - -func (p *ZipkinCollectorSubmitZipkinBatchArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchArgs(%+v)", *p) -} - -// Attributes: -// - Success -type ZipkinCollectorSubmitZipkinBatchResult struct { - Success []*Response `thrift:"success,0" json:"success,omitempty"` -} - -func NewZipkinCollectorSubmitZipkinBatchResult() *ZipkinCollectorSubmitZipkinBatchResult { - return &ZipkinCollectorSubmitZipkinBatchResult{} -} - -var ZipkinCollectorSubmitZipkinBatchResult_Success_DEFAULT []*Response - -func (p *ZipkinCollectorSubmitZipkinBatchResult) GetSuccess() []*Response { - return p.Success -} -func (p *ZipkinCollectorSubmitZipkinBatchResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if err := p.readField0(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err 
:= iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchResult) readField0(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Response, 0, size) - p.Success = tSlice - for i := 0; i < size; i++ { - _elem7 := &Response{} - if err := _elem7.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem7), err) - } - p.Success = append(p.Success, _elem7) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("submitZipkinBatch_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField0(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchResult) writeField0(oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.LIST, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Success)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Success { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *ZipkinCollectorSubmitZipkinBatchResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchResult(%+v)", *p) -} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore-consts.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore-consts.go new file mode 100644 index 000000000..7a924b977 --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore-consts.go @@ -0,0 +1,39 @@ +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. + +package zipkincore + +import( + "bytes" + "context" + "fmt" + "time" + "github.com/uber/jaeger-client-go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) 
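
[Editor's note: the constants declared in this new file are Zipkin's standard annotation values ("cs"/"cr" for client send/receive, "ss"/"sr" for server send/receive, and the address keys "ca"/"sa"). A small illustrative sketch of their conventional use follows; the port, service name, and helper are assumptions for the example, not taken from the diff.]

// Illustrative only: mark the two client-side RPC events on a span using the
// constants declared in this file. Timestamps are microseconds from epoch.
package zipkinexample

import (
	"time"

	"github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
)

func clientAnnotations(start, end time.Time) []*zipkincore.Annotation {
	host := &zipkincore.Endpoint{
		Ipv4:        (1 << 24) | (2 << 16) | (3 << 8) | 4, // 1.2.3.4 packed as the Endpoint docs describe
		Port:        8080,                                 // illustrative port
		ServiceName: "frontend",                           // illustrative service name
	}
	return []*zipkincore.Annotation{
		{Timestamp: start.UnixNano() / 1000, Value: zipkincore.CLIENT_SEND, Host: host}, // "cs"
		{Timestamp: end.UnixNano() / 1000, Value: zipkincore.CLIENT_RECV, Host: host},   // "cr"
	}
}
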
+var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = time.Now +var _ = bytes.Equal + +const CLIENT_SEND = "cs" +const CLIENT_RECV = "cr" +const SERVER_SEND = "ss" +const SERVER_RECV = "sr" +const MESSAGE_SEND = "ms" +const MESSAGE_RECV = "mr" +const WIRE_SEND = "ws" +const WIRE_RECV = "wr" +const CLIENT_SEND_FRAGMENT = "csf" +const CLIENT_RECV_FRAGMENT = "crf" +const SERVER_SEND_FRAGMENT = "ssf" +const SERVER_RECV_FRAGMENT = "srf" +const LOCAL_COMPONENT = "lc" +const CLIENT_ADDR = "ca" +const SERVER_ADDR = "sa" +const MESSAGE_ADDR = "ma" + +func init() { +} + diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore.go new file mode 100644 index 000000000..b00ecd23f --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore.go @@ -0,0 +1,1853 @@ +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. + +package zipkincore + +import( + "bytes" + "context" + "database/sql/driver" + "errors" + "fmt" + "time" + "github.com/uber/jaeger-client-go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = time.Now +var _ = bytes.Equal + +type AnnotationType int64 +const ( + AnnotationType_BOOL AnnotationType = 0 + AnnotationType_BYTES AnnotationType = 1 + AnnotationType_I16 AnnotationType = 2 + AnnotationType_I32 AnnotationType = 3 + AnnotationType_I64 AnnotationType = 4 + AnnotationType_DOUBLE AnnotationType = 5 + AnnotationType_STRING AnnotationType = 6 +) + +func (p AnnotationType) String() string { + switch p { + case AnnotationType_BOOL: return "BOOL" + case AnnotationType_BYTES: return "BYTES" + case AnnotationType_I16: return "I16" + case AnnotationType_I32: return "I32" + case AnnotationType_I64: return "I64" + case AnnotationType_DOUBLE: return "DOUBLE" + case AnnotationType_STRING: return "STRING" + } + return "" +} + +func AnnotationTypeFromString(s string) (AnnotationType, error) { + switch s { + case "BOOL": return AnnotationType_BOOL, nil + case "BYTES": return AnnotationType_BYTES, nil + case "I16": return AnnotationType_I16, nil + case "I32": return AnnotationType_I32, nil + case "I64": return AnnotationType_I64, nil + case "DOUBLE": return AnnotationType_DOUBLE, nil + case "STRING": return AnnotationType_STRING, nil + } + return AnnotationType(0), fmt.Errorf("not a valid AnnotationType string") +} + + +func AnnotationTypePtr(v AnnotationType) *AnnotationType { return &v } + +func (p AnnotationType) MarshalText() ([]byte, error) { +return []byte(p.String()), nil +} + +func (p *AnnotationType) UnmarshalText(text []byte) error { +q, err := AnnotationTypeFromString(string(text)) +if (err != nil) { +return err +} +*p = q +return nil +} + +func (p *AnnotationType) Scan(value interface{}) error { +v, ok := value.(int64) +if !ok { +return errors.New("Scan value is not int64") +} +*p = AnnotationType(v) +return nil +} + +func (p * AnnotationType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } +return int64(*p), nil +} +// Indicates the network context of a service recording an annotation with two +// exceptions. +// +// When a BinaryAnnotation, and key is CLIENT_ADDR or SERVER_ADDR, +// the endpoint indicates the source or destination of an RPC. 
This exception +// allows zipkin to display network context of uninstrumented services, or +// clients such as web browsers. +// +// Attributes: +// - Ipv4: IPv4 host address packed into 4 bytes. +// +// Ex for the ip 1.2.3.4, it would be (1 << 24) | (2 << 16) | (3 << 8) | 4 +// - Port: IPv4 port +// +// Note: this is to be treated as an unsigned integer, so watch for negatives. +// +// Conventionally, when the port isn't known, port = 0. +// - ServiceName: Service name in lowercase, such as "memcache" or "zipkin-web" +// +// Conventionally, when the service name isn't known, service_name = "unknown". +// - Ipv6: IPv6 host address packed into 16 bytes. Ex Inet6Address.getBytes() +type Endpoint struct { + Ipv4 int32 `thrift:"ipv4,1" db:"ipv4" json:"ipv4"` + Port int16 `thrift:"port,2" db:"port" json:"port"` + ServiceName string `thrift:"service_name,3" db:"service_name" json:"service_name"` + Ipv6 []byte `thrift:"ipv6,4" db:"ipv6" json:"ipv6,omitempty"` +} + +func NewEndpoint() *Endpoint { + return &Endpoint{} +} + + +func (p *Endpoint) GetIpv4() int32 { + return p.Ipv4 +} + +func (p *Endpoint) GetPort() int16 { + return p.Port +} + +func (p *Endpoint) GetServiceName() string { + return p.ServiceName +} +var Endpoint_Ipv6_DEFAULT []byte + +func (p *Endpoint) GetIpv6() []byte { + return p.Ipv6 +} +func (p *Endpoint) IsSetIpv6() bool { + return p.Ipv6 != nil +} + +func (p *Endpoint) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I16 { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *Endpoint) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.Ipv4 = v +} + return nil +} + +func (p *Endpoint) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI16(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.Port = v +} + return nil +} + +func (p *Endpoint) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + 
return thrift.PrependError("error reading field 3: ", err) +} else { + p.ServiceName = v +} + return nil +} + +func (p *Endpoint) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) +} else { + p.Ipv6 = v +} + return nil +} + +func (p *Endpoint) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Endpoint"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + if err := p.writeField4(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *Endpoint) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "ipv4", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ipv4: ", p), err) } + if err := oprot.WriteI32(ctx, int32(p.Ipv4)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ipv4 (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ipv4: ", p), err) } + return err +} + +func (p *Endpoint) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "port", thrift.I16, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:port: ", p), err) } + if err := oprot.WriteI16(ctx, int16(p.Port)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.port (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:port: ", p), err) } + return err +} + +func (p *Endpoint) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "service_name", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:service_name: ", p), err) } + if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.service_name (3) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:service_name: ", p), err) } + return err +} + +func (p *Endpoint) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetIpv6() { + if err := oprot.WriteFieldBegin(ctx, "ipv6", thrift.STRING, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:ipv6: ", p), err) } + if err := oprot.WriteBinary(ctx, p.Ipv6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ipv6 (4) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:ipv6: ", p), err) } + } + return err +} + +func (p *Endpoint) Equals(other *Endpoint) bool { + if p == other { + return true + } else if p == nil || other == 
nil { + return false + } + if p.Ipv4 != other.Ipv4 { return false } + if p.Port != other.Port { return false } + if p.ServiceName != other.ServiceName { return false } + if bytes.Compare(p.Ipv6, other.Ipv6) != 0 { return false } + return true +} + +func (p *Endpoint) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Endpoint(%+v)", *p) +} + +// An annotation is similar to a log statement. It includes a host field which +// allows these events to be attributed properly, and also aggregatable. +// +// Attributes: +// - Timestamp: Microseconds from epoch. +// +// This value should use the most precise value possible. For example, +// gettimeofday or syncing nanoTime against a tick of currentTimeMillis. +// - Value +// - Host: Always the host that recorded the event. By specifying the host you allow +// rollup of all events (such as client requests to a service) by IP address. +type Annotation struct { + Timestamp int64 `thrift:"timestamp,1" db:"timestamp" json:"timestamp"` + Value string `thrift:"value,2" db:"value" json:"value"` + Host *Endpoint `thrift:"host,3" db:"host" json:"host,omitempty"` +} + +func NewAnnotation() *Annotation { + return &Annotation{} +} + + +func (p *Annotation) GetTimestamp() int64 { + return p.Timestamp +} + +func (p *Annotation) GetValue() string { + return p.Value +} +var Annotation_Host_DEFAULT *Endpoint +func (p *Annotation) GetHost() *Endpoint { + if !p.IsSetHost() { + return Annotation_Host_DEFAULT + } +return p.Host +} +func (p *Annotation) IsSetHost() bool { + return p.Host != nil +} + +func (p *Annotation) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *Annotation) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.Timestamp = v +} + return nil +} + +func (p *Annotation) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.Value = v +} + return nil +} + +func (p *Annotation) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + p.Host = &Endpoint{} + if err := p.Host.Read(ctx, 
iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err) + } + return nil +} + +func (p *Annotation) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Annotation"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *Annotation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err) } + if err := oprot.WriteI64(ctx, int64(p.Timestamp)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) } + return err +} + +func (p *Annotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) } + if err := oprot.WriteString(ctx, string(p.Value)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) } + return err +} + +func (p *Annotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetHost() { + if err := oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:host: ", p), err) } + if err := p.Host.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:host: ", p), err) } + } + return err +} + +func (p *Annotation) Equals(other *Annotation) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Timestamp != other.Timestamp { return false } + if p.Value != other.Value { return false } + if !p.Host.Equals(other.Host) { return false } + return true +} + +func (p *Annotation) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Annotation(%+v)", *p) +} + +// Binary annotations are tags applied to a Span to give it context. For +// example, a binary annotation of "http.uri" could the path to a resource in a +// RPC call. +// +// Binary annotations of type STRING are always queryable, though more a +// historical implementation detail than a structural concern. +// +// Binary annotations can repeat, and vary on the host. Similar to Annotation, +// the host indicates who logged the event. This allows you to tell the +// difference between the client and server side of the same key. 
For example, +// the key "http.uri" might be different on the client and server side due to +// rewriting, like "/api/v1/myresource" vs "/myresource. Via the host field, +// you can see the different points of view, which often help in debugging. +// +// Attributes: +// - Key +// - Value +// - AnnotationType +// - Host: The host that recorded tag, which allows you to differentiate between +// multiple tags with the same key. There are two exceptions to this. +// +// When the key is CLIENT_ADDR or SERVER_ADDR, host indicates the source or +// destination of an RPC. This exception allows zipkin to display network +// context of uninstrumented services, or clients such as web browsers. +type BinaryAnnotation struct { + Key string `thrift:"key,1" db:"key" json:"key"` + Value []byte `thrift:"value,2" db:"value" json:"value"` + AnnotationType AnnotationType `thrift:"annotation_type,3" db:"annotation_type" json:"annotation_type"` + Host *Endpoint `thrift:"host,4" db:"host" json:"host,omitempty"` +} + +func NewBinaryAnnotation() *BinaryAnnotation { + return &BinaryAnnotation{} +} + + +func (p *BinaryAnnotation) GetKey() string { + return p.Key +} + +func (p *BinaryAnnotation) GetValue() []byte { + return p.Value +} + +func (p *BinaryAnnotation) GetAnnotationType() AnnotationType { + return p.AnnotationType +} +var BinaryAnnotation_Host_DEFAULT *Endpoint +func (p *BinaryAnnotation) GetHost() *Endpoint { + if !p.IsSetHost() { + return BinaryAnnotation_Host_DEFAULT + } +return p.Host +} +func (p *BinaryAnnotation) IsSetHost() bool { + return p.Host != nil +} + +func (p *BinaryAnnotation) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *BinaryAnnotation) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.Key = v +} + return nil +} + +func (p *BinaryAnnotation) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return 
thrift.PrependError("error reading field 2: ", err) +} else { + p.Value = v +} + return nil +} + +func (p *BinaryAnnotation) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) +} else { + temp := AnnotationType(v) + p.AnnotationType = temp +} + return nil +} + +func (p *BinaryAnnotation) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + p.Host = &Endpoint{} + if err := p.Host.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err) + } + return nil +} + +func (p *BinaryAnnotation) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "BinaryAnnotation"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField2(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + if err := p.writeField4(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *BinaryAnnotation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "key", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) } + if err := oprot.WriteString(ctx, string(p.Key)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) } + return err +} + +func (p *BinaryAnnotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) } + if err := oprot.WriteBinary(ctx, p.Value); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) } + return err +} + +func (p *BinaryAnnotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "annotation_type", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:annotation_type: ", p), err) } + if err := oprot.WriteI32(ctx, int32(p.AnnotationType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.annotation_type (3) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:annotation_type: ", p), err) } + return err +} + +func (p *BinaryAnnotation) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetHost() { + if err := oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:host: ", p), err) } + if err := p.Host.Write(ctx, oprot); err != nil { + 
return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:host: ", p), err) } + } + return err +} + +func (p *BinaryAnnotation) Equals(other *BinaryAnnotation) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Key != other.Key { return false } + if bytes.Compare(p.Value, other.Value) != 0 { return false } + if p.AnnotationType != other.AnnotationType { return false } + if !p.Host.Equals(other.Host) { return false } + return true +} + +func (p *BinaryAnnotation) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BinaryAnnotation(%+v)", *p) +} + +// A trace is a series of spans (often RPC calls) which form a latency tree. +// +// The root span is where trace_id = id and parent_id = Nil. The root span is +// usually the longest interval in the trace, starting with a SERVER_RECV +// annotation and ending with a SERVER_SEND. +// +// Attributes: +// - TraceID +// - Name: Span name in lowercase, rpc method for example +// +// Conventionally, when the span name isn't known, name = "unknown". +// - ID +// - ParentID +// - Annotations +// - BinaryAnnotations +// - Debug +// - Timestamp: Microseconds from epoch of the creation of this span. +// +// This value should be set directly by instrumentation, using the most +// precise value possible. For example, gettimeofday or syncing nanoTime +// against a tick of currentTimeMillis. +// +// For compatibility with instrumentation that precede this field, collectors +// or span stores can derive this via Annotation.timestamp. +// For example, SERVER_RECV.timestamp or CLIENT_SEND.timestamp. +// +// This field is optional for compatibility with old data: first-party span +// stores are expected to support this at time of introduction. +// - Duration: Measurement of duration in microseconds, used to support queries. +// +// This value should be set directly, where possible. Doing so encourages +// precise measurement decoupled from problems of clocks, such as skew or NTP +// updates causing time to move backwards. +// +// For compatibility with instrumentation that precede this field, collectors +// or span stores can derive this by subtracting Annotation.timestamp. +// For example, SERVER_SEND.timestamp - SERVER_RECV.timestamp. +// +// If this field is persisted as unset, zipkin will continue to work, except +// duration query support will be implementation-specific. Similarly, setting +// this field non-atomically is implementation-specific. +// +// This field is i64 vs i32 to support spans longer than 35 minutes. +// - TraceIDHigh: Optional unique 8-byte additional identifier for a trace. If non zero, this +// means the trace uses 128 bit traceIds instead of 64 bit. 
+type Span struct { + TraceID int64 `thrift:"trace_id,1" db:"trace_id" json:"trace_id"` + // unused field # 2 + Name string `thrift:"name,3" db:"name" json:"name"` + ID int64 `thrift:"id,4" db:"id" json:"id"` + ParentID *int64 `thrift:"parent_id,5" db:"parent_id" json:"parent_id,omitempty"` + Annotations []*Annotation `thrift:"annotations,6" db:"annotations" json:"annotations"` + // unused field # 7 + BinaryAnnotations []*BinaryAnnotation `thrift:"binary_annotations,8" db:"binary_annotations" json:"binary_annotations"` + Debug bool `thrift:"debug,9" db:"debug" json:"debug"` + Timestamp *int64 `thrift:"timestamp,10" db:"timestamp" json:"timestamp,omitempty"` + Duration *int64 `thrift:"duration,11" db:"duration" json:"duration,omitempty"` + TraceIDHigh *int64 `thrift:"trace_id_high,12" db:"trace_id_high" json:"trace_id_high,omitempty"` +} + +func NewSpan() *Span { + return &Span{} +} + + +func (p *Span) GetTraceID() int64 { + return p.TraceID +} + +func (p *Span) GetName() string { + return p.Name +} + +func (p *Span) GetID() int64 { + return p.ID +} +var Span_ParentID_DEFAULT int64 +func (p *Span) GetParentID() int64 { + if !p.IsSetParentID() { + return Span_ParentID_DEFAULT + } +return *p.ParentID +} + +func (p *Span) GetAnnotations() []*Annotation { + return p.Annotations +} + +func (p *Span) GetBinaryAnnotations() []*BinaryAnnotation { + return p.BinaryAnnotations +} +var Span_Debug_DEFAULT bool = false + +func (p *Span) GetDebug() bool { + return p.Debug +} +var Span_Timestamp_DEFAULT int64 +func (p *Span) GetTimestamp() int64 { + if !p.IsSetTimestamp() { + return Span_Timestamp_DEFAULT + } +return *p.Timestamp +} +var Span_Duration_DEFAULT int64 +func (p *Span) GetDuration() int64 { + if !p.IsSetDuration() { + return Span_Duration_DEFAULT + } +return *p.Duration +} +var Span_TraceIDHigh_DEFAULT int64 +func (p *Span) GetTraceIDHigh() int64 { + if !p.IsSetTraceIDHigh() { + return Span_TraceIDHigh_DEFAULT + } +return *p.TraceIDHigh +} +func (p *Span) IsSetParentID() bool { + return p.ParentID != nil +} + +func (p *Span) IsSetDebug() bool { + return p.Debug != Span_Debug_DEFAULT +} + +func (p *Span) IsSetTimestamp() bool { + return p.Timestamp != nil +} + +func (p *Span) IsSetDuration() bool { + return p.Duration != nil +} + +func (p *Span) IsSetTraceIDHigh() bool { + return p.TraceIDHigh != nil +} + +func (p *Span) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I64 { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I64 { + if err := p.ReadField5(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err 
!= nil { + return err + } + } + case 6: + if fieldTypeId == thrift.LIST { + if err := p.ReadField6(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.LIST { + if err := p.ReadField8(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 9: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField9(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 10: + if fieldTypeId == thrift.I64 { + if err := p.ReadField10(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 11: + if fieldTypeId == thrift.I64 { + if err := p.ReadField11(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 12: + if fieldTypeId == thrift.I64 { + if err := p.ReadField12(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *Span) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.TraceID = v +} + return nil +} + +func (p *Span) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) +} else { + p.Name = v +} + return nil +} + +func (p *Span) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) +} else { + p.ID = v +} + return nil +} + +func (p *Span) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 5: ", err) +} else { + p.ParentID = &v +} + return nil +} + +func (p *Span) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Annotation, 0, size) + p.Annotations = tSlice + for i := 0; i < size; i ++ { + _elem0 := &Annotation{} + if err := _elem0.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) + } + p.Annotations = append(p.Annotations, _elem0) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Span) ReadField8(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*BinaryAnnotation, 0, size) + p.BinaryAnnotations = tSlice + for i := 0; i < size; i ++ { + _elem1 := &BinaryAnnotation{} + if err := _elem1.Read(ctx, iprot); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err) + } + p.BinaryAnnotations = append(p.BinaryAnnotations, _elem1) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Span) ReadField9(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 9: ", err) +} else { + p.Debug = v +} + return nil +} + +func (p *Span) ReadField10(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 10: ", err) +} else { + p.Timestamp = &v +} + return nil +} + +func (p *Span) ReadField11(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 11: ", err) +} else { + p.Duration = &v +} + return nil +} + +func (p *Span) ReadField12(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 12: ", err) +} else { + p.TraceIDHigh = &v +} + return nil +} + +func (p *Span) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Span"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + if err := p.writeField3(ctx, oprot); err != nil { return err } + if err := p.writeField4(ctx, oprot); err != nil { return err } + if err := p.writeField5(ctx, oprot); err != nil { return err } + if err := p.writeField6(ctx, oprot); err != nil { return err } + if err := p.writeField8(ctx, oprot); err != nil { return err } + if err := p.writeField9(ctx, oprot); err != nil { return err } + if err := p.writeField10(ctx, oprot); err != nil { return err } + if err := p.writeField11(ctx, oprot); err != nil { return err } + if err := p.writeField12(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *Span) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "trace_id", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:trace_id: ", p), err) } + if err := oprot.WriteI64(ctx, int64(p.TraceID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.trace_id (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:trace_id: ", p), err) } + return err +} + +func (p *Span) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "name", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:name: ", p), err) } + if err := oprot.WriteString(ctx, string(p.Name)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.name (3) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:name: ", p), err) } + return err +} + +func (p *Span) 
writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "id", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:id: ", p), err) } + if err := oprot.WriteI64(ctx, int64(p.ID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.id (4) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:id: ", p), err) } + return err +} + +func (p *Span) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetParentID() { + if err := oprot.WriteFieldBegin(ctx, "parent_id", thrift.I64, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:parent_id: ", p), err) } + if err := oprot.WriteI64(ctx, int64(*p.ParentID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.parent_id (5) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:parent_id: ", p), err) } + } + return err +} + +func (p *Span) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "annotations", thrift.LIST, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:annotations: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Annotations)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Annotations { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:annotations: ", p), err) } + return err +} + +func (p *Span) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "binary_annotations", thrift.LIST, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:binary_annotations: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.BinaryAnnotations)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.BinaryAnnotations { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:binary_annotations: ", p), err) } + return err +} + +func (p *Span) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetDebug() { + if err := oprot.WriteFieldBegin(ctx, "debug", thrift.BOOL, 9); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:debug: ", p), err) } + if err := oprot.WriteBool(ctx, bool(p.Debug)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.debug (9) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 9:debug: ", p), err) } + } + return err +} + +func (p *Span) writeField10(ctx 
context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetTimestamp() { + if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 10); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:timestamp: ", p), err) } + if err := oprot.WriteI64(ctx, int64(*p.Timestamp)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.timestamp (10) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 10:timestamp: ", p), err) } + } + return err +} + +func (p *Span) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetDuration() { + if err := oprot.WriteFieldBegin(ctx, "duration", thrift.I64, 11); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:duration: ", p), err) } + if err := oprot.WriteI64(ctx, int64(*p.Duration)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.duration (11) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 11:duration: ", p), err) } + } + return err +} + +func (p *Span) writeField12(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetTraceIDHigh() { + if err := oprot.WriteFieldBegin(ctx, "trace_id_high", thrift.I64, 12); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:trace_id_high: ", p), err) } + if err := oprot.WriteI64(ctx, int64(*p.TraceIDHigh)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.trace_id_high (12) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 12:trace_id_high: ", p), err) } + } + return err +} + +func (p *Span) Equals(other *Span) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.TraceID != other.TraceID { return false } + if p.Name != other.Name { return false } + if p.ID != other.ID { return false } + if p.ParentID != other.ParentID { + if p.ParentID == nil || other.ParentID == nil { + return false + } + if (*p.ParentID) != (*other.ParentID) { return false } + } + if len(p.Annotations) != len(other.Annotations) { return false } + for i, _tgt := range p.Annotations { + _src2 := other.Annotations[i] + if !_tgt.Equals(_src2) { return false } + } + if len(p.BinaryAnnotations) != len(other.BinaryAnnotations) { return false } + for i, _tgt := range p.BinaryAnnotations { + _src3 := other.BinaryAnnotations[i] + if !_tgt.Equals(_src3) { return false } + } + if p.Debug != other.Debug { return false } + if p.Timestamp != other.Timestamp { + if p.Timestamp == nil || other.Timestamp == nil { + return false + } + if (*p.Timestamp) != (*other.Timestamp) { return false } + } + if p.Duration != other.Duration { + if p.Duration == nil || other.Duration == nil { + return false + } + if (*p.Duration) != (*other.Duration) { return false } + } + if p.TraceIDHigh != other.TraceIDHigh { + if p.TraceIDHigh == nil || other.TraceIDHigh == nil { + return false + } + if (*p.TraceIDHigh) != (*other.TraceIDHigh) { return false } + } + return true +} + +func (p *Span) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Span(%+v)", *p) +} + +// Attributes: +// - Ok +type Response struct { + Ok bool `thrift:"ok,1,required" db:"ok" json:"ok"` +} + +func NewResponse() *Response { + return &Response{} +} + + +func (p *Response) 
GetOk() bool { + return p.Ok +} +func (p *Response) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetOk bool = false; + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetOk = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetOk{ + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set")); + } + return nil +} + +func (p *Response) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.Ok = v +} + return nil +} + +func (p *Response) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *Response) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "ok", thrift.BOOL, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err) } + if err := oprot.WriteBool(ctx, bool(p.Ok)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err) } + return err +} + +func (p *Response) Equals(other *Response) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Ok != other.Ok { return false } + return true +} + +func (p *Response) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Response(%+v)", *p) +} + +type ZipkinCollector interface { + // Parameters: + // - Spans + SubmitZipkinBatch(ctx context.Context, spans []*Span) (_r []*Response, _err error) +} + +type ZipkinCollectorClient struct { + c thrift.TClient + meta thrift.ResponseMeta +} + +func NewZipkinCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *ZipkinCollectorClient { + return &ZipkinCollectorClient{ + c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), + } +} + +func NewZipkinCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *ZipkinCollectorClient { + return &ZipkinCollectorClient{ + c: thrift.NewTStandardClient(iprot, oprot), + } +} + +func 
NewZipkinCollectorClient(c thrift.TClient) *ZipkinCollectorClient { + return &ZipkinCollectorClient{ + c: c, + } +} + +func (p *ZipkinCollectorClient) Client_() thrift.TClient { + return p.c +} + +func (p *ZipkinCollectorClient) LastResponseMeta_() thrift.ResponseMeta { + return p.meta +} + +func (p *ZipkinCollectorClient) SetLastResponseMeta_(meta thrift.ResponseMeta) { + p.meta = meta +} + +// Parameters: +// - Spans +func (p *ZipkinCollectorClient) SubmitZipkinBatch(ctx context.Context, spans []*Span) (_r []*Response, _err error) { + var _args4 ZipkinCollectorSubmitZipkinBatchArgs + _args4.Spans = spans + var _result6 ZipkinCollectorSubmitZipkinBatchResult + var _meta5 thrift.ResponseMeta + _meta5, _err = p.Client_().Call(ctx, "submitZipkinBatch", &_args4, &_result6) + p.SetLastResponseMeta_(_meta5) + if _err != nil { + return + } + return _result6.GetSuccess(), nil +} + +type ZipkinCollectorProcessor struct { + processorMap map[string]thrift.TProcessorFunction + handler ZipkinCollector +} + +func (p *ZipkinCollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { + p.processorMap[key] = processor +} + +func (p *ZipkinCollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { + processor, ok = p.processorMap[key] + return processor, ok +} + +func (p *ZipkinCollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { + return p.processorMap +} + +func NewZipkinCollectorProcessor(handler ZipkinCollector) *ZipkinCollectorProcessor { + + self7 := &ZipkinCollectorProcessor{handler:handler, processorMap:make(map[string]thrift.TProcessorFunction)} + self7.processorMap["submitZipkinBatch"] = &zipkinCollectorProcessorSubmitZipkinBatch{handler:handler} +return self7 +} + +func (p *ZipkinCollectorProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + name, _, seqId, err2 := iprot.ReadMessageBegin(ctx) + if err2 != nil { return false, thrift.WrapTException(err2) } + if processor, ok := p.GetProcessorFunction(name); ok { + return processor.Process(ctx, seqId, iprot, oprot) + } + iprot.Skip(ctx, thrift.STRUCT) + iprot.ReadMessageEnd(ctx) + x8 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name) + oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId) + x8.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, x8 + +} + +type zipkinCollectorProcessorSubmitZipkinBatch struct { + handler ZipkinCollector +} + +func (p *zipkinCollectorProcessorSubmitZipkinBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := ZipkinCollectorSubmitZipkinBatchArgs{} + var err2 error + if err2 = args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) + oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := func() {} + // Start a goroutine to do server side connectivity check. 
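+ // If the interval is positive, the goroutine below polls the input
+ // transport once per tick and cancels the request context as soon as the
+ // connection is no longer open, so the handler can abandon work for a
+ // client that has already gone away.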
+ if thrift.ServerConnectivityCheckInterval > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(ctx) + defer cancel() + var tickerCtx context.Context + tickerCtx, tickerCancel = context.WithCancel(context.Background()) + defer tickerCancel() + go func(ctx context.Context, cancel context.CancelFunc) { + ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if !iprot.Transport().IsOpen() { + cancel() + return + } + } + } + }(tickerCtx, cancel) + } + + result := ZipkinCollectorSubmitZipkinBatchResult{} + var retval []*Response + if retval, err2 = p.handler.SubmitZipkinBatch(ctx, args.Spans); err2 != nil { + tickerCancel() + if err2 == thrift.ErrAbandonRequest { + return false, thrift.WrapTException(err2) + } + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitZipkinBatch: " + err2.Error()) + oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return true, thrift.WrapTException(err2) + } else { + result.Success = retval + } + tickerCancel() + if err2 = oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.REPLY, seqId); err2 != nil { + err = thrift.WrapTException(err2) + } + if err2 = result.Write(ctx, oprot); err == nil && err2 != nil { + err = thrift.WrapTException(err2) + } + if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil { + err = thrift.WrapTException(err2) + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = thrift.WrapTException(err2) + } + if err != nil { + return + } + return true, err +} + + +// HELPER FUNCTIONS AND STRUCTURES + +// Attributes: +// - Spans +type ZipkinCollectorSubmitZipkinBatchArgs struct { + Spans []*Span `thrift:"spans,1" db:"spans" json:"spans"` +} + +func NewZipkinCollectorSubmitZipkinBatchArgs() *ZipkinCollectorSubmitZipkinBatchArgs { + return &ZipkinCollectorSubmitZipkinBatchArgs{} +} + + +func (p *ZipkinCollectorSubmitZipkinBatchArgs) GetSpans() []*Span { + return p.Spans +} +func (p *ZipkinCollectorSubmitZipkinBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ZipkinCollectorSubmitZipkinBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Span, 0, size) + p.Spans = tSlice + for i := 0; i < size; i ++ { + _elem9 := &Span{} + if err := _elem9.Read(ctx, iprot); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem9), err) + } + p.Spans = append(p.Spans, _elem9) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ZipkinCollectorSubmitZipkinBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "submitZipkinBatch_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *ZipkinCollectorSubmitZipkinBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Spans { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err) } + return err +} + +func (p *ZipkinCollectorSubmitZipkinBatchArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchArgs(%+v)", *p) +} + +// Attributes: +// - Success +type ZipkinCollectorSubmitZipkinBatchResult struct { + Success []*Response `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewZipkinCollectorSubmitZipkinBatchResult() *ZipkinCollectorSubmitZipkinBatchResult { + return &ZipkinCollectorSubmitZipkinBatchResult{} +} + +var ZipkinCollectorSubmitZipkinBatchResult_Success_DEFAULT []*Response + +func (p *ZipkinCollectorSubmitZipkinBatchResult) GetSuccess() []*Response { + return p.Success +} +func (p *ZipkinCollectorSubmitZipkinBatchResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *ZipkinCollectorSubmitZipkinBatchResult) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 0: + if fieldTypeId == thrift.LIST { + if err := p.ReadField0(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ZipkinCollectorSubmitZipkinBatchResult) 
ReadField0(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Response, 0, size) + p.Success = tSlice + for i := 0; i < size; i ++ { + _elem10 := &Response{} + if err := _elem10.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem10), err) + } + p.Success = append(p.Success, _elem10) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ZipkinCollectorSubmitZipkinBatchResult) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "submitZipkinBatch_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField0(ctx, oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *ZipkinCollectorSubmitZipkinBatchResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Success { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) } + } + return err +} + +func (p *ZipkinCollectorSubmitZipkinBatchResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchResult(%+v)", *p) +} + + diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/.nocover b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/.nocover new file mode 100644 index 000000000..e69de29bb diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/README.md b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/README.md new file mode 100644 index 000000000..c4c38ae01 --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/README.md @@ -0,0 +1,11 @@ +# Apache Thrift + +This is a partial copy of Apache Thrift v0.14.1 (https://github.com/apache/thrift/commit/f6fa1794539e68ac294038ac388d6bde40a6c237). + +It is vendored code to avoid compatibility issues with Thrift versions. + +The file logger.go is modified to remove dependency on "testing" (see Issue #585). 
+ +See: + * https://github.com/jaegertracing/jaeger-client-go/pull/584 + * https://github.com/jaegertracing/jaeger-client-go/pull/303 diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go new file mode 100644 index 000000000..32d5b0147 --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go @@ -0,0 +1,180 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "context" +) + +const ( + UNKNOWN_APPLICATION_EXCEPTION = 0 + UNKNOWN_METHOD = 1 + INVALID_MESSAGE_TYPE_EXCEPTION = 2 + WRONG_METHOD_NAME = 3 + BAD_SEQUENCE_ID = 4 + MISSING_RESULT = 5 + INTERNAL_ERROR = 6 + PROTOCOL_ERROR = 7 + INVALID_TRANSFORM = 8 + INVALID_PROTOCOL = 9 + UNSUPPORTED_CLIENT_TYPE = 10 +) + +var defaultApplicationExceptionMessage = map[int32]string{ + UNKNOWN_APPLICATION_EXCEPTION: "unknown application exception", + UNKNOWN_METHOD: "unknown method", + INVALID_MESSAGE_TYPE_EXCEPTION: "invalid message type", + WRONG_METHOD_NAME: "wrong method name", + BAD_SEQUENCE_ID: "bad sequence ID", + MISSING_RESULT: "missing result", + INTERNAL_ERROR: "unknown internal error", + PROTOCOL_ERROR: "unknown protocol error", + INVALID_TRANSFORM: "Invalid transform", + INVALID_PROTOCOL: "Invalid protocol", + UNSUPPORTED_CLIENT_TYPE: "Unsupported client type", +} + +// Application level Thrift exception +type TApplicationException interface { + TException + TypeId() int32 + Read(ctx context.Context, iprot TProtocol) error + Write(ctx context.Context, oprot TProtocol) error +} + +type tApplicationException struct { + message string + type_ int32 +} + +var _ TApplicationException = (*tApplicationException)(nil) + +func (tApplicationException) TExceptionType() TExceptionType { + return TExceptionTypeApplication +} + +func (e tApplicationException) Error() string { + if e.message != "" { + return e.message + } + return defaultApplicationExceptionMessage[e.type_] +} + +func NewTApplicationException(type_ int32, message string) TApplicationException { + return &tApplicationException{message, type_} +} + +func (p *tApplicationException) TypeId() int32 { + return p.type_ +} + +func (p *tApplicationException) Read(ctx context.Context, iprot TProtocol) error { + // TODO: this should really be generated by the compiler + _, err := iprot.ReadStructBegin(ctx) + if err != nil { + return err + } + + message := "" + type_ := int32(UNKNOWN_APPLICATION_EXCEPTION) + + for { + _, ttype, id, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return err + } + if ttype == STOP { + break + } + switch id { + case 1: + if ttype == STRING { + if message, err = iprot.ReadString(ctx); err != 
nil { + return err + } + } else { + if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil { + return err + } + } + case 2: + if ttype == I32 { + if type_, err = iprot.ReadI32(ctx); err != nil { + return err + } + } else { + if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil { + return err + } + } + default: + if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil { + return err + } + } + if err = iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return err + } + + p.message = message + p.type_ = type_ + + return nil +} + +func (p *tApplicationException) Write(ctx context.Context, oprot TProtocol) (err error) { + err = oprot.WriteStructBegin(ctx, "TApplicationException") + if len(p.Error()) > 0 { + err = oprot.WriteFieldBegin(ctx, "message", STRING, 1) + if err != nil { + return + } + err = oprot.WriteString(ctx, p.Error()) + if err != nil { + return + } + err = oprot.WriteFieldEnd(ctx) + if err != nil { + return + } + } + err = oprot.WriteFieldBegin(ctx, "type", I32, 2) + if err != nil { + return + } + err = oprot.WriteI32(ctx, p.type_) + if err != nil { + return + } + err = oprot.WriteFieldEnd(ctx) + if err != nil { + return + } + err = oprot.WriteFieldStop(ctx) + if err != nil { + return + } + err = oprot.WriteStructEnd(ctx) + return +} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go new file mode 100644 index 000000000..45c880d32 --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go @@ -0,0 +1,555 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "bytes" + "context" + "encoding/binary" + "errors" + "fmt" + "io" + "math" +) + +type TBinaryProtocol struct { + trans TRichTransport + origTransport TTransport + cfg *TConfiguration + buffer [64]byte +} + +type TBinaryProtocolFactory struct { + cfg *TConfiguration +} + +// Deprecated: Use NewTBinaryProtocolConf instead. +func NewTBinaryProtocolTransport(t TTransport) *TBinaryProtocol { + return NewTBinaryProtocolConf(t, &TConfiguration{ + noPropagation: true, + }) +} + +// Deprecated: Use NewTBinaryProtocolConf instead. 
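+// (The strictRead/strictWrite flags map onto TBinaryStrictRead and
+// TBinaryStrictWrite in TConfiguration; strict write controls whether
+// WriteMessageBegin emits the VERSION_1 header, strict read whether a
+// missing version is rejected on read.)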
+func NewTBinaryProtocol(t TTransport, strictRead, strictWrite bool) *TBinaryProtocol { + return NewTBinaryProtocolConf(t, &TConfiguration{ + TBinaryStrictRead: &strictRead, + TBinaryStrictWrite: &strictWrite, + + noPropagation: true, + }) +} + +func NewTBinaryProtocolConf(t TTransport, conf *TConfiguration) *TBinaryProtocol { + PropagateTConfiguration(t, conf) + p := &TBinaryProtocol{ + origTransport: t, + cfg: conf, + } + if et, ok := t.(TRichTransport); ok { + p.trans = et + } else { + p.trans = NewTRichTransport(t) + } + return p +} + +// Deprecated: Use NewTBinaryProtocolFactoryConf instead. +func NewTBinaryProtocolFactoryDefault() *TBinaryProtocolFactory { + return NewTBinaryProtocolFactoryConf(&TConfiguration{ + noPropagation: true, + }) +} + +// Deprecated: Use NewTBinaryProtocolFactoryConf instead. +func NewTBinaryProtocolFactory(strictRead, strictWrite bool) *TBinaryProtocolFactory { + return NewTBinaryProtocolFactoryConf(&TConfiguration{ + TBinaryStrictRead: &strictRead, + TBinaryStrictWrite: &strictWrite, + + noPropagation: true, + }) +} + +func NewTBinaryProtocolFactoryConf(conf *TConfiguration) *TBinaryProtocolFactory { + return &TBinaryProtocolFactory{ + cfg: conf, + } +} + +func (p *TBinaryProtocolFactory) GetProtocol(t TTransport) TProtocol { + return NewTBinaryProtocolConf(t, p.cfg) +} + +func (p *TBinaryProtocolFactory) SetTConfiguration(conf *TConfiguration) { + p.cfg = conf +} + +/** + * Writing Methods + */ + +func (p *TBinaryProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error { + if p.cfg.GetTBinaryStrictWrite() { + version := uint32(VERSION_1) | uint32(typeId) + e := p.WriteI32(ctx, int32(version)) + if e != nil { + return e + } + e = p.WriteString(ctx, name) + if e != nil { + return e + } + e = p.WriteI32(ctx, seqId) + return e + } else { + e := p.WriteString(ctx, name) + if e != nil { + return e + } + e = p.WriteByte(ctx, int8(typeId)) + if e != nil { + return e + } + e = p.WriteI32(ctx, seqId) + return e + } + return nil +} + +func (p *TBinaryProtocol) WriteMessageEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) WriteStructBegin(ctx context.Context, name string) error { + return nil +} + +func (p *TBinaryProtocol) WriteStructEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error { + e := p.WriteByte(ctx, int8(typeId)) + if e != nil { + return e + } + e = p.WriteI16(ctx, id) + return e +} + +func (p *TBinaryProtocol) WriteFieldEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) WriteFieldStop(ctx context.Context) error { + e := p.WriteByte(ctx, STOP) + return e +} + +func (p *TBinaryProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { + e := p.WriteByte(ctx, int8(keyType)) + if e != nil { + return e + } + e = p.WriteByte(ctx, int8(valueType)) + if e != nil { + return e + } + e = p.WriteI32(ctx, int32(size)) + return e +} + +func (p *TBinaryProtocol) WriteMapEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { + e := p.WriteByte(ctx, int8(elemType)) + if e != nil { + return e + } + e = p.WriteI32(ctx, int32(size)) + return e +} + +func (p *TBinaryProtocol) WriteListEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { + e := 
p.WriteByte(ctx, int8(elemType)) + if e != nil { + return e + } + e = p.WriteI32(ctx, int32(size)) + return e +} + +func (p *TBinaryProtocol) WriteSetEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) WriteBool(ctx context.Context, value bool) error { + if value { + return p.WriteByte(ctx, 1) + } + return p.WriteByte(ctx, 0) +} + +func (p *TBinaryProtocol) WriteByte(ctx context.Context, value int8) error { + e := p.trans.WriteByte(byte(value)) + return NewTProtocolException(e) +} + +func (p *TBinaryProtocol) WriteI16(ctx context.Context, value int16) error { + v := p.buffer[0:2] + binary.BigEndian.PutUint16(v, uint16(value)) + _, e := p.trans.Write(v) + return NewTProtocolException(e) +} + +func (p *TBinaryProtocol) WriteI32(ctx context.Context, value int32) error { + v := p.buffer[0:4] + binary.BigEndian.PutUint32(v, uint32(value)) + _, e := p.trans.Write(v) + return NewTProtocolException(e) +} + +func (p *TBinaryProtocol) WriteI64(ctx context.Context, value int64) error { + v := p.buffer[0:8] + binary.BigEndian.PutUint64(v, uint64(value)) + _, err := p.trans.Write(v) + return NewTProtocolException(err) +} + +func (p *TBinaryProtocol) WriteDouble(ctx context.Context, value float64) error { + return p.WriteI64(ctx, int64(math.Float64bits(value))) +} + +func (p *TBinaryProtocol) WriteString(ctx context.Context, value string) error { + e := p.WriteI32(ctx, int32(len(value))) + if e != nil { + return e + } + _, err := p.trans.WriteString(value) + return NewTProtocolException(err) +} + +func (p *TBinaryProtocol) WriteBinary(ctx context.Context, value []byte) error { + e := p.WriteI32(ctx, int32(len(value))) + if e != nil { + return e + } + _, err := p.trans.Write(value) + return NewTProtocolException(err) +} + +/** + * Reading methods + */ + +func (p *TBinaryProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) { + size, e := p.ReadI32(ctx) + if e != nil { + return "", typeId, 0, NewTProtocolException(e) + } + if size < 0 { + typeId = TMessageType(size & 0x0ff) + version := int64(int64(size) & VERSION_MASK) + if version != VERSION_1 { + return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Bad version in ReadMessageBegin")) + } + name, e = p.ReadString(ctx) + if e != nil { + return name, typeId, seqId, NewTProtocolException(e) + } + seqId, e = p.ReadI32(ctx) + if e != nil { + return name, typeId, seqId, NewTProtocolException(e) + } + return name, typeId, seqId, nil + } + if p.cfg.GetTBinaryStrictRead() { + return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Missing version in ReadMessageBegin")) + } + name, e2 := p.readStringBody(size) + if e2 != nil { + return name, typeId, seqId, e2 + } + b, e3 := p.ReadByte(ctx) + if e3 != nil { + return name, typeId, seqId, e3 + } + typeId = TMessageType(b) + seqId, e4 := p.ReadI32(ctx) + if e4 != nil { + return name, typeId, seqId, e4 + } + return name, typeId, seqId, nil +} + +func (p *TBinaryProtocol) ReadMessageEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { + return +} + +func (p *TBinaryProtocol) ReadStructEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, seqId int16, err error) { + t, err := p.ReadByte(ctx) + typeId = TType(t) + if err != nil { + return name, typeId, seqId, err + } + if t != STOP { + seqId, err = p.ReadI16(ctx) + 
} + return name, typeId, seqId, err +} + +func (p *TBinaryProtocol) ReadFieldEnd(ctx context.Context) error { + return nil +} + +var invalidDataLength = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Invalid data length")) + +func (p *TBinaryProtocol) ReadMapBegin(ctx context.Context) (kType, vType TType, size int, err error) { + k, e := p.ReadByte(ctx) + if e != nil { + err = NewTProtocolException(e) + return + } + kType = TType(k) + v, e := p.ReadByte(ctx) + if e != nil { + err = NewTProtocolException(e) + return + } + vType = TType(v) + size32, e := p.ReadI32(ctx) + if e != nil { + err = NewTProtocolException(e) + return + } + if size32 < 0 { + err = invalidDataLength + return + } + size = int(size32) + return kType, vType, size, nil +} + +func (p *TBinaryProtocol) ReadMapEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) { + b, e := p.ReadByte(ctx) + if e != nil { + err = NewTProtocolException(e) + return + } + elemType = TType(b) + size32, e := p.ReadI32(ctx) + if e != nil { + err = NewTProtocolException(e) + return + } + if size32 < 0 { + err = invalidDataLength + return + } + size = int(size32) + + return +} + +func (p *TBinaryProtocol) ReadListEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) { + b, e := p.ReadByte(ctx) + if e != nil { + err = NewTProtocolException(e) + return + } + elemType = TType(b) + size32, e := p.ReadI32(ctx) + if e != nil { + err = NewTProtocolException(e) + return + } + if size32 < 0 { + err = invalidDataLength + return + } + size = int(size32) + return elemType, size, nil +} + +func (p *TBinaryProtocol) ReadSetEnd(ctx context.Context) error { + return nil +} + +func (p *TBinaryProtocol) ReadBool(ctx context.Context) (bool, error) { + b, e := p.ReadByte(ctx) + v := true + if b != 1 { + v = false + } + return v, e +} + +func (p *TBinaryProtocol) ReadByte(ctx context.Context) (int8, error) { + v, err := p.trans.ReadByte() + return int8(v), err +} + +func (p *TBinaryProtocol) ReadI16(ctx context.Context) (value int16, err error) { + buf := p.buffer[0:2] + err = p.readAll(ctx, buf) + value = int16(binary.BigEndian.Uint16(buf)) + return value, err +} + +func (p *TBinaryProtocol) ReadI32(ctx context.Context) (value int32, err error) { + buf := p.buffer[0:4] + err = p.readAll(ctx, buf) + value = int32(binary.BigEndian.Uint32(buf)) + return value, err +} + +func (p *TBinaryProtocol) ReadI64(ctx context.Context) (value int64, err error) { + buf := p.buffer[0:8] + err = p.readAll(ctx, buf) + value = int64(binary.BigEndian.Uint64(buf)) + return value, err +} + +func (p *TBinaryProtocol) ReadDouble(ctx context.Context) (value float64, err error) { + buf := p.buffer[0:8] + err = p.readAll(ctx, buf) + value = math.Float64frombits(binary.BigEndian.Uint64(buf)) + return value, err +} + +func (p *TBinaryProtocol) ReadString(ctx context.Context) (value string, err error) { + size, e := p.ReadI32(ctx) + if e != nil { + return "", e + } + err = checkSizeForProtocol(size, p.cfg) + if err != nil { + return + } + if size < 0 { + err = invalidDataLength + return + } + if size == 0 { + return "", nil + } + if size < int32(len(p.buffer)) { + // Avoid allocation on small reads + buf := p.buffer[:size] + read, e := io.ReadFull(p.trans, buf) + return string(buf[:read]), NewTProtocolException(e) + } + + return p.readStringBody(size) +} + +func (p *TBinaryProtocol) ReadBinary(ctx 
context.Context) ([]byte, error) { + size, e := p.ReadI32(ctx) + if e != nil { + return nil, e + } + if err := checkSizeForProtocol(size, p.cfg); err != nil { + return nil, err + } + + buf, err := safeReadBytes(size, p.trans) + return buf, NewTProtocolException(err) +} + +func (p *TBinaryProtocol) Flush(ctx context.Context) (err error) { + return NewTProtocolException(p.trans.Flush(ctx)) +} + +func (p *TBinaryProtocol) Skip(ctx context.Context, fieldType TType) (err error) { + return SkipDefaultDepth(ctx, p, fieldType) +} + +func (p *TBinaryProtocol) Transport() TTransport { + return p.origTransport +} + +func (p *TBinaryProtocol) readAll(ctx context.Context, buf []byte) (err error) { + var read int + _, deadlineSet := ctx.Deadline() + for { + read, err = io.ReadFull(p.trans, buf) + if deadlineSet && read == 0 && isTimeoutError(err) && ctx.Err() == nil { + // This is I/O timeout without anything read, + // and we still have time left, keep retrying. + continue + } + // For anything else, don't retry + break + } + return NewTProtocolException(err) +} + +func (p *TBinaryProtocol) readStringBody(size int32) (value string, err error) { + buf, err := safeReadBytes(size, p.trans) + return string(buf), NewTProtocolException(err) +} + +func (p *TBinaryProtocol) SetTConfiguration(conf *TConfiguration) { + PropagateTConfiguration(p.trans, conf) + PropagateTConfiguration(p.origTransport, conf) + p.cfg = conf +} + +var ( + _ TConfigurationSetter = (*TBinaryProtocolFactory)(nil) + _ TConfigurationSetter = (*TBinaryProtocol)(nil) +) + +// This function is shared between TBinaryProtocol and TCompactProtocol. +// +// It tries to read size bytes from trans, in a way that prevents large +// allocations when size is insanely large (mostly caused by malformed message). +func safeReadBytes(size int32, trans io.Reader) ([]byte, error) { + if size < 0 { + return nil, nil + } + + buf := new(bytes.Buffer) + _, err := io.CopyN(buf, trans, int64(size)) + return buf.Bytes(), err +} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/client.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/client.go new file mode 100644 index 000000000..ea2c01fda --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/client.go @@ -0,0 +1,109 @@ +package thrift + +import ( + "context" + "fmt" +) + +// ResponseMeta represents the metadata attached to the response. +type ResponseMeta struct { + // The headers in the response, if any. + // If the underlying transport/protocol is not THeader, this will always be nil. + Headers THeaderMap +} + +type TClient interface { + Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) +} + +type TStandardClient struct { + seqId int32 + iprot, oprot TProtocol +} + +// TStandardClient implements TClient, and uses the standard message format for Thrift. +// It is not safe for concurrent use. 
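+//
+// A minimal wiring sketch (TMemoryBuffer is assumed to be present in this
+// partial copy; any transport works the same way):
+//
+//	trans := NewTMemoryBuffer()
+//	proto := NewTBinaryProtocolConf(trans, &TConfiguration{})
+//	client := NewTStandardClient(proto, proto)
+//	// Call serializes args, flushes the transport, and decodes the reply
+//	// into result; callers must serialize concurrent use themselves.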
+func NewTStandardClient(inputProtocol, outputProtocol TProtocol) *TStandardClient { + return &TStandardClient{ + iprot: inputProtocol, + oprot: outputProtocol, + } +} + +func (p *TStandardClient) Send(ctx context.Context, oprot TProtocol, seqId int32, method string, args TStruct) error { + // Set headers from context object on THeaderProtocol + if headerProt, ok := oprot.(*THeaderProtocol); ok { + headerProt.ClearWriteHeaders() + for _, key := range GetWriteHeaderList(ctx) { + if value, ok := GetHeader(ctx, key); ok { + headerProt.SetWriteHeader(key, value) + } + } + } + + if err := oprot.WriteMessageBegin(ctx, method, CALL, seqId); err != nil { + return err + } + if err := args.Write(ctx, oprot); err != nil { + return err + } + if err := oprot.WriteMessageEnd(ctx); err != nil { + return err + } + return oprot.Flush(ctx) +} + +func (p *TStandardClient) Recv(ctx context.Context, iprot TProtocol, seqId int32, method string, result TStruct) error { + rMethod, rTypeId, rSeqId, err := iprot.ReadMessageBegin(ctx) + if err != nil { + return err + } + + if method != rMethod { + return NewTApplicationException(WRONG_METHOD_NAME, fmt.Sprintf("%s: wrong method name", method)) + } else if seqId != rSeqId { + return NewTApplicationException(BAD_SEQUENCE_ID, fmt.Sprintf("%s: out of order sequence response", method)) + } else if rTypeId == EXCEPTION { + var exception tApplicationException + if err := exception.Read(ctx, iprot); err != nil { + return err + } + + if err := iprot.ReadMessageEnd(ctx); err != nil { + return err + } + + return &exception + } else if rTypeId != REPLY { + return NewTApplicationException(INVALID_MESSAGE_TYPE_EXCEPTION, fmt.Sprintf("%s: invalid message type", method)) + } + + if err := result.Read(ctx, iprot); err != nil { + return err + } + + return iprot.ReadMessageEnd(ctx) +} + +func (p *TStandardClient) Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) { + p.seqId++ + seqId := p.seqId + + if err := p.Send(ctx, p.oprot, seqId, method, args); err != nil { + return ResponseMeta{}, err + } + + // method is oneway + if result == nil { + return ResponseMeta{}, nil + } + + err := p.Recv(ctx, p.iprot, seqId, method, result) + var headers THeaderMap + if hp, ok := p.iprot.(*THeaderProtocol); ok { + headers = hp.transport.readHeaders + } + return ResponseMeta{ + Headers: headers, + }, err +} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go new file mode 100644 index 000000000..a49225dab --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go @@ -0,0 +1,865 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "context" + "encoding/binary" + "errors" + "fmt" + "io" + "math" +) + +const ( + COMPACT_PROTOCOL_ID = 0x082 + COMPACT_VERSION = 1 + COMPACT_VERSION_MASK = 0x1f + COMPACT_TYPE_MASK = 0x0E0 + COMPACT_TYPE_BITS = 0x07 + COMPACT_TYPE_SHIFT_AMOUNT = 5 +) + +type tCompactType byte + +const ( + COMPACT_BOOLEAN_TRUE = 0x01 + COMPACT_BOOLEAN_FALSE = 0x02 + COMPACT_BYTE = 0x03 + COMPACT_I16 = 0x04 + COMPACT_I32 = 0x05 + COMPACT_I64 = 0x06 + COMPACT_DOUBLE = 0x07 + COMPACT_BINARY = 0x08 + COMPACT_LIST = 0x09 + COMPACT_SET = 0x0A + COMPACT_MAP = 0x0B + COMPACT_STRUCT = 0x0C +) + +var ( + ttypeToCompactType map[TType]tCompactType +) + +func init() { + ttypeToCompactType = map[TType]tCompactType{ + STOP: STOP, + BOOL: COMPACT_BOOLEAN_TRUE, + BYTE: COMPACT_BYTE, + I16: COMPACT_I16, + I32: COMPACT_I32, + I64: COMPACT_I64, + DOUBLE: COMPACT_DOUBLE, + STRING: COMPACT_BINARY, + LIST: COMPACT_LIST, + SET: COMPACT_SET, + MAP: COMPACT_MAP, + STRUCT: COMPACT_STRUCT, + } +} + +type TCompactProtocolFactory struct { + cfg *TConfiguration +} + +// Deprecated: Use NewTCompactProtocolFactoryConf instead. +func NewTCompactProtocolFactory() *TCompactProtocolFactory { + return NewTCompactProtocolFactoryConf(&TConfiguration{ + noPropagation: true, + }) +} + +func NewTCompactProtocolFactoryConf(conf *TConfiguration) *TCompactProtocolFactory { + return &TCompactProtocolFactory{ + cfg: conf, + } +} + +func (p *TCompactProtocolFactory) GetProtocol(trans TTransport) TProtocol { + return NewTCompactProtocolConf(trans, p.cfg) +} + +func (p *TCompactProtocolFactory) SetTConfiguration(conf *TConfiguration) { + p.cfg = conf +} + +type TCompactProtocol struct { + trans TRichTransport + origTransport TTransport + + cfg *TConfiguration + + // Used to keep track of the last field for the current and previous structs, + // so we can do the delta stuff. + lastField []int + lastFieldId int + + // If we encounter a boolean field begin, save the TField here so it can + // have the value incorporated. + booleanFieldName string + booleanFieldId int16 + booleanFieldPending bool + + // If we read a field header, and it's a boolean field, save the boolean + // value here so that readBool can use it. + boolValue bool + boolValueIsNotNull bool + buffer [64]byte +} + +// Deprecated: Use NewTCompactProtocolConf instead. +func NewTCompactProtocol(trans TTransport) *TCompactProtocol { + return NewTCompactProtocolConf(trans, &TConfiguration{ + noPropagation: true, + }) +} + +func NewTCompactProtocolConf(trans TTransport, conf *TConfiguration) *TCompactProtocol { + PropagateTConfiguration(trans, conf) + p := &TCompactProtocol{ + origTransport: trans, + cfg: conf, + } + if et, ok := trans.(TRichTransport); ok { + p.trans = et + } else { + p.trans = NewTRichTransport(trans) + } + + return p +} + +// +// Public Writing methods. +// + +// Write a message header to the wire. Compact Protocol messages contain the +// protocol version so we can migrate forwards in the future if need be. 
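+// +// For example (an illustrative sketch of the wire bytes): WriteMessageBegin(ctx, "ping", CALL, 1) emits 0x82 (protocol id), 0x21 (version 1 in the low 5 bits, message type CALL in the top 3), 0x01 (the seqid as a varint), then 0x04 "ping" (the name as a varint-length-prefixed string).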
+func (p *TCompactProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error { + err := p.writeByteDirect(COMPACT_PROTOCOL_ID) + if err != nil { + return NewTProtocolException(err) + } + err = p.writeByteDirect((COMPACT_VERSION & COMPACT_VERSION_MASK) | ((byte(typeId) << COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_MASK)) + if err != nil { + return NewTProtocolException(err) + } + _, err = p.writeVarint32(seqid) + if err != nil { + return NewTProtocolException(err) + } + e := p.WriteString(ctx, name) + return e + +} + +func (p *TCompactProtocol) WriteMessageEnd(ctx context.Context) error { return nil } + +// Write a struct begin. This doesn't actually put anything on the wire. We +// use it as an opportunity to put special placeholder markers on the field +// stack so we can get the field id deltas correct. +func (p *TCompactProtocol) WriteStructBegin(ctx context.Context, name string) error { + p.lastField = append(p.lastField, p.lastFieldId) + p.lastFieldId = 0 + return nil +} + +// Write a struct end. This doesn't actually put anything on the wire. We use +// this as an opportunity to pop the last field from the current struct off +// of the field stack. +func (p *TCompactProtocol) WriteStructEnd(ctx context.Context) error { + if len(p.lastField) <= 0 { + return NewTProtocolExceptionWithType(INVALID_DATA, errors.New("WriteStructEnd called without matching WriteStructBegin call before")) + } + p.lastFieldId = p.lastField[len(p.lastField)-1] + p.lastField = p.lastField[:len(p.lastField)-1] + return nil +} + +func (p *TCompactProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error { + if typeId == BOOL { + // we want to possibly include the value, so we'll wait. + p.booleanFieldName, p.booleanFieldId, p.booleanFieldPending = name, id, true + return nil + } + _, err := p.writeFieldBeginInternal(ctx, name, typeId, id, 0xFF) + return NewTProtocolException(err) +} + +// The workhorse of writeFieldBegin. It has the option of doing a +// 'type override' of the type header. This is used specifically in the +// boolean field case. +func (p *TCompactProtocol) writeFieldBeginInternal(ctx context.Context, name string, typeId TType, id int16, typeOverride byte) (int, error) { + // short lastField = lastField_.pop(); + + // if there's a type override, use that. 
+ var typeToWrite byte + if typeOverride == 0xFF { + typeToWrite = byte(p.getCompactType(typeId)) + } else { + typeToWrite = typeOverride + } + // check if we can use delta encoding for the field id + fieldId := int(id) + written := 0 + if fieldId > p.lastFieldId && fieldId-p.lastFieldId <= 15 { + // write them together + err := p.writeByteDirect(byte((fieldId-p.lastFieldId)<<4) | typeToWrite) + if err != nil { + return 0, err + } + } else { + // write them separate + err := p.writeByteDirect(typeToWrite) + if err != nil { + return 0, err + } + err = p.WriteI16(ctx, id) + written = 1 + 2 + if err != nil { + return 0, err + } + } + + p.lastFieldId = fieldId + return written, nil +} + +func (p *TCompactProtocol) WriteFieldEnd(ctx context.Context) error { return nil } + +func (p *TCompactProtocol) WriteFieldStop(ctx context.Context) error { + err := p.writeByteDirect(STOP) + return NewTProtocolException(err) +} + +func (p *TCompactProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { + if size == 0 { + err := p.writeByteDirect(0) + return NewTProtocolException(err) + } + _, err := p.writeVarint32(int32(size)) + if err != nil { + return NewTProtocolException(err) + } + err = p.writeByteDirect(byte(p.getCompactType(keyType))<<4 | byte(p.getCompactType(valueType))) + return NewTProtocolException(err) +} + +func (p *TCompactProtocol) WriteMapEnd(ctx context.Context) error { return nil } + +// Write a list header. +func (p *TCompactProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { + _, err := p.writeCollectionBegin(elemType, size) + return NewTProtocolException(err) +} + +func (p *TCompactProtocol) WriteListEnd(ctx context.Context) error { return nil } + +// Write a set header. +func (p *TCompactProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { + _, err := p.writeCollectionBegin(elemType, size) + return NewTProtocolException(err) +} + +func (p *TCompactProtocol) WriteSetEnd(ctx context.Context) error { return nil } + +func (p *TCompactProtocol) WriteBool(ctx context.Context, value bool) error { + v := byte(COMPACT_BOOLEAN_FALSE) + if value { + v = byte(COMPACT_BOOLEAN_TRUE) + } + if p.booleanFieldPending { + // we haven't written the field header yet + _, err := p.writeFieldBeginInternal(ctx, p.booleanFieldName, BOOL, p.booleanFieldId, v) + p.booleanFieldPending = false + return NewTProtocolException(err) + } + // we're not part of a field, so just write the value. + err := p.writeByteDirect(v) + return NewTProtocolException(err) +} + +// Write a byte. Nothing to see here! +func (p *TCompactProtocol) WriteByte(ctx context.Context, value int8) error { + err := p.writeByteDirect(byte(value)) + return NewTProtocolException(err) +} + +// Write an I16 as a zigzag varint. +func (p *TCompactProtocol) WriteI16(ctx context.Context, value int16) error { + _, err := p.writeVarint32(p.int32ToZigzag(int32(value))) + return NewTProtocolException(err) +} + +// Write an i32 as a zigzag varint. +func (p *TCompactProtocol) WriteI32(ctx context.Context, value int32) error { + _, err := p.writeVarint32(p.int32ToZigzag(value)) + return NewTProtocolException(err) +} + +// Write an i64 as a zigzag varint. +func (p *TCompactProtocol) WriteI64(ctx context.Context, value int64) error { + _, err := p.writeVarint64(p.int64ToZigzag(value)) + return NewTProtocolException(err) +} + +// Write a double to the wire as 8 bytes. 
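+// For example, WriteDouble(ctx, 1.0) emits 00 00 00 00 00 00 F0 3F: the IEEE 754 bits 0x3FF0000000000000 in little-endian order (unlike TBinaryProtocol, which writes doubles big-endian).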
+func (p *TCompactProtocol) WriteDouble(ctx context.Context, value float64) error { + buf := p.buffer[0:8] + binary.LittleEndian.PutUint64(buf, math.Float64bits(value)) + _, err := p.trans.Write(buf) + return NewTProtocolException(err) +} + +// Write a string to the wire with a varint size preceding. +func (p *TCompactProtocol) WriteString(ctx context.Context, value string) error { + _, e := p.writeVarint32(int32(len(value))) + if e != nil { + return NewTProtocolException(e) + } + _, e = p.trans.WriteString(value) + return e +} + +// Write a byte array, using a varint for the size. +func (p *TCompactProtocol) WriteBinary(ctx context.Context, bin []byte) error { + _, e := p.writeVarint32(int32(len(bin))) + if e != nil { + return NewTProtocolException(e) + } + if len(bin) > 0 { + _, e = p.trans.Write(bin) + return NewTProtocolException(e) + } + return nil +} + +// +// Reading methods. +// + +// Read a message header. +func (p *TCompactProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) { + var protocolId byte + + _, deadlineSet := ctx.Deadline() + for { + protocolId, err = p.readByteDirect() + if deadlineSet && isTimeoutError(err) && ctx.Err() == nil { + // keep retrying I/O timeout errors since we still have + // time left + continue + } + // For anything else, don't retry + break + } + if err != nil { + return + } + + if protocolId != COMPACT_PROTOCOL_ID { + e := fmt.Errorf("Expected protocol id %02x but got %02x", COMPACT_PROTOCOL_ID, protocolId) + return "", typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, e) + } + + versionAndType, err := p.readByteDirect() + if err != nil { + return + } + + version := versionAndType & COMPACT_VERSION_MASK + typeId = TMessageType((versionAndType >> COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_BITS) + if version != COMPACT_VERSION { + e := fmt.Errorf("Expected version %02x but got %02x", COMPACT_VERSION, version) + err = NewTProtocolExceptionWithType(BAD_VERSION, e) + return + } + seqId, e := p.readVarint32() + if e != nil { + err = NewTProtocolException(e) + return + } + name, err = p.ReadString(ctx) + return +} + +func (p *TCompactProtocol) ReadMessageEnd(ctx context.Context) error { return nil } + +// Read a struct begin. There's nothing on the wire for this, but it is our +// opportunity to push a new struct begin marker onto the field stack. +func (p *TCompactProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { + p.lastField = append(p.lastField, p.lastFieldId) + p.lastFieldId = 0 + return +} + +// Doesn't actually consume any wire data, just removes the last field for +// this struct from the field stack. +func (p *TCompactProtocol) ReadStructEnd(ctx context.Context) error { + // consume the last field we read off the wire. + if len(p.lastField) <= 0 { + return NewTProtocolExceptionWithType(INVALID_DATA, errors.New("ReadStructEnd called without matching ReadStructBegin call before")) + } + p.lastFieldId = p.lastField[len(p.lastField)-1] + p.lastField = p.lastField[:len(p.lastField)-1] + return nil +} + +// Read a field header off the wire. +func (p *TCompactProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error) { + t, err := p.readByteDirect() + if err != nil { + return + } + + // if it's a stop, then we can return immediately, as the struct is over. + if (t & 0x0f) == STOP { + return "", STOP, 0, nil + } + + // mask off the 4 MSB of the type header. it could contain a field id delta. 
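+ // For example (illustrative): a header byte of 0x25 encodes a field id delta of 2 in the high nibble and compact type 0x05 (i32) in the low nibble, i.e. "field id = previous + 2, type i32".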
+ modifier := int16((t & 0xf0) >> 4) + if modifier == 0 { + // not a delta. look ahead for the zigzag varint field id. + id, err = p.ReadI16(ctx) + if err != nil { + return + } + } else { + // has a delta. add the delta to the last read field id. + id = int16(p.lastFieldId) + modifier + } + typeId, e := p.getTType(tCompactType(t & 0x0f)) + if e != nil { + err = NewTProtocolException(e) + return + } + + // if this happens to be a boolean field, the value is encoded in the type + if p.isBoolType(t) { + // save the boolean value in a special instance variable. + p.boolValue = (byte(t)&0x0f == COMPACT_BOOLEAN_TRUE) + p.boolValueIsNotNull = true + } + + // push the new field onto the field stack so we can keep the deltas going. + p.lastFieldId = int(id) + return +} + +func (p *TCompactProtocol) ReadFieldEnd(ctx context.Context) error { return nil } + +// Read a map header off the wire. If the size is zero, skip reading the key +// and value type. This means that 0-length maps will yield TMaps without the +// "correct" types. +func (p *TCompactProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) { + size32, e := p.readVarint32() + if e != nil { + err = NewTProtocolException(e) + return + } + if size32 < 0 { + err = invalidDataLength + return + } + size = int(size32) + + keyAndValueType := byte(STOP) + if size != 0 { + keyAndValueType, err = p.readByteDirect() + if err != nil { + return + } + } + keyType, _ = p.getTType(tCompactType(keyAndValueType >> 4)) + valueType, _ = p.getTType(tCompactType(keyAndValueType & 0xf)) + return +} + +func (p *TCompactProtocol) ReadMapEnd(ctx context.Context) error { return nil } + +// Read a list header off the wire. If the list size is 0-14, the size will +// be packed into the element type header. If it's a longer list, the 4 MSB +// of the element type header will be 0xF, and a varint will follow with the +// true size. +func (p *TCompactProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) { + size_and_type, err := p.readByteDirect() + if err != nil { + return + } + size = int((size_and_type >> 4) & 0x0f) + if size == 15 { + size2, e := p.readVarint32() + if e != nil { + err = NewTProtocolException(e) + return + } + if size2 < 0 { + err = invalidDataLength + return + } + size = int(size2) + } + elemType, e := p.getTType(tCompactType(size_and_type)) + if e != nil { + err = NewTProtocolException(e) + return + } + return +} + +func (p *TCompactProtocol) ReadListEnd(ctx context.Context) error { return nil } + +// Read a set header off the wire. If the set size is 0-14, the size will +// be packed into the element type header. If it's a longer set, the 4 MSB +// of the element type header will be 0xF, and a varint will follow with the +// true size. +func (p *TCompactProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) { + return p.ReadListBegin(ctx) +} + +func (p *TCompactProtocol) ReadSetEnd(ctx context.Context) error { return nil } + +// Read a boolean off the wire. If this is a boolean field, the value should +// already have been read during readFieldBegin, so we'll just consume the +// pre-stored value. Otherwise, read a byte. +func (p *TCompactProtocol) ReadBool(ctx context.Context) (value bool, err error) { + if p.boolValueIsNotNull { + p.boolValueIsNotNull = false + return p.boolValue, nil + } + v, err := p.readByteDirect() + return v == COMPACT_BOOLEAN_TRUE, err +} + +// Read a single byte off the wire. Nothing interesting here. 
+func (p *TCompactProtocol) ReadByte(ctx context.Context) (int8, error) { + v, err := p.readByteDirect() + if err != nil { + return 0, NewTProtocolException(err) + } + return int8(v), err +} + +// Read an i16 from the wire as a zigzag varint. +func (p *TCompactProtocol) ReadI16(ctx context.Context) (value int16, err error) { + v, err := p.ReadI32(ctx) + return int16(v), err +} + +// Read an i32 from the wire as a zigzag varint. +func (p *TCompactProtocol) ReadI32(ctx context.Context) (value int32, err error) { + v, e := p.readVarint32() + if e != nil { + return 0, NewTProtocolException(e) + } + value = p.zigzagToInt32(v) + return value, nil +} + +// Read an i64 from the wire as a zigzag varint. +func (p *TCompactProtocol) ReadI64(ctx context.Context) (value int64, err error) { + v, e := p.readVarint64() + if e != nil { + return 0, NewTProtocolException(e) + } + value = p.zigzagToInt64(v) + return value, nil +} + +// No magic here - just read a double off the wire. +func (p *TCompactProtocol) ReadDouble(ctx context.Context) (value float64, err error) { + longBits := p.buffer[0:8] + _, e := io.ReadFull(p.trans, longBits) + if e != nil { + return 0.0, NewTProtocolException(e) + } + return math.Float64frombits(p.bytesToUint64(longBits)), nil +} + +// Reads a []byte (via readBinary), and then UTF-8 decodes it. +func (p *TCompactProtocol) ReadString(ctx context.Context) (value string, err error) { + length, e := p.readVarint32() + if e != nil { + return "", NewTProtocolException(e) + } + err = checkSizeForProtocol(length, p.cfg) + if err != nil { + return + } + if length == 0 { + return "", nil + } + if length < int32(len(p.buffer)) { + // Avoid allocation on small reads + buf := p.buffer[:length] + read, e := io.ReadFull(p.trans, buf) + return string(buf[:read]), NewTProtocolException(e) + } + + buf, e := safeReadBytes(length, p.trans) + return string(buf), NewTProtocolException(e) +} + +// Read a []byte from the wire. +func (p *TCompactProtocol) ReadBinary(ctx context.Context) (value []byte, err error) { + length, e := p.readVarint32() + if e != nil { + return nil, NewTProtocolException(e) + } + err = checkSizeForProtocol(length, p.cfg) + if err != nil { + return + } + if length == 0 { + return []byte{}, nil + } + + buf, e := safeReadBytes(length, p.trans) + return buf, NewTProtocolException(e) +} + +func (p *TCompactProtocol) Flush(ctx context.Context) (err error) { + return NewTProtocolException(p.trans.Flush(ctx)) +} + +func (p *TCompactProtocol) Skip(ctx context.Context, fieldType TType) (err error) { + return SkipDefaultDepth(ctx, p, fieldType) +} + +func (p *TCompactProtocol) Transport() TTransport { + return p.origTransport +} + +// +// Internal writing methods +// + +// Abstract method for writing the start of lists and sets. List and sets on +// the wire differ only by the type indicator. +func (p *TCompactProtocol) writeCollectionBegin(elemType TType, size int) (int, error) { + if size <= 14 { + return 1, p.writeByteDirect(byte(int32(size<<4) | int32(p.getCompactType(elemType)))) + } + err := p.writeByteDirect(0xf0 | byte(p.getCompactType(elemType))) + if err != nil { + return 0, err + } + m, err := p.writeVarint32(int32(size)) + return 1 + m, err +} + +// Write an i32 as a varint. Results in 1-5 bytes on the wire. +// TODO(pomack): make a permanent buffer like writeVarint64? 
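+// +// For example, n=300 is emitted as 0xAC 0x02: 7 payload bits per byte, least-significant group first, with the continuation bit set on every byte but the last. Note that WriteI16/WriteI32/WriteI64 zigzag-encode first, so WriteI32(ctx, 300) emits zigzag(300)=600 as 0xD8 0x04.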
+func (p *TCompactProtocol) writeVarint32(n int32) (int, error) { + i32buf := p.buffer[0:5] + idx := 0 + for { + if (n & ^0x7F) == 0 { + i32buf[idx] = byte(n) + idx++ + // p.writeByteDirect(byte(n)); + break + // return; + } else { + i32buf[idx] = byte((n & 0x7F) | 0x80) + idx++ + // p.writeByteDirect(byte(((n & 0x7F) | 0x80))); + u := uint32(n) + n = int32(u >> 7) + } + } + return p.trans.Write(i32buf[0:idx]) +} + +// Write an i64 as a varint. Results in 1-10 bytes on the wire. +func (p *TCompactProtocol) writeVarint64(n int64) (int, error) { + varint64out := p.buffer[0:10] + idx := 0 + for { + if (n & ^0x7F) == 0 { + varint64out[idx] = byte(n) + idx++ + break + } else { + varint64out[idx] = byte((n & 0x7F) | 0x80) + idx++ + u := uint64(n) + n = int64(u >> 7) + } + } + return p.trans.Write(varint64out[0:idx]) +} + +// Convert l into a zigzag long. This allows negative numbers to be +// represented compactly as a varint. +func (p *TCompactProtocol) int64ToZigzag(l int64) int64 { + return (l << 1) ^ (l >> 63) +} + +// Convert l into a zigzag long. This allows negative numbers to be +// represented compactly as a varint. +func (p *TCompactProtocol) int32ToZigzag(n int32) int32 { + return (n << 1) ^ (n >> 31) +} + +func (p *TCompactProtocol) fixedUint64ToBytes(n uint64, buf []byte) { + binary.LittleEndian.PutUint64(buf, n) +} + +func (p *TCompactProtocol) fixedInt64ToBytes(n int64, buf []byte) { + binary.LittleEndian.PutUint64(buf, uint64(n)) +} + +// Writes a byte without any possibility of all that field header nonsense. +// Used internally by other writing methods that know they need to write a byte. +func (p *TCompactProtocol) writeByteDirect(b byte) error { + return p.trans.WriteByte(b) +} + +// Writes a byte without any possibility of all that field header nonsense. +func (p *TCompactProtocol) writeIntAsByteDirect(n int) (int, error) { + return 1, p.writeByteDirect(byte(n)) +} + +// +// Internal reading methods +// + +// Read an i32 from the wire as a varint. The MSB of each byte is set +// if there is another byte to follow. This can read up to 5 bytes. +func (p *TCompactProtocol) readVarint32() (int32, error) { + // if the wire contains the right stuff, this will just truncate the i64 we + // read and get us the right sign. + v, err := p.readVarint64() + return int32(v), err +} + +// Read an i64 from the wire as a proper varint. The MSB of each byte is set +// if there is another byte to follow. This can read up to 10 bytes. +func (p *TCompactProtocol) readVarint64() (int64, error) { + shift := uint(0) + result := int64(0) + for { + b, err := p.readByteDirect() + if err != nil { + return 0, err + } + result |= int64(b&0x7f) << shift + if (b & 0x80) != 0x80 { + break + } + shift += 7 + } + return result, nil +} + +// Read a byte, unlike ReadByte that reads Thrift-byte that is i8. +func (p *TCompactProtocol) readByteDirect() (byte, error) { + return p.trans.ReadByte() +} + +// +// encoding helpers +// + +// Convert from zigzag int to int. +func (p *TCompactProtocol) zigzagToInt32(n int32) int32 { + u := uint32(n) + return int32(u>>1) ^ -(n & 1) +} + +// Convert from zigzag long to long. +func (p *TCompactProtocol) zigzagToInt64(n int64) int64 { + u := uint64(n) + return int64(u>>1) ^ -(n & 1) +} + +// Note that it's important that the mask bytes are long literals, +// otherwise they'll default to ints, and when you shift an int left 56 bits, +// you just get a messed up int. 
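+// +// For example (illustrative): the little-endian bytes 01 00 00 00 00 00 00 00 decode to the integer 1.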
+func (p *TCompactProtocol) bytesToInt64(b []byte) int64 { + return int64(binary.LittleEndian.Uint64(b)) +} + +// Note that it's important that the mask bytes are long literals, +// otherwise they'll default to ints, and when you shift an int left 56 bits, +// you just get a messed up int. +func (p *TCompactProtocol) bytesToUint64(b []byte) uint64 { + return binary.LittleEndian.Uint64(b) +} + +// +// type testing and converting +// + +func (p *TCompactProtocol) isBoolType(b byte) bool { + return (b&0x0f) == COMPACT_BOOLEAN_TRUE || (b&0x0f) == COMPACT_BOOLEAN_FALSE +} + +// Given a tCompactType constant, convert it to its corresponding +// TType value. +func (p *TCompactProtocol) getTType(t tCompactType) (TType, error) { + switch byte(t) & 0x0f { + case STOP: + return STOP, nil + case COMPACT_BOOLEAN_FALSE, COMPACT_BOOLEAN_TRUE: + return BOOL, nil + case COMPACT_BYTE: + return BYTE, nil + case COMPACT_I16: + return I16, nil + case COMPACT_I32: + return I32, nil + case COMPACT_I64: + return I64, nil + case COMPACT_DOUBLE: + return DOUBLE, nil + case COMPACT_BINARY: + return STRING, nil + case COMPACT_LIST: + return LIST, nil + case COMPACT_SET: + return SET, nil + case COMPACT_MAP: + return MAP, nil + case COMPACT_STRUCT: + return STRUCT, nil + } + return STOP, NewTProtocolException(fmt.Errorf("don't know what type: %v", t&0x0f)) +} + +// Given a TType value, find the appropriate TCompactProtocol.Types constant. +func (p *TCompactProtocol) getCompactType(t TType) tCompactType { + return ttypeToCompactType[t] +} + +func (p *TCompactProtocol) SetTConfiguration(conf *TConfiguration) { + PropagateTConfiguration(p.trans, conf) + PropagateTConfiguration(p.origTransport, conf) + p.cfg = conf +} + +var ( + _ TConfigurationSetter = (*TCompactProtocolFactory)(nil) + _ TConfigurationSetter = (*TCompactProtocol)(nil) +) diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/configuration.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/configuration.go new file mode 100644 index 000000000..454d9f377 --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/configuration.go @@ -0,0 +1,378 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "crypto/tls" + "fmt" + "time" +) + +// Default TConfiguration values. +const ( + DEFAULT_MAX_MESSAGE_SIZE = 100 * 1024 * 1024 + DEFAULT_MAX_FRAME_SIZE = 16384000 + + DEFAULT_TBINARY_STRICT_READ = false + DEFAULT_TBINARY_STRICT_WRITE = true + + DEFAULT_CONNECT_TIMEOUT = 0 + DEFAULT_SOCKET_TIMEOUT = 0 +) + +// TConfiguration defines some configurations shared between TTransport, +// TProtocol, TTransportFactory, TProtocolFactory, and other implementations. 
+// +// When constructing TConfiguration, you only need to specify the non-default +// fields. All zero values have sane default values. +// +// Not all configurations defined are applicable to all implementations. +// Implementations are free to ignore the configurations not applicable to them. +// +// All functions attached to this type are nil-safe. +// +// See [1] for the spec. +// +// NOTE: When using TConfiguration, fill in all the configurations you want to +// set across the stack, not only the ones you want to set in the immediate +// TTransport/TProtocol. +// +// For example, say you want to migrate this old code into using TConfiguration: +// +// socket := thrift.NewTSocketTimeout("host:port", time.Second) +// transFactory := thrift.NewTFramedTransportFactoryMaxLength( +// thrift.NewTTransportFactory(), +// 1024 * 1024 * 256, +// ) +// protoFactory := thrift.NewTBinaryProtocolFactory(true, true) +// +// This is the wrong way to do it because in the end the TConfiguration used by +// socket and transFactory will be overwritten by the one used by protoFactory +// because of TConfiguration propagation: +// +// // bad example, DO NOT USE +// socket := thrift.NewTSocketConf("host:port", &thrift.TConfiguration{ +// ConnectTimeout: time.Second, +// SocketTimeout: time.Second, +// }) +// transFactory := thrift.NewTFramedTransportFactoryConf( +// thrift.NewTTransportFactory(), +// &thrift.TConfiguration{ +// MaxFrameSize: 1024 * 1024 * 256, +// }, +// ) +// protoFactory := thrift.NewTBinaryProtocolFactoryConf(&thrift.TConfiguration{ +// TBinaryStrictRead: thrift.BoolPtr(true), +// TBinaryStrictWrite: thrift.BoolPtr(true), +// }) +// +// This is the correct way to do it: +// +// conf := &thrift.TConfiguration{ +// ConnectTimeout: time.Second, +// SocketTimeout: time.Second, +// +// MaxFrameSize: 1024 * 1024 * 256, +// +// TBinaryStrictRead: thrift.BoolPtr(true), +// TBinaryStrictWrite: thrift.BoolPtr(true), +// } +// socket := thrift.NewTSocketConf("host:port", conf) +// transFactory := thrift.NewTFramedTransportFactoryConf(thrift.NewTTransportFactory(), conf) +// protoFactory := thrift.NewTBinaryProtocolFactoryConf(conf) +// +// [1]: https://github.com/apache/thrift/blob/master/doc/specs/thrift-tconfiguration.md +type TConfiguration struct { + // If <= 0, DEFAULT_MAX_MESSAGE_SIZE will be used instead. + MaxMessageSize int32 + + // If <= 0, DEFAULT_MAX_FRAME_SIZE will be used instead. + // + // Also if MaxMessageSize < MaxFrameSize, + // MaxMessageSize will be used instead. + MaxFrameSize int32 + + // Connect and socket timeouts to be used by TSocket and TSSLSocket. + // + // 0 means no timeout. + // + // If <0, DEFAULT_CONNECT_TIMEOUT and DEFAULT_SOCKET_TIMEOUT will be + // used. + ConnectTimeout time.Duration + SocketTimeout time.Duration + + // TLS config to be used by TSSLSocket. + TLSConfig *tls.Config + + // Strict read/write configurations for TBinaryProtocol. + // + // The BoolPtr helper function is available for setting literal values. + TBinaryStrictRead *bool + TBinaryStrictWrite *bool + + // The wrapped protocol id to be used in THeader transport/protocol. + // + // THeaderProtocolIDPtr and THeaderProtocolIDPtrMust helper functions + // are provided to help fill in this value. + THeaderProtocolID *THeaderProtocolID + + // Used internally by deprecated constructors, to avoid overriding + // the underlying TTransport/TProtocol's cfg by accidental propagations. + // + // For external users this is always false. 
+ noPropagation bool +} + +// GetMaxMessageSize returns the max message size an implementation should +// follow. +// +// It's nil-safe. DEFAULT_MAX_MESSAGE_SIZE will be returned if tc is nil. +func (tc *TConfiguration) GetMaxMessageSize() int32 { + if tc == nil || tc.MaxMessageSize <= 0 { + return DEFAULT_MAX_MESSAGE_SIZE + } + return tc.MaxMessageSize +} + +// GetMaxFrameSize returns the max frame size an implementation should follow. +// +// It's nil-safe. DEFAULT_MAX_FRAME_SIZE will be returned if tc is nil. +// +// If the configured max message size is smaller than the configured max frame +// size, the smaller one will be returned instead. +func (tc *TConfiguration) GetMaxFrameSize() int32 { + if tc == nil { + return DEFAULT_MAX_FRAME_SIZE + } + maxFrameSize := tc.MaxFrameSize + if maxFrameSize <= 0 { + maxFrameSize = DEFAULT_MAX_FRAME_SIZE + } + if maxMessageSize := tc.GetMaxMessageSize(); maxMessageSize < maxFrameSize { + return maxMessageSize + } + return maxFrameSize +} + +// GetConnectTimeout returns the connect timeout that should be used by TSocket +// and TSSLSocket. +// +// It's nil-safe. If tc is nil, DEFAULT_CONNECT_TIMEOUT will be returned instead. +func (tc *TConfiguration) GetConnectTimeout() time.Duration { + if tc == nil || tc.ConnectTimeout < 0 { + return DEFAULT_CONNECT_TIMEOUT + } + return tc.ConnectTimeout +} + +// GetSocketTimeout returns the socket timeout that should be used by TSocket +// and TSSLSocket. +// +// It's nil-safe. If tc is nil, DEFAULT_SOCKET_TIMEOUT will be returned instead. +func (tc *TConfiguration) GetSocketTimeout() time.Duration { + if tc == nil || tc.SocketTimeout < 0 { + return DEFAULT_SOCKET_TIMEOUT + } + return tc.SocketTimeout +} + +// GetTLSConfig returns the TLS config that should be used by TSSLSocket. +// +// It's nil-safe. If tc is nil, nil will be returned instead. +func (tc *TConfiguration) GetTLSConfig() *tls.Config { + if tc == nil { + return nil + } + return tc.TLSConfig +} + +// GetTBinaryStrictRead returns the strict read configuration TBinaryProtocol +// should follow. +// +// It's nil-safe. DEFAULT_TBINARY_STRICT_READ will be returned if either tc or +// tc.TBinaryStrictRead is nil. +func (tc *TConfiguration) GetTBinaryStrictRead() bool { + if tc == nil || tc.TBinaryStrictRead == nil { + return DEFAULT_TBINARY_STRICT_READ + } + return *tc.TBinaryStrictRead +} + +// GetTBinaryStrictWrite returns the strict write configuration TBinaryProtocol +// should follow. +// +// It's nil-safe. DEFAULT_TBINARY_STRICT_WRITE will be returned if either tc or +// tc.TBinaryStrictWrite is nil. +func (tc *TConfiguration) GetTBinaryStrictWrite() bool { + if tc == nil || tc.TBinaryStrictWrite == nil { + return DEFAULT_TBINARY_STRICT_WRITE + } + return *tc.TBinaryStrictWrite +} + +// GetTHeaderProtocolID returns the THeaderProtocolID that should be used by +// THeaderProtocol clients (for servers, they always use the same one as the +// client instead). +// +// It's nil-safe. If either tc or tc.THeaderProtocolID is nil, +// THeaderProtocolDefault will be returned instead. +// THeaderProtocolDefault will also be returned if the configured value is invalid. +func (tc *TConfiguration) GetTHeaderProtocolID() THeaderProtocolID { + if tc == nil || tc.THeaderProtocolID == nil { + return THeaderProtocolDefault + } + protoID := *tc.THeaderProtocolID + if err := protoID.Validate(); err != nil { + return THeaderProtocolDefault + } + return protoID +} + +// THeaderProtocolIDPtr validates and returns the pointer to id. 
+// +// If id is not a valid THeaderProtocolID, a pointer to THeaderProtocolDefault +// and the validation error will be returned. +func THeaderProtocolIDPtr(id THeaderProtocolID) (*THeaderProtocolID, error) { + err := id.Validate() + if err != nil { + id = THeaderProtocolDefault + } + return &id, err +} + +// THeaderProtocolIDPtrMust validates and returns the pointer to id. +// +// It's similar to THeaderProtocolIDPtr, but it panics on validation errors +// instead of returning them. +func THeaderProtocolIDPtrMust(id THeaderProtocolID) *THeaderProtocolID { + ptr, err := THeaderProtocolIDPtr(id) + if err != nil { + panic(err) + } + return ptr +} + +// TConfigurationSetter is an optional interface TProtocol, TTransport, +// TProtocolFactory, TTransportFactory, and other implementations can implement. +// +// It's intended to be called during initialization. +// The behavior of calling SetTConfiguration on a TTransport/TProtocol in the +// middle of a message is undefined: +// It may or may not change the behavior of the current processing message, +// and it may even cause the current message to fail. +// +// Note for implementations: SetTConfiguration might be called multiple times +// with the same value in quick succession due to the implementation of the +// propagation. Implementations should make SetTConfiguration as simple as +// possible (usually just overwrite the stored configuration and propagate it to +// the wrapped TTransports/TProtocols). +type TConfigurationSetter interface { + SetTConfiguration(*TConfiguration) +} + +// PropagateTConfiguration propagates cfg to impl if impl implements +// TConfigurationSetter and cfg is non-nil, otherwise it does nothing. +// +// NOTE: nil cfg is not propagated. If you want to propagate a TConfiguration +// with everything at its default value, use &TConfiguration{} explicitly instead. +func PropagateTConfiguration(impl interface{}, cfg *TConfiguration) { + if cfg == nil || cfg.noPropagation { + return + } + + if setter, ok := impl.(TConfigurationSetter); ok { + setter.SetTConfiguration(cfg) + } +} + +func checkSizeForProtocol(size int32, cfg *TConfiguration) error { + if size < 0 { + return NewTProtocolExceptionWithType( + NEGATIVE_SIZE, + fmt.Errorf("negative size: %d", size), + ) + } + if size > cfg.GetMaxMessageSize() { + return NewTProtocolExceptionWithType( + SIZE_LIMIT, + fmt.Errorf("size exceeded max allowed: %d", size), + ) + } + return nil +} + +type tTransportFactoryConf struct { + delegate TTransportFactory + cfg *TConfiguration +} + +func (f *tTransportFactoryConf) GetTransport(orig TTransport) (TTransport, error) { + trans, err := f.delegate.GetTransport(orig) + if err == nil { + PropagateTConfiguration(orig, f.cfg) + PropagateTConfiguration(trans, f.cfg) + } + return trans, err +} + +func (f *tTransportFactoryConf) SetTConfiguration(cfg *TConfiguration) { + PropagateTConfiguration(f.delegate, f.cfg) + f.cfg = cfg +} + +// TTransportFactoryConf wraps a TTransportFactory to propagate +// TConfiguration on the factory's GetTransport calls. 
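+// +// A minimal usage sketch (illustrative; assumes someFactory is an existing TTransportFactory): +// +// conf := &TConfiguration{MaxFrameSize: 1024 * 1024} +// factory := TTransportFactoryConf(someFactory, conf)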
+func TTransportFactoryConf(delegate TTransportFactory, conf *TConfiguration) TTransportFactory { + return &tTransportFactoryConf{ + delegate: delegate, + cfg: conf, + } +} + +type tProtocolFactoryConf struct { + delegate TProtocolFactory + cfg *TConfiguration +} + +func (f *tProtocolFactoryConf) GetProtocol(trans TTransport) TProtocol { + proto := f.delegate.GetProtocol(trans) + PropagateTConfiguration(trans, f.cfg) + PropagateTConfiguration(proto, f.cfg) + return proto +} + +func (f *tProtocolFactoryConf) SetTConfiguration(cfg *TConfiguration) { + PropagateTConfiguration(f.delegate, f.cfg) + f.cfg = cfg +} + +// TProtocolFactoryConf wraps a TProtocolFactory to propagate +// TConfiguration on the factory's GetProtocol calls. +func TProtocolFactoryConf(delegate TProtocolFactory, conf *TConfiguration) TProtocolFactory { + return &tProtocolFactoryConf{ + delegate: delegate, + cfg: conf, + } +} + +var ( + _ TConfigurationSetter = (*tTransportFactoryConf)(nil) + _ TConfigurationSetter = (*tProtocolFactoryConf)(nil) +) diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/context.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/context.go new file mode 100644 index 000000000..d15c1bcf8 --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/context.go @@ -0,0 +1,24 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import "context" + +var defaultCtx = context.Background() diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/exception.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/exception.go new file mode 100644 index 000000000..53bf862ea --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/exception.go @@ -0,0 +1,116 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "errors" +) + +// TException is the generic Thrift exception interface. +type TException interface { + error + + TExceptionType() TExceptionType +} + +// PrependError prepends additional information to an error without losing the +// Thrift exception interface. +func PrependError(prepend string, err error) error { + msg := prepend + err.Error() + + var te TException + if errors.As(err, &te) { + switch te.TExceptionType() { + case TExceptionTypeTransport: + if t, ok := err.(TTransportException); ok { + return prependTTransportException(prepend, t) + } + case TExceptionTypeProtocol: + if t, ok := err.(TProtocolException); ok { + return prependTProtocolException(prepend, t) + } + case TExceptionTypeApplication: + var t TApplicationException + if errors.As(err, &t) { + return NewTApplicationException(t.TypeId(), msg) + } + } + + return wrappedTException{ + err: err, + msg: msg, + tExceptionType: te.TExceptionType(), + } + } + + return errors.New(msg) +} + +// TExceptionType is an enum type to categorize different "subclasses" of TExceptions. +type TExceptionType byte + +// TExceptionType values +const ( + TExceptionTypeUnknown TExceptionType = iota + TExceptionTypeCompiled // TExceptions defined in thrift files and generated by the thrift compiler + TExceptionTypeApplication // TApplicationExceptions + TExceptionTypeProtocol // TProtocolExceptions + TExceptionTypeTransport // TTransportExceptions +) + +// WrapTException wraps an error into TException. +// +// If err is nil or already a TException, it's returned as-is. +// Otherwise it will be wrapped into a TException with TExceptionType() returning +// TExceptionTypeUnknown, and Unwrap() returning the original error. +func WrapTException(err error) TException { + if err == nil { + return nil + } + + if te, ok := err.(TException); ok { + return te + } + + return wrappedTException{ + err: err, + msg: err.Error(), + tExceptionType: TExceptionTypeUnknown, + } +} + +type wrappedTException struct { + err error + msg string + tExceptionType TExceptionType +} + +func (w wrappedTException) Error() string { + return w.msg +} + +func (w wrappedTException) TExceptionType() TExceptionType { + return w.tExceptionType +} + +func (w wrappedTException) Unwrap() error { + return w.err +} + +var _ TException = wrappedTException{} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/header_context.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/header_context.go new file mode 100644 index 000000000..ca2556882 --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/header_context.go @@ -0,0 +1,110 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "context" +) + +// See https://pkg.go.dev/context#WithValue on why we need the unexported typedefs. +type ( + headerKey string + headerKeyList int +) + +// Values for headerKeyList. +const ( + headerKeyListRead headerKeyList = iota + headerKeyListWrite +) + +// SetHeader sets a header in the context. +func SetHeader(ctx context.Context, key, value string) context.Context { + return context.WithValue( + ctx, + headerKey(key), + value, + ) +} + +// UnsetHeader unsets a previously set header in the context. +func UnsetHeader(ctx context.Context, key string) context.Context { + return context.WithValue( + ctx, + headerKey(key), + nil, + ) +} + +// GetHeader returns the value of the given header from the context. +func GetHeader(ctx context.Context, key string) (value string, ok bool) { + if v := ctx.Value(headerKey(key)); v != nil { + value, ok = v.(string) + } + return +} + +// SetReadHeaderList sets the key list of read THeaders in the context. +func SetReadHeaderList(ctx context.Context, keys []string) context.Context { + return context.WithValue( + ctx, + headerKeyListRead, + keys, + ) +} + +// GetReadHeaderList returns the key list of read THeaders from the context. +func GetReadHeaderList(ctx context.Context) []string { + if v := ctx.Value(headerKeyListRead); v != nil { + if value, ok := v.([]string); ok { + return value + } + } + return nil +} + +// SetWriteHeaderList sets the key list of THeaders to write in the context. +func SetWriteHeaderList(ctx context.Context, keys []string) context.Context { + return context.WithValue( + ctx, + headerKeyListWrite, + keys, + ) +} + +// GetWriteHeaderList returns the key list of THeaders to write from the context. +func GetWriteHeaderList(ctx context.Context) []string { + if v := ctx.Value(headerKeyListWrite); v != nil { + if value, ok := v.([]string); ok { + return value + } + } + return nil +} + +// AddReadTHeaderToContext adds all THeader headers into the context. +func AddReadTHeaderToContext(ctx context.Context, headers THeaderMap) context.Context { + keys := make([]string, 0, len(headers)) + for key, value := range headers { + ctx = SetHeader(ctx, key, value) + keys = append(keys, key) + } + return SetReadHeaderList(ctx, keys) +} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/header_protocol.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/header_protocol.go new file mode 100644 index 000000000..878041f8d --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/header_protocol.go @@ -0,0 +1,351 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "context" + "errors" +) + +// THeaderProtocol is a thrift protocol that implements THeader: +// https://github.com/apache/thrift/blob/master/doc/specs/HeaderFormat.md +// +// It supports either binary or compact protocol as the wrapped protocol. +// +// Most of the THeader handling happens inside THeaderTransport. +type THeaderProtocol struct { + transport *THeaderTransport + + // Will be initialized on first read/write. + protocol TProtocol + + cfg *TConfiguration +} + +// Deprecated: Use NewTHeaderProtocolConf instead. +func NewTHeaderProtocol(trans TTransport) *THeaderProtocol { + return newTHeaderProtocolConf(trans, &TConfiguration{ + noPropagation: true, + }) +} + +// NewTHeaderProtocolConf creates a new THeaderProtocol from the underlying +// transport with the given TConfiguration. +// +// The passed-in transport will be wrapped with THeaderTransport. +// +// Note that THeaderTransport handles frame and zlib by itself, +// so the underlying transport should be a raw socket transport (TSocket or TSSLSocket), +// not a rich transport like TZlibTransport or TFramedTransport. +func NewTHeaderProtocolConf(trans TTransport, conf *TConfiguration) *THeaderProtocol { + return newTHeaderProtocolConf(trans, conf) +} + +func newTHeaderProtocolConf(trans TTransport, cfg *TConfiguration) *THeaderProtocol { + t := NewTHeaderTransportConf(trans, cfg) + p, _ := t.cfg.GetTHeaderProtocolID().GetProtocol(t) + PropagateTConfiguration(p, cfg) + return &THeaderProtocol{ + transport: t, + protocol: p, + cfg: cfg, + } +} + +type tHeaderProtocolFactory struct { + cfg *TConfiguration +} + +func (f tHeaderProtocolFactory) GetProtocol(trans TTransport) TProtocol { + return newTHeaderProtocolConf(trans, f.cfg) +} + +func (f *tHeaderProtocolFactory) SetTConfiguration(cfg *TConfiguration) { + f.cfg = cfg +} + +// Deprecated: Use NewTHeaderProtocolFactoryConf instead. +func NewTHeaderProtocolFactory() TProtocolFactory { + return NewTHeaderProtocolFactoryConf(&TConfiguration{ + noPropagation: true, + }) +} + +// NewTHeaderProtocolFactoryConf creates a factory for THeader with the given +// TConfiguration. +func NewTHeaderProtocolFactoryConf(conf *TConfiguration) TProtocolFactory { + return tHeaderProtocolFactory{ + cfg: conf, + } +} + +// Transport returns the underlying transport. +// +// It's guaranteed to be of type *THeaderTransport. +func (p *THeaderProtocol) Transport() TTransport { + return p.transport +} + +// GetReadHeaders returns the THeaderMap read from the transport. +func (p *THeaderProtocol) GetReadHeaders() THeaderMap { + return p.transport.GetReadHeaders() +} + +// SetWriteHeader sets a header for writing. +func (p *THeaderProtocol) SetWriteHeader(key, value string) { + p.transport.SetWriteHeader(key, value) +} + +// ClearWriteHeaders clears all write headers previously set. +func (p *THeaderProtocol) ClearWriteHeaders() { + p.transport.ClearWriteHeaders() +} + +// AddTransform adds a transform for writing. 
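+// +// For example, p.AddTransform(TransformZlib) causes subsequently written frames to be zlib-compressed; TransformZlib is the only compression transform supported here.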
+func (p *THeaderProtocol) AddTransform(transform THeaderTransformID) error { + return p.transport.AddTransform(transform) +} + +func (p *THeaderProtocol) Flush(ctx context.Context) error { + return p.transport.Flush(ctx) +} + +func (p *THeaderProtocol) WriteMessageBegin(ctx context.Context, name string, typeID TMessageType, seqID int32) error { + newProto, err := p.transport.Protocol().GetProtocol(p.transport) + if err != nil { + return err + } + PropagateTConfiguration(newProto, p.cfg) + p.protocol = newProto + p.transport.SequenceID = seqID + return p.protocol.WriteMessageBegin(ctx, name, typeID, seqID) +} + +func (p *THeaderProtocol) WriteMessageEnd(ctx context.Context) error { + if err := p.protocol.WriteMessageEnd(ctx); err != nil { + return err + } + return p.transport.Flush(ctx) +} + +func (p *THeaderProtocol) WriteStructBegin(ctx context.Context, name string) error { + return p.protocol.WriteStructBegin(ctx, name) +} + +func (p *THeaderProtocol) WriteStructEnd(ctx context.Context) error { + return p.protocol.WriteStructEnd(ctx) +} + +func (p *THeaderProtocol) WriteFieldBegin(ctx context.Context, name string, typeID TType, id int16) error { + return p.protocol.WriteFieldBegin(ctx, name, typeID, id) +} + +func (p *THeaderProtocol) WriteFieldEnd(ctx context.Context) error { + return p.protocol.WriteFieldEnd(ctx) +} + +func (p *THeaderProtocol) WriteFieldStop(ctx context.Context) error { + return p.protocol.WriteFieldStop(ctx) +} + +func (p *THeaderProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { + return p.protocol.WriteMapBegin(ctx, keyType, valueType, size) +} + +func (p *THeaderProtocol) WriteMapEnd(ctx context.Context) error { + return p.protocol.WriteMapEnd(ctx) +} + +func (p *THeaderProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { + return p.protocol.WriteListBegin(ctx, elemType, size) +} + +func (p *THeaderProtocol) WriteListEnd(ctx context.Context) error { + return p.protocol.WriteListEnd(ctx) +} + +func (p *THeaderProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { + return p.protocol.WriteSetBegin(ctx, elemType, size) +} + +func (p *THeaderProtocol) WriteSetEnd(ctx context.Context) error { + return p.protocol.WriteSetEnd(ctx) +} + +func (p *THeaderProtocol) WriteBool(ctx context.Context, value bool) error { + return p.protocol.WriteBool(ctx, value) +} + +func (p *THeaderProtocol) WriteByte(ctx context.Context, value int8) error { + return p.protocol.WriteByte(ctx, value) +} + +func (p *THeaderProtocol) WriteI16(ctx context.Context, value int16) error { + return p.protocol.WriteI16(ctx, value) +} + +func (p *THeaderProtocol) WriteI32(ctx context.Context, value int32) error { + return p.protocol.WriteI32(ctx, value) +} + +func (p *THeaderProtocol) WriteI64(ctx context.Context, value int64) error { + return p.protocol.WriteI64(ctx, value) +} + +func (p *THeaderProtocol) WriteDouble(ctx context.Context, value float64) error { + return p.protocol.WriteDouble(ctx, value) +} + +func (p *THeaderProtocol) WriteString(ctx context.Context, value string) error { + return p.protocol.WriteString(ctx, value) +} + +func (p *THeaderProtocol) WriteBinary(ctx context.Context, value []byte) error { + return p.protocol.WriteBinary(ctx, value) +} + +// ReadFrame calls underlying THeaderTransport's ReadFrame function. 
+func (p *THeaderProtocol) ReadFrame(ctx context.Context) error { + return p.transport.ReadFrame(ctx) +} + +func (p *THeaderProtocol) ReadMessageBegin(ctx context.Context) (name string, typeID TMessageType, seqID int32, err error) { + if err = p.transport.ReadFrame(ctx); err != nil { + return + } + + var newProto TProtocol + newProto, err = p.transport.Protocol().GetProtocol(p.transport) + if err != nil { + var tAppExc TApplicationException + if !errors.As(err, &tAppExc) { + return + } + if e := p.protocol.WriteMessageBegin(ctx, "", EXCEPTION, seqID); e != nil { + return + } + if e := tAppExc.Write(ctx, p.protocol); e != nil { + return + } + if e := p.protocol.WriteMessageEnd(ctx); e != nil { + return + } + if e := p.transport.Flush(ctx); e != nil { + return + } + return + } + PropagateTConfiguration(newProto, p.cfg) + p.protocol = newProto + + return p.protocol.ReadMessageBegin(ctx) +} + +func (p *THeaderProtocol) ReadMessageEnd(ctx context.Context) error { + return p.protocol.ReadMessageEnd(ctx) +} + +func (p *THeaderProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { + return p.protocol.ReadStructBegin(ctx) +} + +func (p *THeaderProtocol) ReadStructEnd(ctx context.Context) error { + return p.protocol.ReadStructEnd(ctx) +} + +func (p *THeaderProtocol) ReadFieldBegin(ctx context.Context) (name string, typeID TType, id int16, err error) { + return p.protocol.ReadFieldBegin(ctx) +} + +func (p *THeaderProtocol) ReadFieldEnd(ctx context.Context) error { + return p.protocol.ReadFieldEnd(ctx) +} + +func (p *THeaderProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) { + return p.protocol.ReadMapBegin(ctx) +} + +func (p *THeaderProtocol) ReadMapEnd(ctx context.Context) error { + return p.protocol.ReadMapEnd(ctx) +} + +func (p *THeaderProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) { + return p.protocol.ReadListBegin(ctx) +} + +func (p *THeaderProtocol) ReadListEnd(ctx context.Context) error { + return p.protocol.ReadListEnd(ctx) +} + +func (p *THeaderProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) { + return p.protocol.ReadSetBegin(ctx) +} + +func (p *THeaderProtocol) ReadSetEnd(ctx context.Context) error { + return p.protocol.ReadSetEnd(ctx) +} + +func (p *THeaderProtocol) ReadBool(ctx context.Context) (value bool, err error) { + return p.protocol.ReadBool(ctx) +} + +func (p *THeaderProtocol) ReadByte(ctx context.Context) (value int8, err error) { + return p.protocol.ReadByte(ctx) +} + +func (p *THeaderProtocol) ReadI16(ctx context.Context) (value int16, err error) { + return p.protocol.ReadI16(ctx) +} + +func (p *THeaderProtocol) ReadI32(ctx context.Context) (value int32, err error) { + return p.protocol.ReadI32(ctx) +} + +func (p *THeaderProtocol) ReadI64(ctx context.Context) (value int64, err error) { + return p.protocol.ReadI64(ctx) +} + +func (p *THeaderProtocol) ReadDouble(ctx context.Context) (value float64, err error) { + return p.protocol.ReadDouble(ctx) +} + +func (p *THeaderProtocol) ReadString(ctx context.Context) (value string, err error) { + return p.protocol.ReadString(ctx) +} + +func (p *THeaderProtocol) ReadBinary(ctx context.Context) (value []byte, err error) { + return p.protocol.ReadBinary(ctx) +} + +func (p *THeaderProtocol) Skip(ctx context.Context, fieldType TType) error { + return p.protocol.Skip(ctx, fieldType) +} + +// SetTConfiguration implements TConfigurationSetter. 
+func (p *THeaderProtocol) SetTConfiguration(cfg *TConfiguration) { + PropagateTConfiguration(p.transport, cfg) + PropagateTConfiguration(p.protocol, cfg) + p.cfg = cfg +} + +var ( + _ TConfigurationSetter = (*tHeaderProtocolFactory)(nil) + _ TConfigurationSetter = (*THeaderProtocol)(nil) +) diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/header_transport.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/header_transport.go new file mode 100644 index 000000000..f5736df42 --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/header_transport.go @@ -0,0 +1,810 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "bufio" + "bytes" + "compress/zlib" + "context" + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" +) + +// Size in bytes for 32-bit ints. +const size32 = 4 + +type headerMeta struct { + MagicFlags uint32 + SequenceID int32 + HeaderLength uint16 +} + +const headerMetaSize = 10 + +type clientType int + +const ( + clientUnknown clientType = iota + clientHeaders + clientFramedBinary + clientUnframedBinary + clientFramedCompact + clientUnframedCompact +) + +// Constants defined in THeader format: +// https://github.com/apache/thrift/blob/master/doc/specs/HeaderFormat.md +const ( + THeaderHeaderMagic uint32 = 0x0fff0000 + THeaderHeaderMask uint32 = 0xffff0000 + THeaderFlagsMask uint32 = 0x0000ffff + THeaderMaxFrameSize uint32 = 0x3fffffff +) + +// THeaderMap is the type of the header map in THeader transport. +type THeaderMap map[string]string + +// THeaderProtocolID is the wrapped protocol id used in THeader. +type THeaderProtocolID int32 + +// Supported THeaderProtocolID values. +const ( + THeaderProtocolBinary THeaderProtocolID = 0x00 + THeaderProtocolCompact THeaderProtocolID = 0x02 + THeaderProtocolDefault = THeaderProtocolBinary +) + +// Declared globally to avoid repetitive allocations, not really used. +var globalMemoryBuffer = NewTMemoryBuffer() + +// Validate checks whether the THeaderProtocolID is a valid/supported one. +func (id THeaderProtocolID) Validate() error { + _, err := id.GetProtocol(globalMemoryBuffer) + return err +} + +// GetProtocol gets the corresponding TProtocol from the wrapped protocol id. +func (id THeaderProtocolID) GetProtocol(trans TTransport) (TProtocol, error) { + switch id { + default: + return nil, NewTApplicationException( + INVALID_PROTOCOL, + fmt.Sprintf("THeader protocol id %d not supported", id), + ) + case THeaderProtocolBinary: + return NewTBinaryProtocolTransport(trans), nil + case THeaderProtocolCompact: + return NewTCompactProtocol(trans), nil + } +} + +// THeaderTransformID defines the numeric id of the transform used. 
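+// +// Only the none and zlib transforms below are implemented; requesting any other id (for example the HMAC or Snappy ids from the THeader spec) is rejected with an error. A short sketch for enabling compressed writes, with t a *THeaderTransport: +// +// if err := t.AddTransform(TransformZlib); err != nil { +// return err +// }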
+type THeaderTransformID int32 + +// THeaderTransformID values. +// +// Values not defined here are not currently supported, namely HMAC and Snappy. +const ( + TransformNone THeaderTransformID = iota // 0, no special handling + TransformZlib // 1, zlib +) + +var supportedTransformIDs = map[THeaderTransformID]bool{ + TransformNone: true, + TransformZlib: true, +} + +// TransformReader is an io.ReadCloser that handles transforms reading. +type TransformReader struct { + io.Reader + + closers []io.Closer +} + +var _ io.ReadCloser = (*TransformReader)(nil) + +// NewTransformReaderWithCapacity initializes a TransformReader with the expected +// closers capacity. +// +// If you don't know the closers capacity beforehand, using +// +// &TransformReader{Reader: baseReader} +// +// directly is sufficient. +func NewTransformReaderWithCapacity(baseReader io.Reader, capacity int) *TransformReader { + return &TransformReader{ + Reader: baseReader, + closers: make([]io.Closer, 0, capacity), + } +} + +// Close calls the underlying closers in the appropriate order, +// stopping at and returning the first error encountered. +func (tr *TransformReader) Close() error { + // Call closers in reversed order + for i := len(tr.closers) - 1; i >= 0; i-- { + if err := tr.closers[i].Close(); err != nil { + return err + } + } + return nil +} + +// AddTransform adds a transform. +func (tr *TransformReader) AddTransform(id THeaderTransformID) error { + switch id { + default: + return NewTApplicationException( + INVALID_TRANSFORM, + fmt.Sprintf("THeaderTransformID %d not supported", id), + ) + case TransformNone: + // no-op + case TransformZlib: + readCloser, err := zlib.NewReader(tr.Reader) + if err != nil { + return err + } + tr.Reader = readCloser + tr.closers = append(tr.closers, readCloser) + } + return nil +} + +// TransformWriter is an io.WriteCloser that handles transforms writing. +type TransformWriter struct { + io.Writer + + closers []io.Closer +} + +var _ io.WriteCloser = (*TransformWriter)(nil) + +// NewTransformWriter creates a new TransformWriter with the base writer and transforms. +func NewTransformWriter(baseWriter io.Writer, transforms []THeaderTransformID) (io.WriteCloser, error) { + writer := &TransformWriter{ + Writer: baseWriter, + closers: make([]io.Closer, 0, len(transforms)), + } + for _, id := range transforms { + if err := writer.AddTransform(id); err != nil { + return nil, err + } + } + return writer, nil +} + +// Close calls the underlying closers in the appropriate order, +// stopping at and returning the first error encountered. +func (tw *TransformWriter) Close() error { + // Call closers in reversed order + for i := len(tw.closers) - 1; i >= 0; i-- { + if err := tw.closers[i].Close(); err != nil { + return err + } + } + return nil +} + +// AddTransform adds a transform. +func (tw *TransformWriter) AddTransform(id THeaderTransformID) error { + switch id { + default: + return NewTApplicationException( + INVALID_TRANSFORM, + fmt.Sprintf("THeaderTransformID %d not supported", id), + ) + case TransformNone: + // no-op + case TransformZlib: + writeCloser := zlib.NewWriter(tw.Writer) + tw.Writer = writeCloser + tw.closers = append(tw.closers, writeCloser) + } + return nil +} + +// THeaderInfoType is the type id of the info headers. +type THeaderInfoType int32 + +// Supported THeaderInfoType values. +const ( + _ THeaderInfoType = iota // Skip 0 + InfoKeyValue // 1 + // Rest of the info types are not supported. +) + +// THeaderTransport is a Transport mode that implements THeader.
+// +// Note that THeaderTransport handles frame and zlib by itself, +// so the underlying transport should be a raw socket transport (TSocket or TSSLSocket), +// instead of a rich transport like TZlibTransport or TFramedTransport. +type THeaderTransport struct { + SequenceID int32 + Flags uint32 + + transport TTransport + + // THeaderMap for read and write + readHeaders THeaderMap + writeHeaders THeaderMap + + // Reading related variables. + reader *bufio.Reader + // When frame is detected, we read the frame fully into frameBuffer. + frameBuffer bytes.Buffer + // When it's non-nil, Read should read from frameReader instead of + // reader, and EOF error indicates end of frame instead of end of all + // transport. + frameReader io.ReadCloser + + // Writing related variables. + writeBuffer bytes.Buffer + writeTransforms []THeaderTransformID + + clientType clientType + protocolID THeaderProtocolID + cfg *TConfiguration + + // buffer is used in the following scenarios to avoid repetitive + // allocations, while 4 is big enough for all those scenarios: + // + // * header padding (max size 4) + // * write the frame size (size 4) + buffer [4]byte +} + +var _ TTransport = (*THeaderTransport)(nil) + +// Deprecated: Use NewTHeaderTransportConf instead. +func NewTHeaderTransport(trans TTransport) *THeaderTransport { + return NewTHeaderTransportConf(trans, &TConfiguration{ + noPropagation: true, + }) +} + +// NewTHeaderTransportConf creates THeaderTransport from the +// underlying transport, with the given TConfiguration attached. +// +// If trans is already a *THeaderTransport, it will be returned as is, +// but with TConfiguration overridden by the value passed in. +// +// The protocol ID in TConfiguration is only useful for client transports. +// For servers, +// the protocol ID will be overridden again to the one set by the client, +// to ensure that servers always speak the same dialect as the client. +func NewTHeaderTransportConf(trans TTransport, conf *TConfiguration) *THeaderTransport { + if ht, ok := trans.(*THeaderTransport); ok { + ht.SetTConfiguration(conf) + return ht + } + PropagateTConfiguration(trans, conf) + return &THeaderTransport{ + transport: trans, + reader: bufio.NewReader(trans), + writeHeaders: make(THeaderMap), + protocolID: conf.GetTHeaderProtocolID(), + cfg: conf, + } +} + +// Open calls the underlying transport's Open function. +func (t *THeaderTransport) Open() error { + return t.transport.Open() +} + +// IsOpen calls the underlying transport's IsOpen function. +func (t *THeaderTransport) IsOpen() bool { + return t.transport.IsOpen() +} + +// ReadFrame tries to read the frame header, guess the client type, and handle +// unframed clients. +func (t *THeaderTransport) ReadFrame(ctx context.Context) error { + if !t.needReadFrame() { + // No need to read frame, skipping. + return nil + } + + // Peek and handle the first 32 bits. + // They could either be the length field of a framed message, + // or the first bytes of an unframed message. + var buf []byte + var err error + // This is also usually the first read from a connection, + // so handle retries around socket timeouts.
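+ // The retry loop below peeks the first four bytes; on an I/O timeout + // it keeps retrying as long as the context deadline has not expired, + // so a transient socket timeout does not kill the connection.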
+ _, deadlineSet := ctx.Deadline() + for { + buf, err = t.reader.Peek(size32) + if deadlineSet && isTimeoutError(err) && ctx.Err() == nil { + // This is I/O timeout and we still have time, + // continue trying + continue + } + // For anything else, do not retry + break + } + if err != nil { + return err + } + + frameSize := binary.BigEndian.Uint32(buf) + if frameSize&VERSION_MASK == VERSION_1 { + t.clientType = clientUnframedBinary + return nil + } + if buf[0] == COMPACT_PROTOCOL_ID && buf[1]&COMPACT_VERSION_MASK == COMPACT_VERSION { + t.clientType = clientUnframedCompact + return nil + } + + // At this point it should be a framed message, + // sanity check on frameSize then discard the peeked part. + if frameSize > THeaderMaxFrameSize || frameSize > uint32(t.cfg.GetMaxFrameSize()) { + return NewTProtocolExceptionWithType( + SIZE_LIMIT, + errors.New("frame too large"), + ) + } + t.reader.Discard(size32) + + // Read the frame fully into frameBuffer. + _, err = io.CopyN(&t.frameBuffer, t.reader, int64(frameSize)) + if err != nil { + return err + } + t.frameReader = ioutil.NopCloser(&t.frameBuffer) + + // Peek and handle the next 32 bits. + buf = t.frameBuffer.Bytes()[:size32] + version := binary.BigEndian.Uint32(buf) + if version&THeaderHeaderMask == THeaderHeaderMagic { + t.clientType = clientHeaders + return t.parseHeaders(ctx, frameSize) + } + if version&VERSION_MASK == VERSION_1 { + t.clientType = clientFramedBinary + return nil + } + if buf[0] == COMPACT_PROTOCOL_ID && buf[1]&COMPACT_VERSION_MASK == COMPACT_VERSION { + t.clientType = clientFramedCompact + return nil + } + if err := t.endOfFrame(); err != nil { + return err + } + return NewTProtocolExceptionWithType( + NOT_IMPLEMENTED, + errors.New("unsupported client transport type"), + ) +} + +// endOfFrame does end of frame handling. +// +// It closes frameReader, and also resets frame related states. +func (t *THeaderTransport) endOfFrame() error { + defer func() { + t.frameBuffer.Reset() + t.frameReader = nil + }() + return t.frameReader.Close() +} + +func (t *THeaderTransport) parseHeaders(ctx context.Context, frameSize uint32) error { + if t.clientType != clientHeaders { + return nil + } + + var err error + var meta headerMeta + if err = binary.Read(&t.frameBuffer, binary.BigEndian, &meta); err != nil { + return err + } + frameSize -= headerMetaSize + t.Flags = meta.MagicFlags & THeaderFlagsMask + t.SequenceID = meta.SequenceID + headerLength := int64(meta.HeaderLength) * 4 + if int64(frameSize) < headerLength { + return NewTProtocolExceptionWithType( + SIZE_LIMIT, + errors.New("header size is larger than the whole frame"), + ) + } + headerBuf := NewTMemoryBuffer() + _, err = io.CopyN(headerBuf, &t.frameBuffer, headerLength) + if err != nil { + return err + } + hp := NewTCompactProtocol(headerBuf) + hp.SetTConfiguration(t.cfg) + + // At this point the header is already read into headerBuf, + // and t.frameBuffer starts from the actual payload. 
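+ // Per the THeader spec, the header block parsed below is: a varint + // protocol id, a varint transform count followed by that many varint + // transform ids, and then optional info sections, of which only the + // key/value type (InfoKeyValue) is understood here.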
+ protoID, err := hp.readVarint32() + if err != nil { + return err + } + t.protocolID = THeaderProtocolID(protoID) + + var transformCount int32 + transformCount, err = hp.readVarint32() + if err != nil { + return err + } + if transformCount > 0 { + reader := NewTransformReaderWithCapacity( + &t.frameBuffer, + int(transformCount), + ) + t.frameReader = reader + transformIDs := make([]THeaderTransformID, transformCount) + for i := 0; i < int(transformCount); i++ { + id, err := hp.readVarint32() + if err != nil { + return err + } + transformIDs[i] = THeaderTransformID(id) + } + // The transform IDs on the wire were added based on the order of + // writing, so on the reading side we need to reverse the order. + for i := transformCount - 1; i >= 0; i-- { + id := transformIDs[i] + if err := reader.AddTransform(id); err != nil { + return err + } + } + } + + // The info part does not use the transforms yet, so it's + // important to continue using headerBuf. + headers := make(THeaderMap) + for { + infoType, err := hp.readVarint32() + if errors.Is(err, io.EOF) { + break + } + if err != nil { + return err + } + if THeaderInfoType(infoType) == InfoKeyValue { + count, err := hp.readVarint32() + if err != nil { + return err + } + for i := 0; i < int(count); i++ { + key, err := hp.ReadString(ctx) + if err != nil { + return err + } + value, err := hp.ReadString(ctx) + if err != nil { + return err + } + headers[key] = value + } + } else { + // Skip reading info section on the first + // unsupported info type. + break + } + } + t.readHeaders = headers + + return nil +} + +func (t *THeaderTransport) needReadFrame() bool { + if t.clientType == clientUnknown { + // This is a new connection that's never been read before. + return true + } + if t.isFramed() && t.frameReader == nil { + // We just finished the last frame. + return true + } + return false +} + +func (t *THeaderTransport) Read(p []byte) (read int, err error) { + // Using context.Background here instead of a context passed in is safe. + // First, there's no way to pass a context into this function. + // Second, in 99% of calls the frame is already read into frameReader + // by the time Read runs; the ReadFrame call here mostly guards against + // callers that forgot to call ReadFrame before calling Read. + err = t.ReadFrame(context.Background()) + if err != nil { + return + } + if t.frameReader != nil { + read, err = t.frameReader.Read(p) + if err == nil && t.frameBuffer.Len() <= 0 { + // The last Read finished the frame, so do endOfFrame + // handling here. + err = t.endOfFrame() + } else if err == io.EOF { + err = t.endOfFrame() + if err != nil { + return + } + if read == 0 { + // Try to read the next frame when we hit EOF + // (end of frame) immediately. + // When we got here, it means the last read + // finished the previous frame, but didn't + // do endOfFrame handling yet. + // We have to read the next frame here, + // as otherwise we would return 0 and nil, + // which is a case not handled well by most + // protocol implementations. + return t.Read(p) + } + } + return + } + return t.reader.Read(p) +} + +// Write writes data to the write buffer. +// +// You need to call Flush to actually write them to the transport. +func (t *THeaderTransport) Write(p []byte) (int, error) { + return t.writeBuffer.Write(p) +} + +// Flush writes the appropriate header and the write buffer to the underlying transport.
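+// +// For THeader clients the frame written below is: a 4-byte frame size, the headerMeta fields (magic/flags, sequence id, header length in 4-byte words), the compact-encoded header block zero-padded to a multiple of four bytes, and finally the (possibly transformed) payload.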
+func (t *THeaderTransport) Flush(ctx context.Context) error { + if t.writeBuffer.Len() == 0 { + return nil + } + + defer t.writeBuffer.Reset() + + switch t.clientType { + default: + fallthrough + case clientUnknown: + t.clientType = clientHeaders + fallthrough + case clientHeaders: + headers := NewTMemoryBuffer() + hp := NewTCompactProtocol(headers) + hp.SetTConfiguration(t.cfg) + if _, err := hp.writeVarint32(int32(t.protocolID)); err != nil { + return NewTTransportExceptionFromError(err) + } + if _, err := hp.writeVarint32(int32(len(t.writeTransforms))); err != nil { + return NewTTransportExceptionFromError(err) + } + for _, transform := range t.writeTransforms { + if _, err := hp.writeVarint32(int32(transform)); err != nil { + return NewTTransportExceptionFromError(err) + } + } + if len(t.writeHeaders) > 0 { + if _, err := hp.writeVarint32(int32(InfoKeyValue)); err != nil { + return NewTTransportExceptionFromError(err) + } + if _, err := hp.writeVarint32(int32(len(t.writeHeaders))); err != nil { + return NewTTransportExceptionFromError(err) + } + for key, value := range t.writeHeaders { + if err := hp.WriteString(ctx, key); err != nil { + return NewTTransportExceptionFromError(err) + } + if err := hp.WriteString(ctx, value); err != nil { + return NewTTransportExceptionFromError(err) + } + } + } + padding := 4 - headers.Len()%4 + if padding < 4 { + buf := t.buffer[:padding] + for i := range buf { + buf[i] = 0 + } + if _, err := headers.Write(buf); err != nil { + return NewTTransportExceptionFromError(err) + } + } + + var payload bytes.Buffer + meta := headerMeta{ + MagicFlags: THeaderHeaderMagic + t.Flags&THeaderFlagsMask, + SequenceID: t.SequenceID, + HeaderLength: uint16(headers.Len() / 4), + } + if err := binary.Write(&payload, binary.BigEndian, meta); err != nil { + return NewTTransportExceptionFromError(err) + } + if _, err := io.Copy(&payload, headers); err != nil { + return NewTTransportExceptionFromError(err) + } + + writer, err := NewTransformWriter(&payload, t.writeTransforms) + if err != nil { + return NewTTransportExceptionFromError(err) + } + if _, err := io.Copy(writer, &t.writeBuffer); err != nil { + return NewTTransportExceptionFromError(err) + } + if err := writer.Close(); err != nil { + return NewTTransportExceptionFromError(err) + } + + // First write frame length + buf := t.buffer[:size32] + binary.BigEndian.PutUint32(buf, uint32(payload.Len())) + if _, err := t.transport.Write(buf); err != nil { + return NewTTransportExceptionFromError(err) + } + // Then write the payload + if _, err := io.Copy(t.transport, &payload); err != nil { + return NewTTransportExceptionFromError(err) + } + + case clientFramedBinary, clientFramedCompact: + buf := t.buffer[:size32] + binary.BigEndian.PutUint32(buf, uint32(t.writeBuffer.Len())) + if _, err := t.transport.Write(buf); err != nil { + return NewTTransportExceptionFromError(err) + } + fallthrough + case clientUnframedBinary, clientUnframedCompact: + if _, err := io.Copy(t.transport, &t.writeBuffer); err != nil { + return NewTTransportExceptionFromError(err) + } + } + + select { + default: + case <-ctx.Done(): + return NewTTransportExceptionFromError(ctx.Err()) + } + + return t.transport.Flush(ctx) +} + +// Close closes the transport, along with its underlying transport. +func (t *THeaderTransport) Close() error { + if err := t.Flush(context.Background()); err != nil { + return err + } + return t.transport.Close() +} + +// RemainingBytes calls underlying transport's RemainingBytes. 
+// +// Even in framed cases, because of all the possible compression transforms +// involved, the remaining frame size is likely to be different from the actual +// remaining readable bytes, so we don't bother to keep tracking the remaining +// frame size by ourselves and just use the underlying transport's +// RemainingBytes directly. +func (t *THeaderTransport) RemainingBytes() uint64 { + return t.transport.RemainingBytes() +} + +// GetReadHeaders returns the THeaderMap read from transport. +func (t *THeaderTransport) GetReadHeaders() THeaderMap { + return t.readHeaders +} + +// SetWriteHeader sets a header for write. +func (t *THeaderTransport) SetWriteHeader(key, value string) { + t.writeHeaders[key] = value +} + +// ClearWriteHeaders clears all write headers previously set. +func (t *THeaderTransport) ClearWriteHeaders() { + t.writeHeaders = make(THeaderMap) +} + +// AddTransform adds a transform for writing. +func (t *THeaderTransport) AddTransform(transform THeaderTransformID) error { + if !supportedTransformIDs[transform] { + return NewTProtocolExceptionWithType( + NOT_IMPLEMENTED, + fmt.Errorf("THeaderTransformID %d not supported", transform), + ) + } + t.writeTransforms = append(t.writeTransforms, transform) + return nil +} + +// Protocol returns the wrapped protocol id used in this THeaderTransport. +func (t *THeaderTransport) Protocol() THeaderProtocolID { + switch t.clientType { + default: + return t.protocolID + case clientFramedBinary, clientUnframedBinary: + return THeaderProtocolBinary + case clientFramedCompact, clientUnframedCompact: + return THeaderProtocolCompact + } +} + +func (t *THeaderTransport) isFramed() bool { + switch t.clientType { + default: + return false + case clientHeaders, clientFramedBinary, clientFramedCompact: + return true + } +} + +// SetTConfiguration implements TConfigurationSetter. +func (t *THeaderTransport) SetTConfiguration(cfg *TConfiguration) { + PropagateTConfiguration(t.transport, cfg) + t.cfg = cfg +} + +// THeaderTransportFactory is a TTransportFactory implementation to create +// THeaderTransport. +// +// It also implements TConfigurationSetter. +type THeaderTransportFactory struct { + // The underlying factory; may be nil. + Factory TTransportFactory + + cfg *TConfiguration +} + +// Deprecated: Use NewTHeaderTransportFactoryConf instead. +func NewTHeaderTransportFactory(factory TTransportFactory) TTransportFactory { + return NewTHeaderTransportFactoryConf(factory, &TConfiguration{ + noPropagation: true, + }) +} + +// NewTHeaderTransportFactoryConf creates a new *THeaderTransportFactory with +// the given *TConfiguration. +func NewTHeaderTransportFactoryConf(factory TTransportFactory, conf *TConfiguration) TTransportFactory { + return &THeaderTransportFactory{ + Factory: factory, + + cfg: conf, + } +} + +// GetTransport implements TTransportFactory. +func (f *THeaderTransportFactory) GetTransport(trans TTransport) (TTransport, error) { + if f.Factory != nil { + t, err := f.Factory.GetTransport(trans) + if err != nil { + return nil, err + } + return NewTHeaderTransportConf(t, f.cfg), nil + } + return NewTHeaderTransportConf(trans, f.cfg), nil +} + +// SetTConfiguration implements TConfigurationSetter.
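+// +// A hedged sketch of the factory wiring this configuration feeds into (socket stands in for any TTransport, cfg for any *TConfiguration): +// +// factory := NewTHeaderTransportFactoryConf(nil, cfg) +// trans, err := factory.GetTransport(socket)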
+func (f *THeaderTransportFactory) SetTConfiguration(cfg *TConfiguration) { + // Propagate the new configuration, not the stale f.cfg. + PropagateTConfiguration(f.Factory, cfg) + f.cfg = cfg +} + +var ( + _ TConfigurationSetter = (*THeaderTransportFactory)(nil) + _ TConfigurationSetter = (*THeaderTransport)(nil) +) diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/logger.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/logger.go new file mode 100644 index 000000000..50d44ec8e --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/logger.go @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "log" + "os" +) + +// Logger is a simple wrapper of a logging function. + // +// In reality, users might use different logging libraries, and those +// libraries are not always compatible with each other. +// +// Logger is meant to be a simple common ground into which it's easy to wrap +// whatever logging library is in use. +// +// See https://issues.apache.org/jira/browse/THRIFT-4985 for the design +// discussion behind it. +type Logger func(msg string) + +// NopLogger is a Logger implementation that does nothing. +func NopLogger(msg string) {} + +// StdLogger wraps the stdlib log package into a Logger. +// +// If the logger passed in is nil, it falls back to stderr with the default flags. +func StdLogger(logger *log.Logger) Logger { + if logger == nil { + logger = log.New(os.Stderr, "", log.LstdFlags) + } + return func(msg string) { + logger.Print(msg) + } +} + +func fallbackLogger(logger Logger) Logger { + if logger == nil { + return StdLogger(nil) + } + return logger +} diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/memory_buffer.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go similarity index 91% rename from hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/memory_buffer.go rename to hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go index b62fd56f0..5936d2730 100644 --- a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/memory_buffer.go +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go @@ -21,6 +21,7 @@ package thrift import ( "bytes" + "context" ) // Memory buffer-based implementation of the TTransport interface.
@@ -33,14 +34,14 @@ type TMemoryBufferTransportFactory struct { size int } -func (p *TMemoryBufferTransportFactory) GetTransport(trans TTransport) TTransport { +func (p *TMemoryBufferTransportFactory) GetTransport(trans TTransport) (TTransport, error) { if trans != nil { t, ok := trans.(*TMemoryBuffer) if ok && t.size > 0 { - return NewTMemoryBufferLen(t.size) + return NewTMemoryBufferLen(t.size), nil } } - return NewTMemoryBufferLen(p.size) + return NewTMemoryBufferLen(p.size), nil } func NewTMemoryBufferTransportFactory(size int) *TMemoryBufferTransportFactory { @@ -70,7 +71,7 @@ func (p *TMemoryBuffer) Close() error { } // Flushing a memory buffer is a no-op -func (p *TMemoryBuffer) Flush() error { +func (p *TMemoryBuffer) Flush(ctx context.Context) error { return nil } diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/messagetype.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/messagetype.go similarity index 100% rename from hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/messagetype.go rename to hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/messagetype.go diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/numeric.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/numeric.go new file mode 100644 index 000000000..e4512d204 --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/numeric.go @@ -0,0 +1,164 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "math" + "strconv" +) + +type Numeric interface { + Int64() int64 + Int32() int32 + Int16() int16 + Byte() byte + Int() int + Float64() float64 + Float32() float32 + String() string + isNull() bool +} + +type numeric struct { + iValue int64 + dValue float64 + sValue string + isNil bool +} + +var ( + INFINITY Numeric + NEGATIVE_INFINITY Numeric + NAN Numeric + ZERO Numeric + NUMERIC_NULL Numeric +) + +func NewNumericFromDouble(dValue float64) Numeric { + if math.IsInf(dValue, 1) { + return INFINITY + } + if math.IsInf(dValue, -1) { + return NEGATIVE_INFINITY + } + if math.IsNaN(dValue) { + return NAN + } + iValue := int64(dValue) + sValue := strconv.FormatFloat(dValue, 'g', 10, 64) + isNil := false + return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} +} + +func NewNumericFromI64(iValue int64) Numeric { + dValue := float64(iValue) + sValue := strconv.FormatInt(iValue, 10) + isNil := false + return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} +} + +func NewNumericFromI32(iValue int32) Numeric { + dValue := float64(iValue) + sValue := strconv.FormatInt(int64(iValue), 10) + isNil := false + return &numeric{iValue: int64(iValue), dValue: dValue, sValue: sValue, isNil: isNil} +} + +func NewNumericFromString(sValue string) Numeric { + if sValue == INFINITY.String() { + return INFINITY + } + if sValue == NEGATIVE_INFINITY.String() { + return NEGATIVE_INFINITY + } + if sValue == NAN.String() { + return NAN + } + iValue, _ := strconv.ParseInt(sValue, 10, 64) + dValue, _ := strconv.ParseFloat(sValue, 64) + isNil := len(sValue) == 0 + return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} +} + +func NewNumericFromJSONString(sValue string, isNull bool) Numeric { + if isNull { + return NewNullNumeric() + } + if sValue == JSON_INFINITY { + return INFINITY + } + if sValue == JSON_NEGATIVE_INFINITY { + return NEGATIVE_INFINITY + } + if sValue == JSON_NAN { + return NAN + } + iValue, _ := strconv.ParseInt(sValue, 10, 64) + dValue, _ := strconv.ParseFloat(sValue, 64) + return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNull} +} + +func NewNullNumeric() Numeric { + return &numeric{iValue: 0, dValue: 0.0, sValue: "", isNil: true} +} + +func (p *numeric) Int64() int64 { + return p.iValue +} + +func (p *numeric) Int32() int32 { + return int32(p.iValue) +} + +func (p *numeric) Int16() int16 { + return int16(p.iValue) +} + +func (p *numeric) Byte() byte { + return byte(p.iValue) +} + +func (p *numeric) Int() int { + return int(p.iValue) +} + +func (p *numeric) Float64() float64 { + return p.dValue +} + +func (p *numeric) Float32() float32 { + return float32(p.dValue) +} + +func (p *numeric) String() string { + return p.sValue +} + +func (p *numeric) isNull() bool { + return p.isNil +} + +func init() { + INFINITY = &numeric{iValue: 0, dValue: math.Inf(1), sValue: "Infinity", isNil: false} + NEGATIVE_INFINITY = &numeric{iValue: 0, dValue: math.Inf(-1), sValue: "-Infinity", isNil: false} + NAN = &numeric{iValue: 0, dValue: math.NaN(), sValue: "NaN", isNil: false} + ZERO = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: false} + NUMERIC_NULL = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: true} +} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/processor_factory.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/processor_factory.go new file mode 100644 index 000000000..245a3ccfc --- /dev/null +++ 
b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/processor_factory.go @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import "context" + +// TProcessor is a generic object which operates upon an input stream and +// writes to some output stream. +type TProcessor interface { + Process(ctx context.Context, in, out TProtocol) (bool, TException) + + // ProcessorMap returns a map of thrift method names to TProcessorFunctions. + ProcessorMap() map[string]TProcessorFunction + + // AddToProcessorMap adds the given TProcessorFunction to the internal + // processor map at the given key. + // + // If one is already set at the given key, it will be replaced with the new + // TProcessorFunction. + AddToProcessorMap(string, TProcessorFunction) +} + +type TProcessorFunction interface { + Process(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException) +} + +// TProcessorFactory is the factory for TProcessor; the default +// implementation just returns a singleton instance. +type TProcessorFactory interface { + GetProcessor(trans TTransport) TProcessor +} + +type tProcessorFactory struct { + processor TProcessor +} + +func NewTProcessorFactory(p TProcessor) TProcessorFactory { + return &tProcessorFactory{processor: p} +} + +func (p *tProcessorFactory) GetProcessor(trans TTransport) TProcessor { + return p.processor +} + +// TProcessorFunctionFactory is the factory for TProcessorFunction; the default +// implementation just returns a singleton instance. +type TProcessorFunctionFactory interface { + GetProcessorFunction(trans TTransport) TProcessorFunction +} + +type tProcessorFunctionFactory struct { + processor TProcessorFunction +} + +func NewTProcessorFunctionFactory(p TProcessorFunction) TProcessorFunctionFactory { + return &tProcessorFunctionFactory{processor: p} +} + +func (p *tProcessorFunctionFactory) GetProcessorFunction(trans TTransport) TProcessorFunction { + return p.processor +} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/protocol.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/protocol.go new file mode 100644 index 000000000..0a69bd416 --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/protocol.go @@ -0,0 +1,177 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "context" + "errors" + "fmt" +) + +const ( + VERSION_MASK = 0xffff0000 + VERSION_1 = 0x80010000 +) + +type TProtocol interface { + WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error + WriteMessageEnd(ctx context.Context) error + WriteStructBegin(ctx context.Context, name string) error + WriteStructEnd(ctx context.Context) error + WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error + WriteFieldEnd(ctx context.Context) error + WriteFieldStop(ctx context.Context) error + WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error + WriteMapEnd(ctx context.Context) error + WriteListBegin(ctx context.Context, elemType TType, size int) error + WriteListEnd(ctx context.Context) error + WriteSetBegin(ctx context.Context, elemType TType, size int) error + WriteSetEnd(ctx context.Context) error + WriteBool(ctx context.Context, value bool) error + WriteByte(ctx context.Context, value int8) error + WriteI16(ctx context.Context, value int16) error + WriteI32(ctx context.Context, value int32) error + WriteI64(ctx context.Context, value int64) error + WriteDouble(ctx context.Context, value float64) error + WriteString(ctx context.Context, value string) error + WriteBinary(ctx context.Context, value []byte) error + + ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error) + ReadMessageEnd(ctx context.Context) error + ReadStructBegin(ctx context.Context) (name string, err error) + ReadStructEnd(ctx context.Context) error + ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error) + ReadFieldEnd(ctx context.Context) error + ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) + ReadMapEnd(ctx context.Context) error + ReadListBegin(ctx context.Context) (elemType TType, size int, err error) + ReadListEnd(ctx context.Context) error + ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) + ReadSetEnd(ctx context.Context) error + ReadBool(ctx context.Context) (value bool, err error) + ReadByte(ctx context.Context) (value int8, err error) + ReadI16(ctx context.Context) (value int16, err error) + ReadI32(ctx context.Context) (value int32, err error) + ReadI64(ctx context.Context) (value int64, err error) + ReadDouble(ctx context.Context) (value float64, err error) + ReadString(ctx context.Context) (value string, err error) + ReadBinary(ctx context.Context) (value []byte, err error) + + Skip(ctx context.Context, fieldType TType) (err error) + Flush(ctx context.Context) (err error) + + Transport() TTransport +} + +// The maximum recursive depth the skip() function will traverse +const DEFAULT_RECURSION_DEPTH = 64 + +// Skips over the next data element from the provided input TProtocol object. +func SkipDefaultDepth(ctx context.Context, prot TProtocol, typeId TType) (err error) { + return Skip(ctx, prot, typeId, DEFAULT_RECURSION_DEPTH) +} + +// Skips over the next data element from the provided input TProtocol object. 
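+// +// The recursion into nested containers is bounded by the explicit maxDepth argument; callers normally go through SkipDefaultDepth above. A sketch of skipping one unknown field whose type id came from ReadFieldBegin: +// +// if err := SkipDefaultDepth(ctx, prot, typeId); err != nil { +// return err +// }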
+func Skip(ctx context.Context, self TProtocol, fieldType TType, maxDepth int) (err error) { + + if maxDepth <= 0 { + return NewTProtocolExceptionWithType(DEPTH_LIMIT, errors.New("depth limit exceeded")) + } + + switch fieldType { + case BOOL: + _, err = self.ReadBool(ctx) + return + case BYTE: + _, err = self.ReadByte(ctx) + return + case I16: + _, err = self.ReadI16(ctx) + return + case I32: + _, err = self.ReadI32(ctx) + return + case I64: + _, err = self.ReadI64(ctx) + return + case DOUBLE: + _, err = self.ReadDouble(ctx) + return + case STRING: + _, err = self.ReadString(ctx) + return + case STRUCT: + if _, err = self.ReadStructBegin(ctx); err != nil { + return err + } + for { + // Propagate ReadFieldBegin errors instead of silently dropping them. + _, typeId, _, err := self.ReadFieldBegin(ctx) + if err != nil { + return err + } + if typeId == STOP { + break + } + if err := Skip(ctx, self, typeId, maxDepth-1); err != nil { + return err + } + if err := self.ReadFieldEnd(ctx); err != nil { + return err + } + } + return self.ReadStructEnd(ctx) + case MAP: + keyType, valueType, size, err := self.ReadMapBegin(ctx) + if err != nil { + return err + } + for i := 0; i < size; i++ { + if err := Skip(ctx, self, keyType, maxDepth-1); err != nil { + return err + } + // Skip the map value as well, propagating any error. + if err := self.Skip(ctx, valueType); err != nil { + return err + } + } + return self.ReadMapEnd(ctx) + case SET: + elemType, size, err := self.ReadSetBegin(ctx) + if err != nil { + return err + } + for i := 0; i < size; i++ { + err := Skip(ctx, self, elemType, maxDepth-1) + if err != nil { + return err + } + } + return self.ReadSetEnd(ctx) + case LIST: + elemType, size, err := self.ReadListBegin(ctx) + if err != nil { + return err + } + for i := 0; i < size; i++ { + err := Skip(ctx, self, elemType, maxDepth-1) + if err != nil { + return err + } + } + return self.ReadListEnd(ctx) + default: + return NewTProtocolExceptionWithType(INVALID_DATA, fmt.Errorf("unknown data type %d", fieldType)) + } +} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go new file mode 100644 index 000000000..9dcf4bfd9 --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package thrift + +import ( + "encoding/base64" + "errors" +) + +// TProtocolException is the Thrift protocol exception interface. +type TProtocolException interface { + TException + TypeId() int +} + +const ( + UNKNOWN_PROTOCOL_EXCEPTION = 0 + INVALID_DATA = 1 + NEGATIVE_SIZE = 2 + SIZE_LIMIT = 3 + BAD_VERSION = 4 + NOT_IMPLEMENTED = 5 + DEPTH_LIMIT = 6 +) + +type tProtocolException struct { + typeId int + err error + msg string +} + +var _ TProtocolException = (*tProtocolException)(nil) + +func (tProtocolException) TExceptionType() TExceptionType { + return TExceptionTypeProtocol +} + +func (p *tProtocolException) TypeId() int { + return p.typeId +} + +func (p *tProtocolException) String() string { + return p.msg +} + +func (p *tProtocolException) Error() string { + return p.msg +} + +func (p *tProtocolException) Unwrap() error { + return p.err +} + +func NewTProtocolException(err error) TProtocolException { + if err == nil { + return nil + } + + if e, ok := err.(TProtocolException); ok { + return e + } + + if errors.As(err, new(base64.CorruptInputError)) { + return NewTProtocolExceptionWithType(INVALID_DATA, err) + } + + return NewTProtocolExceptionWithType(UNKNOWN_PROTOCOL_EXCEPTION, err) +} + +func NewTProtocolExceptionWithType(errType int, err error) TProtocolException { + if err == nil { + return nil + } + return &tProtocolException{ + typeId: errType, + err: err, + msg: err.Error(), + } +} + +func prependTProtocolException(prepend string, err TProtocolException) TProtocolException { + return &tProtocolException{ + typeId: err.TypeId(), + err: err, + msg: prepend + err.Error(), + } +} diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/protocol_factory.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/protocol_factory.go similarity index 100% rename from hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/protocol_factory.go rename to hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/protocol_factory.go diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/response_helper.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/response_helper.go new file mode 100644 index 000000000..02f061395 --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/response_helper.go @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "context" +) + +// See https://pkg.go.dev/context#WithValue on why we need the unexported typedefs. +type responseHelperKey struct{} + +// TResponseHelper defines an object with a set of helper functions that can be +// retrieved from the context object passed into server handler functions.
+// +// Use GetResponseHelper to retrieve the injected TResponseHelper implementation +// from the context object. +// +// The zero value of TResponseHelper is valid with all helper functions being +// no-op. +type TResponseHelper struct { + // THeader related functions + *THeaderResponseHelper +} + +// THeaderResponseHelper defines THeader related TResponseHelper functions. +// +// The zero value of *THeaderResponseHelper is valid with all helper functions +// being no-op. +type THeaderResponseHelper struct { + proto *THeaderProtocol +} + +// NewTHeaderResponseHelper creates a new THeaderResponseHelper from the +// underlying TProtocol. +func NewTHeaderResponseHelper(proto TProtocol) *THeaderResponseHelper { + if hp, ok := proto.(*THeaderProtocol); ok { + return &THeaderResponseHelper{ + proto: hp, + } + } + return nil +} + +// SetHeader sets a response header. +// +// It's no-op if the underlying protocol/transport does not support THeader. +func (h *THeaderResponseHelper) SetHeader(key, value string) { + if h != nil && h.proto != nil { + h.proto.SetWriteHeader(key, value) + } +} + +// ClearHeaders clears all the response headers previously set. +// +// It's no-op if the underlying protocol/transport does not support THeader. +func (h *THeaderResponseHelper) ClearHeaders() { + if h != nil && h.proto != nil { + h.proto.ClearWriteHeaders() + } +} + +// GetResponseHelper retrieves the TResponseHelper implementation injected into +// the context object. +// +// If no helper was found in the context object, a nop helper with ok == false +// will be returned. +func GetResponseHelper(ctx context.Context) (helper TResponseHelper, ok bool) { + if v := ctx.Value(responseHelperKey{}); v != nil { + helper, ok = v.(TResponseHelper) + } + return +} + +// SetResponseHelper injects TResponseHelper into the context object. 
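+// +// A hedged sketch of server-side wiring, assuming proto is the TProtocol used to write the response: +// +// helper := TResponseHelper{ +// THeaderResponseHelper: NewTHeaderResponseHelper(proto), +// } +// ctx = SetResponseHelper(ctx, helper)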
+func SetResponseHelper(ctx context.Context, helper TResponseHelper) context.Context { + return context.WithValue(ctx, responseHelperKey{}, helper) +} diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/rich_transport.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go similarity index 95% rename from hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/rich_transport.go rename to hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go index 8e296a99b..83fdf29f5 100644 --- a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/rich_transport.go +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go @@ -19,7 +19,10 @@ package thrift -import "io" +import ( + "errors" + "io" +) type RichTransport struct { TTransport @@ -49,7 +52,7 @@ func (r *RichTransport) RemainingBytes() (num_bytes uint64) { func readByte(r io.Reader) (c byte, err error) { v := [1]byte{0} n, err := r.Read(v[0:1]) - if n > 0 && (err == nil || err == io.EOF) { + if n > 0 && (err == nil || errors.Is(err, io.EOF)) { return v[0], nil } if n > 0 && err != nil { @@ -66,4 +69,3 @@ func writeByte(w io.Writer, c byte) error { _, err := w.Write(v[0:1]) return err } - diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/serializer.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/serializer.go new file mode 100644 index 000000000..c44979094 --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/serializer.go @@ -0,0 +1,136 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "context" + "sync" +) + +type TSerializer struct { + Transport *TMemoryBuffer + Protocol TProtocol +} + +type TStruct interface { + Write(ctx context.Context, p TProtocol) error + Read(ctx context.Context, p TProtocol) error +} + +func NewTSerializer() *TSerializer { + transport := NewTMemoryBufferLen(1024) + protocol := NewTBinaryProtocolTransport(transport) + + return &TSerializer{ + Transport: transport, + Protocol: protocol, + } +} + +func (t *TSerializer) WriteString(ctx context.Context, msg TStruct) (s string, err error) { + t.Transport.Reset() + + if err = msg.Write(ctx, t.Protocol); err != nil { + return + } + + if err = t.Protocol.Flush(ctx); err != nil { + return + } + if err = t.Transport.Flush(ctx); err != nil { + return + } + + return t.Transport.String(), nil +} + +func (t *TSerializer) Write(ctx context.Context, msg TStruct) (b []byte, err error) { + t.Transport.Reset() + + if err = msg.Write(ctx, t.Protocol); err != nil { + return + } + + if err = t.Protocol.Flush(ctx); err != nil { + return + } + + if err = t.Transport.Flush(ctx); err != nil { + return + } + + b = append(b, t.Transport.Bytes()...) + return +} + +// TSerializerPool is the thread-safe version of TSerializer; it uses a +// resource pool of TSerializers under the hood. + // +// It must be initialized with either NewTSerializerPool or +// NewTSerializerPoolSizeFactory. +type TSerializerPool struct { + pool sync.Pool +} + +// NewTSerializerPool creates a new TSerializerPool. +// +// NewTSerializer can be used as the argument here. +func NewTSerializerPool(f func() *TSerializer) *TSerializerPool { + return &TSerializerPool{ + pool: sync.Pool{ + New: func() interface{} { + return f() + }, + }, + } +} + +// NewTSerializerPoolSizeFactory creates a new TSerializerPool with the given +// size and protocol factory. +// +// Note that the size is not the limit. The TMemoryBuffer underneath can grow +// larger than that. It just dictates the initial size.
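+// +// A sketch, assuming factory is any TProtocolFactory and msg implements TStruct: +// +// pool := NewTSerializerPoolSizeFactory(1024, factory) +// b, err := pool.Write(ctx, msg)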
+func NewTSerializerPoolSizeFactory(size int, factory TProtocolFactory) *TSerializerPool { + return &TSerializerPool{ + pool: sync.Pool{ + New: func() interface{} { + transport := NewTMemoryBufferLen(size) + protocol := factory.GetProtocol(transport) + + return &TSerializer{ + Transport: transport, + Protocol: protocol, + } + }, + }, + } +} + +func (t *TSerializerPool) WriteString(ctx context.Context, msg TStruct) (string, error) { + s := t.pool.Get().(*TSerializer) + defer t.pool.Put(s) + return s.WriteString(ctx, msg) +} + +func (t *TSerializerPool) Write(ctx context.Context, msg TStruct) ([]byte, error) { + s := t.pool.Get().(*TSerializer) + defer t.pool.Put(s) + return s.Write(ctx, msg) +} diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/server_transport.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/server_transport.go similarity index 100% rename from hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/server_transport.go rename to hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/server_transport.go diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go similarity index 78% rename from hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol.go rename to hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go index 735332231..d1a815453 100644 --- a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol.go +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go @@ -22,8 +22,10 @@ package thrift import ( "bufio" "bytes" + "context" "encoding/base64" "encoding/json" + "errors" "fmt" "io" "math" @@ -33,12 +35,13 @@ import ( type _ParseContext int const ( - _CONTEXT_IN_TOPLEVEL _ParseContext = 1 - _CONTEXT_IN_LIST_FIRST _ParseContext = 2 - _CONTEXT_IN_LIST _ParseContext = 3 - _CONTEXT_IN_OBJECT_FIRST _ParseContext = 4 - _CONTEXT_IN_OBJECT_NEXT_KEY _ParseContext = 5 - _CONTEXT_IN_OBJECT_NEXT_VALUE _ParseContext = 6 + _CONTEXT_INVALID _ParseContext = iota + _CONTEXT_IN_TOPLEVEL // 1 + _CONTEXT_IN_LIST_FIRST // 2 + _CONTEXT_IN_LIST // 3 + _CONTEXT_IN_OBJECT_FIRST // 4 + _CONTEXT_IN_OBJECT_NEXT_KEY // 5 + _CONTEXT_IN_OBJECT_NEXT_VALUE // 6 ) func (p _ParseContext) String() string { @@ -59,7 +62,33 @@ func (p _ParseContext) String() string { return "UNKNOWN-PARSE-CONTEXT" } -// JSON protocol implementation for thrift. +type jsonContextStack []_ParseContext + +func (s *jsonContextStack) push(v _ParseContext) { + *s = append(*s, v) +} + +func (s jsonContextStack) peek() (v _ParseContext, ok bool) { + l := len(s) + if l <= 0 { + return + } + return s[l-1], true +} + +func (s *jsonContextStack) pop() (v _ParseContext, ok bool) { + l := len(*s) + if l <= 0 { + return + } + v = (*s)[l-1] + *s = (*s)[0 : l-1] + return v, true +} + +var errEmptyJSONContextStack = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Unexpected empty json protocol context stack")) + +// Simple JSON protocol implementation for thrift. // // This protocol produces/consumes a simple output format // suitable for parsing by scripting languages. 
It should not be @@ -68,8 +97,8 @@ func (p _ParseContext) String() string { type TSimpleJSONProtocol struct { trans TTransport - parseContextStack []int - dumpContext []int + parseContextStack jsonContextStack + dumpContext jsonContextStack writer *bufio.Writer reader *bufio.Reader @@ -81,8 +110,8 @@ func NewTSimpleJSONProtocol(t TTransport) *TSimpleJSONProtocol { writer: bufio.NewWriter(t), reader: bufio.NewReader(t), } - v.parseContextStack = append(v.parseContextStack, int(_CONTEXT_IN_TOPLEVEL)) - v.dumpContext = append(v.dumpContext, int(_CONTEXT_IN_TOPLEVEL)) + v.parseContextStack.push(_CONTEXT_IN_TOPLEVEL) + v.dumpContext.push(_CONTEXT_IN_TOPLEVEL) return v } @@ -155,114 +184,113 @@ func mismatch(expected, actual string) error { return fmt.Errorf("Expected '%s' but found '%s' while parsing JSON.", expected, actual) } -func (p *TSimpleJSONProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error { +func (p *TSimpleJSONProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error { p.resetContextStack() // THRIFT-3735 if e := p.OutputListBegin(); e != nil { return e } - if e := p.WriteString(name); e != nil { + if e := p.WriteString(ctx, name); e != nil { return e } - if e := p.WriteByte(int8(typeId)); e != nil { + if e := p.WriteByte(ctx, int8(typeId)); e != nil { return e } - if e := p.WriteI32(seqId); e != nil { + if e := p.WriteI32(ctx, seqId); e != nil { return e } return nil } -func (p *TSimpleJSONProtocol) WriteMessageEnd() error { +func (p *TSimpleJSONProtocol) WriteMessageEnd(ctx context.Context) error { return p.OutputListEnd() } -func (p *TSimpleJSONProtocol) WriteStructBegin(name string) error { +func (p *TSimpleJSONProtocol) WriteStructBegin(ctx context.Context, name string) error { if e := p.OutputObjectBegin(); e != nil { return e } return nil } -func (p *TSimpleJSONProtocol) WriteStructEnd() error { +func (p *TSimpleJSONProtocol) WriteStructEnd(ctx context.Context) error { return p.OutputObjectEnd() } -func (p *TSimpleJSONProtocol) WriteFieldBegin(name string, typeId TType, id int16) error { - if e := p.WriteString(name); e != nil { +func (p *TSimpleJSONProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error { + if e := p.WriteString(ctx, name); e != nil { return e } return nil } -func (p *TSimpleJSONProtocol) WriteFieldEnd() error { - //return p.OutputListEnd() +func (p *TSimpleJSONProtocol) WriteFieldEnd(ctx context.Context) error { return nil } -func (p *TSimpleJSONProtocol) WriteFieldStop() error { return nil } +func (p *TSimpleJSONProtocol) WriteFieldStop(ctx context.Context) error { return nil } -func (p *TSimpleJSONProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error { +func (p *TSimpleJSONProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { if e := p.OutputListBegin(); e != nil { return e } - if e := p.WriteByte(int8(keyType)); e != nil { + if e := p.WriteByte(ctx, int8(keyType)); e != nil { return e } - if e := p.WriteByte(int8(valueType)); e != nil { + if e := p.WriteByte(ctx, int8(valueType)); e != nil { return e } - return p.WriteI32(int32(size)) + return p.WriteI32(ctx, int32(size)) } -func (p *TSimpleJSONProtocol) WriteMapEnd() error { +func (p *TSimpleJSONProtocol) WriteMapEnd(ctx context.Context) error { return p.OutputListEnd() } -func (p *TSimpleJSONProtocol) WriteListBegin(elemType TType, size int) error { +func (p *TSimpleJSONProtocol) WriteListBegin(ctx context.Context, elemType TType, size 
int) error { return p.OutputElemListBegin(elemType, size) } -func (p *TSimpleJSONProtocol) WriteListEnd() error { +func (p *TSimpleJSONProtocol) WriteListEnd(ctx context.Context) error { return p.OutputListEnd() } -func (p *TSimpleJSONProtocol) WriteSetBegin(elemType TType, size int) error { +func (p *TSimpleJSONProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { return p.OutputElemListBegin(elemType, size) } -func (p *TSimpleJSONProtocol) WriteSetEnd() error { +func (p *TSimpleJSONProtocol) WriteSetEnd(ctx context.Context) error { return p.OutputListEnd() } -func (p *TSimpleJSONProtocol) WriteBool(b bool) error { +func (p *TSimpleJSONProtocol) WriteBool(ctx context.Context, b bool) error { return p.OutputBool(b) } -func (p *TSimpleJSONProtocol) WriteByte(b int8) error { - return p.WriteI32(int32(b)) +func (p *TSimpleJSONProtocol) WriteByte(ctx context.Context, b int8) error { + return p.WriteI32(ctx, int32(b)) } -func (p *TSimpleJSONProtocol) WriteI16(v int16) error { - return p.WriteI32(int32(v)) +func (p *TSimpleJSONProtocol) WriteI16(ctx context.Context, v int16) error { + return p.WriteI32(ctx, int32(v)) } -func (p *TSimpleJSONProtocol) WriteI32(v int32) error { +func (p *TSimpleJSONProtocol) WriteI32(ctx context.Context, v int32) error { return p.OutputI64(int64(v)) } -func (p *TSimpleJSONProtocol) WriteI64(v int64) error { +func (p *TSimpleJSONProtocol) WriteI64(ctx context.Context, v int64) error { return p.OutputI64(int64(v)) } -func (p *TSimpleJSONProtocol) WriteDouble(v float64) error { +func (p *TSimpleJSONProtocol) WriteDouble(ctx context.Context, v float64) error { return p.OutputF64(v) } -func (p *TSimpleJSONProtocol) WriteString(v string) error { +func (p *TSimpleJSONProtocol) WriteString(ctx context.Context, v string) error { return p.OutputString(v) } -func (p *TSimpleJSONProtocol) WriteBinary(v []byte) error { +func (p *TSimpleJSONProtocol) WriteBinary(ctx context.Context, v []byte) error { // JSON library only takes in a string, // not an arbitrary byte array, to ensure bytes are transmitted // efficiently we must convert this into a valid JSON string @@ -288,39 +316,39 @@ func (p *TSimpleJSONProtocol) WriteBinary(v []byte) error { } // Reading methods. 
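
Two changes recur throughout this rename: every TProtocol method gains a leading context.Context parameter (the read-side methods below get the same treatment as the write-side ones above), and the raw []int context stacks are replaced by a typed stack whose accessors signal emptiness through an ok boolean instead of indexing past the end of the slice. A minimal, self-contained sketch of that stack idiom (names are illustrative; written with generics, which the vendored code itself predates):

package main

import "fmt"

// stack mirrors the jsonContextStack shape: a slice with push/peek/pop
// where peek and pop return (zero value, false) on an empty stack
// rather than panicking on an out-of-range index.
type stack[T any] []T

func (s *stack[T]) push(v T) { *s = append(*s, v) }

func (s stack[T]) peek() (v T, ok bool) {
    if len(s) == 0 {
        return v, false // zero value plus empty-stack signal
    }
    return s[len(s)-1], true
}

func (s *stack[T]) pop() (v T, ok bool) {
    if len(*s) == 0 {
        return v, false
    }
    v = (*s)[len(*s)-1]
    *s = (*s)[:len(*s)-1]
    return v, true
}

func main() {
    var s stack[string]
    s.push("toplevel")
    s.push("list")
    top, _ := s.peek()              // "list"; stack unchanged
    v, ok := s.pop()                // "list", true
    fmt.Println(top, v, ok, len(s)) // list list true 1
}
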
-func (p *TSimpleJSONProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) { +func (p *TSimpleJSONProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) { p.resetContextStack() // THRIFT-3735 if isNull, err := p.ParseListBegin(); isNull || err != nil { return name, typeId, seqId, err } - if name, err = p.ReadString(); err != nil { + if name, err = p.ReadString(ctx); err != nil { return name, typeId, seqId, err } - bTypeId, err := p.ReadByte() + bTypeId, err := p.ReadByte(ctx) typeId = TMessageType(bTypeId) if err != nil { return name, typeId, seqId, err } - if seqId, err = p.ReadI32(); err != nil { + if seqId, err = p.ReadI32(ctx); err != nil { return name, typeId, seqId, err } return name, typeId, seqId, nil } -func (p *TSimpleJSONProtocol) ReadMessageEnd() error { +func (p *TSimpleJSONProtocol) ReadMessageEnd(ctx context.Context) error { return p.ParseListEnd() } -func (p *TSimpleJSONProtocol) ReadStructBegin() (name string, err error) { +func (p *TSimpleJSONProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { _, err = p.ParseObjectStart() return "", err } -func (p *TSimpleJSONProtocol) ReadStructEnd() error { +func (p *TSimpleJSONProtocol) ReadStructEnd(ctx context.Context) error { return p.ParseObjectEnd() } -func (p *TSimpleJSONProtocol) ReadFieldBegin() (string, TType, int16, error) { +func (p *TSimpleJSONProtocol) ReadFieldBegin(ctx context.Context) (string, TType, int16, error) { if err := p.ParsePreValue(); err != nil { return "", STOP, 0, err } @@ -339,21 +367,6 @@ func (p *TSimpleJSONProtocol) ReadFieldBegin() (string, TType, int16, error) { return name, STOP, 0, err } return name, STOP, -1, p.ParsePostValue() - /* - if err = p.ParsePostValue(); err != nil { - return name, STOP, 0, err - } - if isNull, err := p.ParseListBegin(); isNull || err != nil { - return name, STOP, 0, err - } - bType, err := p.ReadByte() - thetype := TType(bType) - if err != nil { - return name, thetype, 0, err - } - id, err := p.ReadI16() - return name, thetype, id, err - */ } e := fmt.Errorf("Expected \"}\" or '\"', but found: '%s'", string(b)) return "", STOP, 0, NewTProtocolExceptionWithType(INVALID_DATA, e) @@ -361,57 +374,56 @@ func (p *TSimpleJSONProtocol) ReadFieldBegin() (string, TType, int16, error) { return "", STOP, 0, NewTProtocolException(io.EOF) } -func (p *TSimpleJSONProtocol) ReadFieldEnd() error { +func (p *TSimpleJSONProtocol) ReadFieldEnd(ctx context.Context) error { return nil - //return p.ParseListEnd() } -func (p *TSimpleJSONProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, e error) { +func (p *TSimpleJSONProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, e error) { if isNull, e := p.ParseListBegin(); isNull || e != nil { return VOID, VOID, 0, e } // read keyType - bKeyType, e := p.ReadByte() + bKeyType, e := p.ReadByte(ctx) keyType = TType(bKeyType) if e != nil { return keyType, valueType, size, e } // read valueType - bValueType, e := p.ReadByte() + bValueType, e := p.ReadByte(ctx) valueType = TType(bValueType) if e != nil { return keyType, valueType, size, e } // read size - iSize, err := p.ReadI64() + iSize, err := p.ReadI64(ctx) size = int(iSize) return keyType, valueType, size, err } -func (p *TSimpleJSONProtocol) ReadMapEnd() error { +func (p *TSimpleJSONProtocol) ReadMapEnd(ctx context.Context) error { return p.ParseListEnd() } -func (p *TSimpleJSONProtocol) ReadListBegin() (elemType TType, size int, e error) { +func 
(p *TSimpleJSONProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, e error) { return p.ParseElemListBegin() } -func (p *TSimpleJSONProtocol) ReadListEnd() error { +func (p *TSimpleJSONProtocol) ReadListEnd(ctx context.Context) error { return p.ParseListEnd() } -func (p *TSimpleJSONProtocol) ReadSetBegin() (elemType TType, size int, e error) { +func (p *TSimpleJSONProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, e error) { return p.ParseElemListBegin() } -func (p *TSimpleJSONProtocol) ReadSetEnd() error { +func (p *TSimpleJSONProtocol) ReadSetEnd(ctx context.Context) error { return p.ParseListEnd() } -func (p *TSimpleJSONProtocol) ReadBool() (bool, error) { +func (p *TSimpleJSONProtocol) ReadBool(ctx context.Context) (bool, error) { var value bool if err := p.ParsePreValue(); err != nil { @@ -466,32 +478,32 @@ func (p *TSimpleJSONProtocol) ReadBool() (bool, error) { return value, p.ParsePostValue() } -func (p *TSimpleJSONProtocol) ReadByte() (int8, error) { - v, err := p.ReadI64() +func (p *TSimpleJSONProtocol) ReadByte(ctx context.Context) (int8, error) { + v, err := p.ReadI64(ctx) return int8(v), err } -func (p *TSimpleJSONProtocol) ReadI16() (int16, error) { - v, err := p.ReadI64() +func (p *TSimpleJSONProtocol) ReadI16(ctx context.Context) (int16, error) { + v, err := p.ReadI64(ctx) return int16(v), err } -func (p *TSimpleJSONProtocol) ReadI32() (int32, error) { - v, err := p.ReadI64() +func (p *TSimpleJSONProtocol) ReadI32(ctx context.Context) (int32, error) { + v, err := p.ReadI64(ctx) return int32(v), err } -func (p *TSimpleJSONProtocol) ReadI64() (int64, error) { +func (p *TSimpleJSONProtocol) ReadI64(ctx context.Context) (int64, error) { v, _, err := p.ParseI64() return v, err } -func (p *TSimpleJSONProtocol) ReadDouble() (float64, error) { +func (p *TSimpleJSONProtocol) ReadDouble(ctx context.Context) (float64, error) { v, _, err := p.ParseF64() return v, err } -func (p *TSimpleJSONProtocol) ReadString() (string, error) { +func (p *TSimpleJSONProtocol) ReadString(ctx context.Context) (string, error) { var v string if err := p.ParsePreValue(); err != nil { return v, err @@ -521,7 +533,7 @@ func (p *TSimpleJSONProtocol) ReadString() (string, error) { return v, p.ParsePostValue() } -func (p *TSimpleJSONProtocol) ReadBinary() ([]byte, error) { +func (p *TSimpleJSONProtocol) ReadBinary(ctx context.Context) ([]byte, error) { var v []byte if err := p.ParsePreValue(); err != nil { return nil, err @@ -552,12 +564,12 @@ func (p *TSimpleJSONProtocol) ReadBinary() ([]byte, error) { return v, p.ParsePostValue() } -func (p *TSimpleJSONProtocol) Flush() (err error) { +func (p *TSimpleJSONProtocol) Flush(ctx context.Context) (err error) { return NewTProtocolException(p.writer.Flush()) } -func (p *TSimpleJSONProtocol) Skip(fieldType TType) (err error) { - return SkipDefaultDepth(p, fieldType) +func (p *TSimpleJSONProtocol) Skip(ctx context.Context, fieldType TType) (err error) { + return SkipDefaultDepth(ctx, p, fieldType) } func (p *TSimpleJSONProtocol) Transport() TTransport { @@ -565,41 +577,41 @@ func (p *TSimpleJSONProtocol) Transport() TTransport { } func (p *TSimpleJSONProtocol) OutputPreValue() error { - cxt := _ParseContext(p.dumpContext[len(p.dumpContext)-1]) + cxt, ok := p.dumpContext.peek() + if !ok { + return errEmptyJSONContextStack + } switch cxt { case _CONTEXT_IN_LIST, _CONTEXT_IN_OBJECT_NEXT_KEY: if _, e := p.write(JSON_COMMA); e != nil { return NewTProtocolException(e) } - break case _CONTEXT_IN_OBJECT_NEXT_VALUE: if _, e := 
p.write(JSON_COLON); e != nil { return NewTProtocolException(e) } - break } return nil } func (p *TSimpleJSONProtocol) OutputPostValue() error { - cxt := _ParseContext(p.dumpContext[len(p.dumpContext)-1]) + cxt, ok := p.dumpContext.peek() + if !ok { + return errEmptyJSONContextStack + } switch cxt { case _CONTEXT_IN_LIST_FIRST: - p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] - p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_LIST)) - break + p.dumpContext.pop() + p.dumpContext.push(_CONTEXT_IN_LIST) case _CONTEXT_IN_OBJECT_FIRST: - p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] - p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_VALUE)) - break + p.dumpContext.pop() + p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_VALUE) case _CONTEXT_IN_OBJECT_NEXT_KEY: - p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] - p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_VALUE)) - break + p.dumpContext.pop() + p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_VALUE) case _CONTEXT_IN_OBJECT_NEXT_VALUE: - p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] - p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_KEY)) - break + p.dumpContext.pop() + p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_KEY) } return nil } @@ -614,10 +626,13 @@ func (p *TSimpleJSONProtocol) OutputBool(value bool) error { } else { v = string(JSON_FALSE) } - switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) { + cxt, ok := p.dumpContext.peek() + if !ok { + return errEmptyJSONContextStack + } + switch cxt { case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: v = jsonQuote(v) - default: } if e := p.OutputStringData(v); e != nil { return e @@ -647,11 +662,14 @@ func (p *TSimpleJSONProtocol) OutputF64(value float64) error { } else if math.IsInf(value, -1) { v = string(JSON_QUOTE) + JSON_NEGATIVE_INFINITY + string(JSON_QUOTE) } else { + cxt, ok := p.dumpContext.peek() + if !ok { + return errEmptyJSONContextStack + } v = strconv.FormatFloat(value, 'g', -1, 64) - switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) { + switch cxt { case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: v = string(JSON_QUOTE) + v + string(JSON_QUOTE) - default: } } if e := p.OutputStringData(v); e != nil { @@ -664,11 +682,14 @@ func (p *TSimpleJSONProtocol) OutputI64(value int64) error { if e := p.OutputPreValue(); e != nil { return e } + cxt, ok := p.dumpContext.peek() + if !ok { + return errEmptyJSONContextStack + } v := strconv.FormatInt(value, 10) - switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) { + switch cxt { case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: v = jsonQuote(v) - default: } if e := p.OutputStringData(v); e != nil { return e @@ -698,7 +719,7 @@ func (p *TSimpleJSONProtocol) OutputObjectBegin() error { if _, e := p.write(JSON_LBRACE); e != nil { return NewTProtocolException(e) } - p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_FIRST)) + p.dumpContext.push(_CONTEXT_IN_OBJECT_FIRST) return nil } @@ -706,7 +727,10 @@ func (p *TSimpleJSONProtocol) OutputObjectEnd() error { if _, e := p.write(JSON_RBRACE); e != nil { return NewTProtocolException(e) } - p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] + _, ok := p.dumpContext.pop() + if !ok { + return errEmptyJSONContextStack + } if e := p.OutputPostValue(); e != nil { return e } @@ -720,7 +744,7 @@ func (p *TSimpleJSONProtocol) OutputListBegin() error { if _, e := p.write(JSON_LBRACKET); e != nil { return NewTProtocolException(e) } - p.dumpContext = append(p.dumpContext, 
int(_CONTEXT_IN_LIST_FIRST)) + p.dumpContext.push(_CONTEXT_IN_LIST_FIRST) return nil } @@ -728,7 +752,10 @@ func (p *TSimpleJSONProtocol) OutputListEnd() error { if _, e := p.write(JSON_RBRACKET); e != nil { return NewTProtocolException(e) } - p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] + _, ok := p.dumpContext.pop() + if !ok { + return errEmptyJSONContextStack + } if e := p.OutputPostValue(); e != nil { return e } @@ -739,10 +766,10 @@ func (p *TSimpleJSONProtocol) OutputElemListBegin(elemType TType, size int) erro if e := p.OutputListBegin(); e != nil { return e } - if e := p.WriteByte(int8(elemType)); e != nil { + if e := p.OutputI64(int64(elemType)); e != nil { return e } - if e := p.WriteI64(int64(size)); e != nil { + if e := p.OutputI64(int64(size)); e != nil { return e } return nil @@ -752,7 +779,10 @@ func (p *TSimpleJSONProtocol) ParsePreValue() error { if e := p.readNonSignificantWhitespace(); e != nil { return NewTProtocolException(e) } - cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) + cxt, ok := p.parseContextStack.peek() + if !ok { + return errEmptyJSONContextStack + } b, _ := p.reader.Peek(1) switch cxt { case _CONTEXT_IN_LIST: @@ -771,7 +801,6 @@ func (p *TSimpleJSONProtocol) ParsePreValue() error { return NewTProtocolExceptionWithType(INVALID_DATA, e) } } - break case _CONTEXT_IN_OBJECT_NEXT_KEY: if len(b) > 0 { switch b[0] { @@ -788,7 +817,6 @@ func (p *TSimpleJSONProtocol) ParsePreValue() error { return NewTProtocolExceptionWithType(INVALID_DATA, e) } } - break case _CONTEXT_IN_OBJECT_NEXT_VALUE: if len(b) > 0 { switch b[0] { @@ -803,7 +831,6 @@ func (p *TSimpleJSONProtocol) ParsePreValue() error { return NewTProtocolExceptionWithType(INVALID_DATA, e) } } - break } return nil } @@ -812,20 +839,20 @@ func (p *TSimpleJSONProtocol) ParsePostValue() error { if e := p.readNonSignificantWhitespace(); e != nil { return NewTProtocolException(e) } - cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) + cxt, ok := p.parseContextStack.peek() + if !ok { + return errEmptyJSONContextStack + } switch cxt { case _CONTEXT_IN_LIST_FIRST: - p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] - p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_LIST)) - break + p.parseContextStack.pop() + p.parseContextStack.push(_CONTEXT_IN_LIST) case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: - p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] - p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_NEXT_VALUE)) - break + p.parseContextStack.pop() + p.parseContextStack.push(_CONTEXT_IN_OBJECT_NEXT_VALUE) case _CONTEXT_IN_OBJECT_NEXT_VALUE: - p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] - p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_NEXT_KEY)) - break + p.parseContextStack.pop() + p.parseContextStack.push(_CONTEXT_IN_OBJECT_NEXT_KEY) } return nil } @@ -978,7 +1005,7 @@ func (p *TSimpleJSONProtocol) ParseObjectStart() (bool, error) { } if len(b) > 0 && b[0] == JSON_LBRACE[0] { p.reader.ReadByte() - p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_FIRST)) + p.parseContextStack.push(_CONTEXT_IN_OBJECT_FIRST) return false, nil } else if p.safePeekContains(JSON_NULL) { return true, nil @@ -991,7 +1018,7 @@ func (p *TSimpleJSONProtocol) ParseObjectEnd() error { if isNull, err := p.readIfNull(); isNull || err != nil { return err } - cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) + 
cxt, _ := p.parseContextStack.peek() if (cxt != _CONTEXT_IN_OBJECT_FIRST) && (cxt != _CONTEXT_IN_OBJECT_NEXT_KEY) { e := fmt.Errorf("Expected to be in the Object Context, but not in Object Context (%d)", cxt) return NewTProtocolExceptionWithType(INVALID_DATA, e) @@ -1009,7 +1036,7 @@ func (p *TSimpleJSONProtocol) ParseObjectEnd() error { break } } - p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] + p.parseContextStack.pop() return p.ParsePostValue() } @@ -1023,7 +1050,7 @@ func (p *TSimpleJSONProtocol) ParseListBegin() (isNull bool, err error) { return false, err } if len(b) >= 1 && b[0] == JSON_LBRACKET[0] { - p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_LIST_FIRST)) + p.parseContextStack.push(_CONTEXT_IN_LIST_FIRST) p.reader.ReadByte() isNull = false } else if p.safePeekContains(JSON_NULL) { @@ -1038,12 +1065,12 @@ func (p *TSimpleJSONProtocol) ParseElemListBegin() (elemType TType, size int, e if isNull, e := p.ParseListBegin(); isNull || e != nil { return VOID, 0, e } - bElemType, err := p.ReadByte() + bElemType, _, err := p.ParseI64() elemType = TType(bElemType) if err != nil { return elemType, size, err } - nSize, err2 := p.ReadI64() + nSize, _, err2 := p.ParseI64() size = int(nSize) return elemType, size, err2 } @@ -1052,7 +1079,7 @@ func (p *TSimpleJSONProtocol) ParseListEnd() error { if isNull, err := p.readIfNull(); isNull || err != nil { return err } - cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) + cxt, _ := p.parseContextStack.peek() if cxt != _CONTEXT_IN_LIST { e := fmt.Errorf("Expected to be in the List Context, but not in List Context (%d)", cxt) return NewTProtocolExceptionWithType(INVALID_DATA, e) @@ -1064,14 +1091,16 @@ func (p *TSimpleJSONProtocol) ParseListEnd() error { for _, char := range line { switch char { default: - e := fmt.Errorf("Expecting end of list \"]\", but found: \"", line, "\"") + e := fmt.Errorf("Expecting end of list \"]\", but found: \"%v\"", line) return NewTProtocolExceptionWithType(INVALID_DATA, e) case ' ', '\n', '\r', '\t', rune(JSON_RBRACKET[0]): break } } - p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] - if _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) == _CONTEXT_IN_TOPLEVEL { + p.parseContextStack.pop() + if cxt, ok := p.parseContextStack.peek(); !ok { + return errEmptyJSONContextStack + } else if cxt == _CONTEXT_IN_TOPLEVEL { return nil } return p.ParsePostValue() @@ -1315,7 +1344,7 @@ func (p *TSimpleJSONProtocol) readNumeric() (Numeric, error) { func (p *TSimpleJSONProtocol) safePeekContains(b []byte) bool { for i := 0; i < len(b); i++ { a, _ := p.reader.Peek(i + 1) - if len(a) == 0 || a[i] != b[i] { + if len(a) < (i+1) || a[i] != b[i] { return false } } @@ -1324,8 +1353,8 @@ func (p *TSimpleJSONProtocol) safePeekContains(b []byte) bool { // Reset the context stack to its initial state. func (p *TSimpleJSONProtocol) resetContextStack() { - p.parseContextStack = []int{int(_CONTEXT_IN_TOPLEVEL)} - p.dumpContext = []int{int(_CONTEXT_IN_TOPLEVEL)} + p.parseContextStack = jsonContextStack{_CONTEXT_IN_TOPLEVEL} + p.dumpContext = jsonContextStack{_CONTEXT_IN_TOPLEVEL} } func (p *TSimpleJSONProtocol) write(b []byte) (int, error) { @@ -1335,3 +1364,10 @@ func (p *TSimpleJSONProtocol) write(b []byte) (int, error) { } return n, err } + +// SetTConfiguration implements TConfigurationSetter for propagation. 
+func (p *TSimpleJSONProtocol) SetTConfiguration(conf *TConfiguration) { + PropagateTConfiguration(p.trans, conf) +} + +var _ TConfigurationSetter = (*TSimpleJSONProtocol)(nil) diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/simple_server.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/simple_server.go new file mode 100644 index 000000000..563cbfc69 --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/simple_server.go @@ -0,0 +1,332 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "errors" + "fmt" + "io" + "sync" + "sync/atomic" + "time" +) + +// ErrAbandonRequest is a special error server handler implementations can +// return to indicate that the request has been abandoned. +// +// TSimpleServer will check for this error, and close the client connection +// instead of writing the response/error back to the client. +// +// It shall only be used when the server handler implementation know that the +// client already abandoned the request (by checking that the passed in context +// is already canceled, for example). +var ErrAbandonRequest = errors.New("request abandoned") + +// ServerConnectivityCheckInterval defines the ticker interval used by +// connectivity check in thrift compiled TProcessorFunc implementations. +// +// It's defined as a variable instead of constant, so that thrift server +// implementations can change its value to control the behavior. +// +// If it's changed to <=0, the feature will be disabled. +var ServerConnectivityCheckInterval = time.Millisecond * 5 + +/* + * This is not a typical TSimpleServer as it is not blocked after accept a socket. + * It is more like a TThreadedServer that can handle different connections in different goroutines. + * This will work if golang user implements a conn-pool like thing in client side. 
+ */ +type TSimpleServer struct { + closed int32 + wg sync.WaitGroup + mu sync.Mutex + + processorFactory TProcessorFactory + serverTransport TServerTransport + inputTransportFactory TTransportFactory + outputTransportFactory TTransportFactory + inputProtocolFactory TProtocolFactory + outputProtocolFactory TProtocolFactory + + // Headers to auto forward in THeaderProtocol + forwardHeaders []string + + logger Logger +} + +func NewTSimpleServer2(processor TProcessor, serverTransport TServerTransport) *TSimpleServer { + return NewTSimpleServerFactory2(NewTProcessorFactory(processor), serverTransport) +} + +func NewTSimpleServer4(processor TProcessor, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer { + return NewTSimpleServerFactory4(NewTProcessorFactory(processor), + serverTransport, + transportFactory, + protocolFactory, + ) +} + +func NewTSimpleServer6(processor TProcessor, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer { + return NewTSimpleServerFactory6(NewTProcessorFactory(processor), + serverTransport, + inputTransportFactory, + outputTransportFactory, + inputProtocolFactory, + outputProtocolFactory, + ) +} + +func NewTSimpleServerFactory2(processorFactory TProcessorFactory, serverTransport TServerTransport) *TSimpleServer { + return NewTSimpleServerFactory6(processorFactory, + serverTransport, + NewTTransportFactory(), + NewTTransportFactory(), + NewTBinaryProtocolFactoryDefault(), + NewTBinaryProtocolFactoryDefault(), + ) +} + +func NewTSimpleServerFactory4(processorFactory TProcessorFactory, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer { + return NewTSimpleServerFactory6(processorFactory, + serverTransport, + transportFactory, + transportFactory, + protocolFactory, + protocolFactory, + ) +} + +func NewTSimpleServerFactory6(processorFactory TProcessorFactory, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer { + return &TSimpleServer{ + processorFactory: processorFactory, + serverTransport: serverTransport, + inputTransportFactory: inputTransportFactory, + outputTransportFactory: outputTransportFactory, + inputProtocolFactory: inputProtocolFactory, + outputProtocolFactory: outputProtocolFactory, + } +} + +func (p *TSimpleServer) ProcessorFactory() TProcessorFactory { + return p.processorFactory +} + +func (p *TSimpleServer) ServerTransport() TServerTransport { + return p.serverTransport +} + +func (p *TSimpleServer) InputTransportFactory() TTransportFactory { + return p.inputTransportFactory +} + +func (p *TSimpleServer) OutputTransportFactory() TTransportFactory { + return p.outputTransportFactory +} + +func (p *TSimpleServer) InputProtocolFactory() TProtocolFactory { + return p.inputProtocolFactory +} + +func (p *TSimpleServer) OutputProtocolFactory() TProtocolFactory { + return p.outputProtocolFactory +} + +func (p *TSimpleServer) Listen() error { + return p.serverTransport.Listen() +} + +// SetForwardHeaders sets the list of header keys that will be auto forwarded +// while using THeaderProtocol. 
+// +// "forward" means that when the server is also a client to other upstream +// thrift servers, the context object user gets in the processor functions will +// have both read and write headers set, with write headers being forwarded. +// Users can always override the write headers by calling SetWriteHeaderList +// before calling thrift client functions. +func (p *TSimpleServer) SetForwardHeaders(headers []string) { + size := len(headers) + if size == 0 { + p.forwardHeaders = nil + return + } + + keys := make([]string, size) + copy(keys, headers) + p.forwardHeaders = keys +} + +// SetLogger sets the logger used by this TSimpleServer. +// +// If no logger was set before Serve is called, a default logger using standard +// log library will be used. +func (p *TSimpleServer) SetLogger(logger Logger) { + p.logger = logger +} + +func (p *TSimpleServer) innerAccept() (int32, error) { + client, err := p.serverTransport.Accept() + p.mu.Lock() + defer p.mu.Unlock() + closed := atomic.LoadInt32(&p.closed) + if closed != 0 { + return closed, nil + } + if err != nil { + return 0, err + } + if client != nil { + p.wg.Add(1) + go func() { + defer p.wg.Done() + if err := p.processRequests(client); err != nil { + p.logger(fmt.Sprintf("error processing request: %v", err)) + } + }() + } + return 0, nil +} + +func (p *TSimpleServer) AcceptLoop() error { + for { + closed, err := p.innerAccept() + if err != nil { + return err + } + if closed != 0 { + return nil + } + } +} + +func (p *TSimpleServer) Serve() error { + p.logger = fallbackLogger(p.logger) + + err := p.Listen() + if err != nil { + return err + } + p.AcceptLoop() + return nil +} + +func (p *TSimpleServer) Stop() error { + p.mu.Lock() + defer p.mu.Unlock() + if atomic.LoadInt32(&p.closed) != 0 { + return nil + } + atomic.StoreInt32(&p.closed, 1) + p.serverTransport.Interrupt() + p.wg.Wait() + return nil +} + +// If err is actually EOF, return nil, otherwise return err as-is. +func treatEOFErrorsAsNil(err error) error { + if err == nil { + return nil + } + if errors.Is(err, io.EOF) { + return nil + } + var te TTransportException + if errors.As(err, &te) && te.TypeId() == END_OF_FILE { + return nil + } + return err +} + +func (p *TSimpleServer) processRequests(client TTransport) (err error) { + defer func() { + err = treatEOFErrorsAsNil(err) + }() + + processor := p.processorFactory.GetProcessor(client) + inputTransport, err := p.inputTransportFactory.GetTransport(client) + if err != nil { + return err + } + inputProtocol := p.inputProtocolFactory.GetProtocol(inputTransport) + var outputTransport TTransport + var outputProtocol TProtocol + + // for THeaderProtocol, we must use the same protocol instance for + // input and output so that the response is in the same dialect that + // the server detected the request was in. 
+ headerProtocol, ok := inputProtocol.(*THeaderProtocol) + if ok { + outputProtocol = inputProtocol + } else { + oTrans, err := p.outputTransportFactory.GetTransport(client) + if err != nil { + return err + } + outputTransport = oTrans + outputProtocol = p.outputProtocolFactory.GetProtocol(outputTransport) + } + + if inputTransport != nil { + defer inputTransport.Close() + } + if outputTransport != nil { + defer outputTransport.Close() + } + for { + if atomic.LoadInt32(&p.closed) != 0 { + return nil + } + + ctx := SetResponseHelper( + defaultCtx, + TResponseHelper{ + THeaderResponseHelper: NewTHeaderResponseHelper(outputProtocol), + }, + ) + if headerProtocol != nil { + // We need to call ReadFrame here, otherwise we won't + // get any headers on the AddReadTHeaderToContext call. + // + // ReadFrame is safe to be called multiple times so it + // won't break when it's called again later when we + // actually start to read the message. + if err := headerProtocol.ReadFrame(ctx); err != nil { + return err + } + ctx = AddReadTHeaderToContext(ctx, headerProtocol.GetReadHeaders()) + ctx = SetWriteHeaderList(ctx, p.forwardHeaders) + } + + ok, err := processor.Process(ctx, inputProtocol, outputProtocol) + if errors.Is(err, ErrAbandonRequest) { + return client.Close() + } + if errors.As(err, new(TTransportException)) && err != nil { + return err + } + var tae TApplicationException + if errors.As(err, &tae) && tae.TypeId() == UNKNOWN_METHOD { + continue + } + if !ok { + break + } + } + return nil +} diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/transport.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/transport.go similarity index 93% rename from hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/transport.go rename to hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/transport.go index 453899651..ba2738a8d 100644 --- a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/transport.go +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/transport.go @@ -20,6 +20,7 @@ package thrift import ( + "context" "errors" "io" ) @@ -30,15 +31,18 @@ type Flusher interface { Flush() (err error) } +type ContextFlusher interface { + Flush(ctx context.Context) (err error) +} + type ReadSizeProvider interface { RemainingBytes() (num_bytes uint64) } - // Encapsulates the I/O layer type TTransport interface { io.ReadWriteCloser - Flusher + ContextFlusher ReadSizeProvider // Opens the transport for communication @@ -52,7 +56,6 @@ type stringWriter interface { WriteString(s string) (n int, err error) } - // This is "enchanced" transport with extra capabilities. You need to use one of these // to construct protocol. // Notably, TSocket does not implement this interface, and it is always a mistake to use @@ -62,7 +65,6 @@ type TRichTransport interface { io.ByteReader io.ByteWriter stringWriter - Flusher + ContextFlusher ReadSizeProvider } - diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go new file mode 100644 index 000000000..0a3f07646 --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go @@ -0,0 +1,131 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "errors" + "io" +) + +type timeoutable interface { + Timeout() bool +} + +// Thrift Transport exception +type TTransportException interface { + TException + TypeId() int + Err() error +} + +const ( + UNKNOWN_TRANSPORT_EXCEPTION = 0 + NOT_OPEN = 1 + ALREADY_OPEN = 2 + TIMED_OUT = 3 + END_OF_FILE = 4 +) + +type tTransportException struct { + typeId int + err error + msg string +} + +var _ TTransportException = (*tTransportException)(nil) + +func (tTransportException) TExceptionType() TExceptionType { + return TExceptionTypeTransport +} + +func (p *tTransportException) TypeId() int { + return p.typeId +} + +func (p *tTransportException) Error() string { + return p.msg +} + +func (p *tTransportException) Err() error { + return p.err +} + +func (p *tTransportException) Unwrap() error { + return p.err +} + +func (p *tTransportException) Timeout() bool { + return p.typeId == TIMED_OUT +} + +func NewTTransportException(t int, e string) TTransportException { + return &tTransportException{ + typeId: t, + err: errors.New(e), + msg: e, + } +} + +func NewTTransportExceptionFromError(e error) TTransportException { + if e == nil { + return nil + } + + if t, ok := e.(TTransportException); ok { + return t + } + + te := &tTransportException{ + typeId: UNKNOWN_TRANSPORT_EXCEPTION, + err: e, + msg: e.Error(), + } + + if isTimeoutError(e) { + te.typeId = TIMED_OUT + return te + } + + if errors.Is(e, io.EOF) { + te.typeId = END_OF_FILE + return te + } + + return te +} + +func prependTTransportException(prepend string, e TTransportException) TTransportException { + return &tTransportException{ + typeId: e.TypeId(), + err: e, + msg: prepend + e.Error(), + } +} + +// isTimeoutError returns true when err is an error caused by timeout. +// +// Note that this also includes TTransportException wrapped timeout errors. +func isTimeoutError(err error) bool { + var t timeoutable + if errors.As(err, &t) { + return t.Timeout() + } + return false +} diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/transport_factory.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go similarity index 89% rename from hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/transport_factory.go rename to hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go index 533d1b437..c80580794 100644 --- a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/transport_factory.go +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go @@ -24,14 +24,14 @@ package thrift // a ServerTransport and then may want to mutate them (i.e. 
create // a BufferedTransport from the underlying base transport) type TTransportFactory interface { - GetTransport(trans TTransport) TTransport + GetTransport(trans TTransport) (TTransport, error) } type tTransportFactory struct{} // Return a wrapped instance of the base Transport. -func (p *tTransportFactory) GetTransport(trans TTransport) TTransport { - return trans +func (p *tTransportFactory) GetTransport(trans TTransport) (TTransport, error) { + return trans, nil } func NewTTransportFactory() TTransportFactory { diff --git a/hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/type.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/type.go similarity index 100% rename from hotelReservation/vendor/github.com/apache/thrift/lib/go/thrift/type.go rename to hotelReservation/vendor/github.com/uber/jaeger-client-go/thrift/type.go diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/tracer.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/tracer.go index 49fb099e3..9a627bed5 100644 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/tracer.go +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/tracer.go @@ -1,4 +1,4 @@ -// Copyright (c) 2017 Uber Technologies, Inc. +// Copyright (c) 2017-2018 Uber Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,8 +17,10 @@ package jaeger import ( "fmt" "io" + "math/rand" "os" "reflect" + "strconv" "sync" "time" @@ -26,6 +28,7 @@ import ( "github.com/opentracing/opentracing-go/ext" "github.com/uber/jaeger-client-go/internal/baggage" + "github.com/uber/jaeger-client-go/internal/throttler" "github.com/uber/jaeger-client-go/log" "github.com/uber/jaeger-client-go/utils" ) @@ -35,38 +38,44 @@ type Tracer struct { serviceName string hostIPv4 uint32 // this is for zipkin endpoint conversion - sampler Sampler + sampler SamplerV2 reporter Reporter metrics Metrics - logger log.Logger + logger log.DebugLogger timeNow func() time.Time randomNumber func() uint64 options struct { - poolSpans bool - gen128Bit bool // whether to generate 128bit trace IDs - zipkinSharedRPCSpan bool - highTraceIDGenerator func() uint64 // custom high trace ID generator + gen128Bit bool // whether to generate 128bit trace IDs + zipkinSharedRPCSpan bool + highTraceIDGenerator func() uint64 // custom high trace ID generator + maxTagValueLength int + noDebugFlagOnForcedSampling bool + maxLogsPerSpan int // more options to come } - // pool for Span objects - spanPool sync.Pool + // allocator of Span objects + spanAllocator SpanAllocator injectors map[interface{}]Injector extractors map[interface{}]Extractor observer compositeObserver - tags []Tag + tags []Tag + process Process baggageRestrictionManager baggage.RestrictionManager baggageSetter *baggageSetter + + debugThrottler throttler.Throttler } // NewTracer creates Tracer implementation that reports tracing to Jaeger. // The returned io.Closer can be used in shutdown hooks to ensure that the internal // queue of the Reporter is drained and all buffered spans are submitted to collectors. +// TODO (breaking change) return *Tracer only, without closer. 
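
The constructor diff below keeps the (opentracing.Tracer, io.Closer) return shape while adapting any v1 Sampler to SamplerV2 internally. For orientation, typical construction against this API looks like the following sketch; the service name and option choices are illustrative, while NewConstSampler, NewNullReporter, and the TracerOptions factory are existing helpers from the same package:

package main

import (
    "log"

    "github.com/uber/jaeger-client-go"
)

func main() {
    // ConstSampler(true) samples every trace; NullReporter discards
    // spans, which keeps the sketch runnable without a collector.
    tracer, closer := jaeger.NewTracer(
        "demo-service", // illustrative service name
        jaeger.NewConstSampler(true),
        jaeger.NewNullReporter(),
        jaeger.TracerOptions.Gen128Bit(true),
        jaeger.TracerOptions.MaxTagValueLength(256),
    )
    // The returned io.Closer drains the reporter; call it on shutdown.
    defer closer.Close()

    span := tracer.StartSpan("demo-operation")
    span.Finish()
    log.Println("span finished")
}
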
func NewTracer( serviceName string, sampler Sampler, @@ -74,15 +83,13 @@ func NewTracer( options ...TracerOption, ) (opentracing.Tracer, io.Closer) { t := &Tracer{ - serviceName: serviceName, - sampler: sampler, - reporter: reporter, - injectors: make(map[interface{}]Injector), - extractors: make(map[interface{}]Extractor), - metrics: *NewNullMetrics(), - spanPool: sync.Pool{New: func() interface{} { - return &Span{} - }}, + serviceName: serviceName, + sampler: samplerV1toV2(sampler), + reporter: reporter, + injectors: make(map[interface{}]Injector), + extractors: make(map[interface{}]Extractor), + metrics: *NewNullMetrics(), + spanAllocator: simpleSpanAllocator{}, } for _, option := range options { @@ -90,13 +97,13 @@ func NewTracer( } // register default injectors/extractors unless they are already provided via options - textPropagator := newTextMapPropagator(getDefaultHeadersConfig(), t.metrics) + textPropagator := NewTextMapPropagator(getDefaultHeadersConfig(), t.metrics) t.addCodec(opentracing.TextMap, textPropagator, textPropagator) - httpHeaderPropagator := newHTTPHeaderPropagator(getDefaultHeadersConfig(), t.metrics) + httpHeaderPropagator := NewHTTPHeaderPropagator(getDefaultHeadersConfig(), t.metrics) t.addCodec(opentracing.HTTPHeaders, httpHeaderPropagator, httpHeaderPropagator) - binaryPropagator := newBinaryPropagator(t) + binaryPropagator := NewBinaryPropagator(t) t.addCodec(opentracing.Binary, binaryPropagator, binaryPropagator) // TODO remove after TChannel supports OpenTracing @@ -111,10 +118,23 @@ func NewTracer( } else { t.baggageSetter = newBaggageSetter(baggage.NewDefaultRestrictionManager(0), &t.metrics) } + if t.debugThrottler == nil { + t.debugThrottler = throttler.DefaultThrottler{} + } + if t.randomNumber == nil { - rng := utils.NewRand(time.Now().UnixNano()) + seedGenerator := utils.NewRand(time.Now().UnixNano()) + pool := sync.Pool{ + New: func() interface{} { + return rand.NewSource(seedGenerator.Int63()) + }, + } + t.randomNumber = func() uint64 { - return uint64(rng.Int63()) + generator := pool.Get().(rand.Source) + number := uint64(generator.Int63()) + pool.Put(generator) + return number } } if t.timeNow == nil { @@ -128,7 +148,15 @@ func NewTracer( if hostname, err := os.Hostname(); err == nil { t.tags = append(t.tags, Tag{key: TracerHostnameTagKey, value: hostname}) } - if ip, err := utils.HostIP(); err == nil { + if ipval, ok := t.getTag(TracerIPTagKey); ok { + ipv4, err := utils.ParseIPToUint32(ipval.(string)) + if err != nil { + t.hostIPv4 = 0 + t.logger.Error("Unable to convert the externally provided ip to uint32: " + err.Error()) + } else { + t.hostIPv4 = ipv4 + } + } else if ip, err := utils.HostIP(); err == nil { t.tags = append(t.tags, Tag{key: TracerIPTagKey, value: ip.String()}) t.hostIPv4 = utils.PackIPAsUint32(ip) } else { @@ -143,6 +171,17 @@ func NewTracer( t.logger.Error("Overriding high trace ID generator but not generating " + "128 bit trace IDs, consider enabling the \"Gen128Bit\" option") } + if t.options.maxTagValueLength == 0 { + t.options.maxTagValueLength = DefaultMaxTagValueLength + } + t.process = Process{ + Service: serviceName, + UUID: strconv.FormatUint(t.randomNumber(), 16), + Tags: t.tags, + } + if throttler, ok := t.debugThrottler.(ProcessSetter); ok { + throttler.SetProcess(t.process) + } return t, t } @@ -177,27 +216,46 @@ func (t *Tracer) startSpanWithOptions( options.StartTime = t.timeNow() } + // Predicate whether the given span context is an empty reference + // or may be used as parent / debug ID / baggage items 
source + isEmptyReference := func(ctx SpanContext) bool { + return !ctx.IsValid() && !ctx.isDebugIDContainerOnly() && len(ctx.baggage) == 0 + } + var references []Reference var parent SpanContext var hasParent bool // need this because `parent` is a value, not reference + var ctx SpanContext + var isSelfRef bool for _, ref := range options.References { - ctx, ok := ref.ReferencedContext.(SpanContext) + ctxRef, ok := ref.ReferencedContext.(SpanContext) if !ok { t.logger.Error(fmt.Sprintf( "Reference contains invalid type of SpanReference: %s", reflect.ValueOf(ref.ReferencedContext))) continue } - if !(ctx.IsValid() || ctx.isDebugIDContainerOnly() || len(ctx.baggage) != 0) { + if isEmptyReference(ctxRef) { continue } - references = append(references, Reference{Type: ref.Type, Context: ctx}) + + if ref.Type == selfRefType { + isSelfRef = true + ctx = ctxRef + continue + } + + if ctxRef.IsValid() { + // we don't want empty context that contains only debug-id or baggage + references = append(references, Reference{Type: ref.Type, Context: ctxRef}) + } + if !hasParent { - parent = ctx + parent = ctxRef hasParent = ref.Type == opentracing.ChildOfRef } } - if !hasParent && parent.IsValid() { + if !hasParent && !isEmptyReference(parent) { // If ChildOfRef wasn't found but a FollowFromRef exists, use the context from // the FollowFromRef as the parent hasParent = true @@ -208,60 +266,77 @@ func (t *Tracer) startSpanWithOptions( rpcServer = (v == ext.SpanKindRPCServerEnum || v == string(ext.SpanKindRPCServerEnum)) } - var samplerTags []Tag - var ctx SpanContext + var internalTags []Tag newTrace := false - if !hasParent || !parent.IsValid() { - newTrace = true - ctx.traceID.Low = t.randomID() - if t.options.gen128Bit { - ctx.traceID.High = t.options.highTraceIDGenerator() - } - ctx.spanID = SpanID(ctx.traceID.Low) - ctx.parentID = 0 - ctx.flags = byte(0) - if hasParent && parent.isDebugIDContainerOnly() { - ctx.flags |= (flagSampled | flagDebug) - samplerTags = []Tag{{key: JaegerDebugHeader, value: parent.debugID}} - } else if sampled, tags := t.sampler.IsSampled(ctx.traceID, operationName); sampled { - ctx.flags |= flagSampled - samplerTags = tags - } - } else { - ctx.traceID = parent.traceID - if rpcServer && t.options.zipkinSharedRPCSpan { - // Support Zipkin's one-span-per-RPC model - ctx.spanID = parent.spanID - ctx.parentID = parent.parentID + if !isSelfRef { + if !hasParent || !parent.IsValid() { + newTrace = true + ctx.traceID.Low = t.randomID() + if t.options.gen128Bit { + ctx.traceID.High = t.options.highTraceIDGenerator() + } + ctx.spanID = SpanID(ctx.traceID.Low) + ctx.parentID = 0 + ctx.samplingState = &samplingState{ + localRootSpan: ctx.spanID, + } + if hasParent && parent.isDebugIDContainerOnly() && t.isDebugAllowed(operationName) { + ctx.samplingState.setDebugAndSampled() + internalTags = append(internalTags, Tag{key: JaegerDebugHeader, value: parent.debugID}) + } } else { - ctx.spanID = SpanID(t.randomID()) - ctx.parentID = parent.spanID + ctx.traceID = parent.traceID + if rpcServer && t.options.zipkinSharedRPCSpan { + // Support Zipkin's one-span-per-RPC model + ctx.spanID = parent.spanID + ctx.parentID = parent.parentID + } else { + ctx.spanID = SpanID(t.randomID()) + ctx.parentID = parent.spanID + } + ctx.samplingState = parent.samplingState + if parent.remote { + ctx.samplingState.setFinal() + ctx.samplingState.localRootSpan = ctx.spanID + } } - ctx.flags = parent.flags - } - if hasParent { - // copy baggage items - if l := len(parent.baggage); l > 0 { - ctx.baggage = 
make(map[string]string, len(parent.baggage)) - for k, v := range parent.baggage { - ctx.baggage[k] = v + if hasParent { + // copy baggage items + if l := len(parent.baggage); l > 0 { + ctx.baggage = make(map[string]string, len(parent.baggage)) + for k, v := range parent.baggage { + ctx.baggage[k] = v + } } } } sp := t.newSpan() sp.context = ctx + sp.tracer = t + sp.operationName = operationName + sp.startTime = options.StartTime + sp.duration = 0 + sp.references = references + sp.firstInProcess = rpcServer || sp.context.parentID == 0 + + if !sp.context.isSamplingFinalized() { + decision := t.sampler.OnCreateSpan(sp) + sp.applySamplingDecision(decision, false) + } sp.observer = t.observer.OnStartSpan(sp, operationName, options) - return t.startSpanInternal( - sp, - operationName, - options.StartTime, - samplerTags, - options.Tags, - newTrace, - rpcServer, - references, - ) + + if tagsTotalLength := len(options.Tags) + len(internalTags); tagsTotalLength > 0 { + if sp.tags == nil || cap(sp.tags) < tagsTotalLength { + sp.tags = make([]Tag, 0, tagsTotalLength) + } + sp.tags = append(sp.tags, internalTags...) + for k, v := range options.Tags { + sp.setTagInternal(k, v, false) + } + } + t.emitNewSpanMetrics(sp, newTrace) + return sp } // Inject implements Inject() method of opentracing.Tracer @@ -282,17 +357,26 @@ func (t *Tracer) Extract( carrier interface{}, ) (opentracing.SpanContext, error) { if extractor, ok := t.extractors[format]; ok { - return extractor.Extract(carrier) + spanCtx, err := extractor.Extract(carrier) + if err != nil { + return nil, err // ensure returned spanCtx is nil + } + spanCtx.remote = true + return spanCtx, nil } return nil, opentracing.ErrUnsupportedFormat } // Close releases all resources used by the Tracer and flushes any remaining buffered spans. func (t *Tracer) Close() error { + t.logger.Debugf("closing tracer") t.reporter.Close() t.sampler.Close() if mgr, ok := t.baggageRestrictionManager.(io.Closer); ok { - mgr.Close() + _ = mgr.Close() + } + if throttler, ok := t.debugThrottler.(io.Closer); ok { + _ = throttler.Close() } return nil } @@ -306,55 +390,38 @@ func (t *Tracer) Tags() []opentracing.Tag { return tags } +// getTag returns the value of specific tag, if not exists, return nil. +// TODO only used by tests, move there. +func (t *Tracer) getTag(key string) (interface{}, bool) { + for _, tag := range t.tags { + if tag.key == key { + return tag.value, true + } + } + return nil, false +} + // newSpan returns an instance of a clean Span object. // If options.PoolSpans is true, the spans are retrieved from an object pool. 
func (t *Tracer) newSpan() *Span { - if !t.options.poolSpans { - return &Span{} - } - sp := t.spanPool.Get().(*Span) - sp.context = emptyContext - sp.tracer = nil - sp.tags = nil - sp.logs = nil - return sp + return t.spanAllocator.Get() } -func (t *Tracer) startSpanInternal( - sp *Span, - operationName string, - startTime time.Time, - internalTags []Tag, - tags opentracing.Tags, - newTrace bool, - rpcServer bool, - references []Reference, -) *Span { - sp.tracer = t - sp.operationName = operationName - sp.startTime = startTime - sp.duration = 0 - sp.references = references - sp.firstInProcess = rpcServer || sp.context.parentID == 0 - if len(tags) > 0 || len(internalTags) > 0 { - sp.tags = make([]Tag, len(internalTags), len(tags)+len(internalTags)) - copy(sp.tags, internalTags) - for k, v := range tags { - sp.observer.OnSetTag(k, v) - if k == string(ext.SamplingPriority) && setSamplingPriority(sp, v) { - continue - } - sp.setTagNoLocking(k, v) +// emitNewSpanMetrics generates metrics on the number of started spans and traces. +// newTrace param: we cannot simply check for parentID==0 because in Zipkin model the +// server-side RPC span has the exact same trace/span/parent IDs as the +// calling client-side span, but obviously the server side span is +// no longer a root span of the trace. +func (t *Tracer) emitNewSpanMetrics(sp *Span, newTrace bool) { + if !sp.context.isSamplingFinalized() { + t.metrics.SpansStartedDelayedSampling.Inc(1) + if newTrace { + t.metrics.TracesStartedDelayedSampling.Inc(1) } - } - // emit metrics - if sp.context.IsSampled() { + // joining a trace is not possible, because sampling decision inherited from upstream is final + } else if sp.context.IsSampled() { t.metrics.SpansStartedSampled.Inc(1) if newTrace { - // We cannot simply check for parentID==0 because in Zipkin model the - // server-side RPC span has the exact same trace/span/parent IDs as the - // calling client-side span, but obviously the server side span is - // no longer a root span of the trace. t.metrics.TracesStartedSampled.Inc(1) } else if sp.firstInProcess { t.metrics.TracesJoinedSampled.Inc(1) @@ -367,17 +434,27 @@ func (t *Tracer) startSpanInternal( t.metrics.TracesJoinedNotSampled.Inc(1) } } - return sp } func (t *Tracer) reportSpan(sp *Span) { - t.metrics.SpansFinished.Inc(1) - if sp.context.IsSampled() { - t.reporter.Report(sp) + ctx := sp.SpanContext() + + if !ctx.isSamplingFinalized() { + t.metrics.SpansFinishedDelayedSampling.Inc(1) + } else if ctx.IsSampled() { + t.metrics.SpansFinishedSampled.Inc(1) + } else { + t.metrics.SpansFinishedNotSampled.Inc(1) } - if t.options.poolSpans { - t.spanPool.Put(sp) + + // Note: if the reporter is processing Span asynchronously then it needs to Retain() the span, + // and then Release() it when no longer needed. + // Otherwise, the span may be reused for another trace and its data may be overwritten. + if ctx.IsSampled() { + t.reporter.Report(sp) } + + sp.Release() } // randomID generates a random trace/span ID, using tracer.random() generator. 
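
The Retain/Release comment in reportSpan above is the load-bearing contract of the new span allocator: once pooled spans are enabled, any reporter that hands a span to another goroutine must pin it first, or the allocator may recycle the span while it is still being serialized. A sketch of a reporter honoring that contract — asyncReporter is illustrative and not part of this patch, while Span.Retain and Span.Release are the methods the comment refers to:

package main

import "github.com/uber/jaeger-client-go"

// asyncReporter ships spans from a background goroutine, so it must
// Retain each span before crossing the goroutine boundary and Release
// it when done, balancing every Retain exactly once.
type asyncReporter struct {
    ch chan *jaeger.Span
}

func (r *asyncReporter) Report(span *jaeger.Span) {
    span.Retain() // keep the span alive past this call
    select {
    case r.ch <- span:
    default:
        span.Release() // queue full: drop the span, balance the Retain
    }
}

func (r *asyncReporter) Close() { close(r.ch) }

func (r *asyncReporter) loop() {
    for span := range r.ch {
        _ = span.OperationName() // serialize and ship the span here
        span.Release()           // now the allocator may recycle it
    }
}

func main() {
    // Demo wiring only; a production reporter would also synchronize
    // shutdown between Report and Close before passing itself to
    // jaeger.NewTracer(serviceName, sampler, reporter, ...).
    r := &asyncReporter{ch: make(chan *jaeger.Span, 128)}
    go r.loop()
    defer r.Close()
}
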
@@ -390,7 +467,27 @@ func (t *Tracer) randomID() uint64 { return val } -// (NB) span should hold the lock before making this call +// (NB) span must hold the lock before making this call func (t *Tracer) setBaggage(sp *Span, key, value string) { t.baggageSetter.setBaggage(sp, key, value) } + +// (NB) span must hold the lock before making this call +func (t *Tracer) isDebugAllowed(operation string) bool { + return t.debugThrottler.IsAllowed(operation) +} + +// Sampler returns the sampler given to the tracer at creation. +func (t *Tracer) Sampler() SamplerV2 { + return t.sampler +} + +// SelfRef creates an opentracing compliant SpanReference from a jaeger +// SpanContext. This is a factory function in order to encapsulate jaeger specific +// types. +func SelfRef(ctx SpanContext) opentracing.SpanReference { + return opentracing.SpanReference{ + Type: selfRefType, + ReferencedContext: ctx, + } +} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/tracer_options.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/tracer_options.go index 72edf6dbb..16b460656 100644 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/tracer_options.go +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/tracer_options.go @@ -20,47 +20,52 @@ import ( "github.com/opentracing/opentracing-go" "github.com/uber/jaeger-client-go/internal/baggage" + "github.com/uber/jaeger-client-go/internal/throttler" + "github.com/uber/jaeger-client-go/log" ) // TracerOption is a function that sets some option on the tracer type TracerOption func(tracer *Tracer) -// TracerOptions is a factory for all available TracerOption's -var TracerOptions tracerOptions +// TracerOptions is a factory for all available TracerOption's. +var TracerOptions TracerOptionsFactory -type tracerOptions struct{} +// TracerOptionsFactory is a struct that defines functions for all available TracerOption's. +type TracerOptionsFactory struct{} // Metrics creates a TracerOption that initializes Metrics on the tracer, // which is used to emit statistics. -func (tracerOptions) Metrics(m *Metrics) TracerOption { +func (TracerOptionsFactory) Metrics(m *Metrics) TracerOption { return func(tracer *Tracer) { tracer.metrics = *m } } // Logger creates a TracerOption that gives the tracer a Logger. -func (tracerOptions) Logger(logger Logger) TracerOption { +func (TracerOptionsFactory) Logger(logger Logger) TracerOption { return func(tracer *Tracer) { - tracer.logger = logger + tracer.logger = log.DebugLogAdapter(logger) } } -func (tracerOptions) CustomHeaderKeys(headerKeys *HeadersConfig) TracerOption { +// CustomHeaderKeys allows to override default HTTP header keys used to propagate +// tracing context. +func (TracerOptionsFactory) CustomHeaderKeys(headerKeys *HeadersConfig) TracerOption { return func(tracer *Tracer) { if headerKeys == nil { return } - textPropagator := newTextMapPropagator(headerKeys.applyDefaults(), tracer.metrics) + textPropagator := NewTextMapPropagator(headerKeys.ApplyDefaults(), tracer.metrics) tracer.addCodec(opentracing.TextMap, textPropagator, textPropagator) - httpHeaderPropagator := newHTTPHeaderPropagator(headerKeys.applyDefaults(), tracer.metrics) + httpHeaderPropagator := NewHTTPHeaderPropagator(headerKeys.ApplyDefaults(), tracer.metrics) tracer.addCodec(opentracing.HTTPHeaders, httpHeaderPropagator, httpHeaderPropagator) } } // TimeNow creates a TracerOption that gives the tracer a function // used to generate timestamps for spans. 
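
Each of these options is a closure over *Tracer, so they compose freely at construction time. TimeNow, whose diff continues below, is mainly useful for deterministic tests; a brief sketch (the service name is illustrative):

package main

import (
    "time"

    "github.com/uber/jaeger-client-go"
)

func main() {
    fixed := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)
    tracer, closer := jaeger.NewTracer(
        "clock-test", // illustrative service name
        jaeger.NewConstSampler(true),
        jaeger.NewNullReporter(),
        // Every span started by this tracer gets the frozen timestamp,
        // which makes golden-file assertions on span data stable.
        jaeger.TracerOptions.TimeNow(func() time.Time { return fixed }),
    )
    defer closer.Close()

    span := tracer.StartSpan("op") // StartTime == fixed
    span.Finish()
}
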
-func (tracerOptions) TimeNow(timeNow func() time.Time) TracerOption { +func (TracerOptionsFactory) TimeNow(timeNow func() time.Time) TracerOption { return func(tracer *Tracer) { tracer.timeNow = timeNow } @@ -68,7 +73,7 @@ func (tracerOptions) TimeNow(timeNow func() time.Time) TracerOption { // RandomNumber creates a TracerOption that gives the tracer // a thread-safe random number generator function for generating trace IDs. -func (tracerOptions) RandomNumber(randomNumber func() uint64) TracerOption { +func (TracerOptionsFactory) RandomNumber(randomNumber func() uint64) TracerOption { return func(tracer *Tracer) { tracer.randomNumber = randomNumber } @@ -78,69 +83,120 @@ func (tracerOptions) RandomNumber(randomNumber func() uint64) TracerOption { // an object pool to minimize span allocations. // This should be used with care, only if the service is not running any async tasks // that can access parent spans after those spans have been finished. -func (tracerOptions) PoolSpans(poolSpans bool) TracerOption { +func (TracerOptionsFactory) PoolSpans(poolSpans bool) TracerOption { return func(tracer *Tracer) { - tracer.options.poolSpans = poolSpans + if poolSpans { + tracer.spanAllocator = newSyncPollSpanAllocator() + } else { + tracer.spanAllocator = simpleSpanAllocator{} + } } } -// Deprecated: HostIPv4 creates a TracerOption that identifies the current service/process. +// HostIPv4 creates a TracerOption that identifies the current service/process. // If not set, the factory method will obtain the current IP address. // The TracerOption is deprecated; the tracer will attempt to automatically detect the IP. -func (tracerOptions) HostIPv4(hostIPv4 uint32) TracerOption { +// +// Deprecated. +func (TracerOptionsFactory) HostIPv4(hostIPv4 uint32) TracerOption { return func(tracer *Tracer) { tracer.hostIPv4 = hostIPv4 } } -func (tracerOptions) Injector(format interface{}, injector Injector) TracerOption { +// Injector registers a Injector for given format. +func (TracerOptionsFactory) Injector(format interface{}, injector Injector) TracerOption { return func(tracer *Tracer) { tracer.injectors[format] = injector } } -func (tracerOptions) Extractor(format interface{}, extractor Extractor) TracerOption { +// Extractor registers an Extractor for given format. +func (TracerOptionsFactory) Extractor(format interface{}, extractor Extractor) TracerOption { return func(tracer *Tracer) { tracer.extractors[format] = extractor } } -func (t tracerOptions) Observer(observer Observer) TracerOption { +// Observer registers an Observer. +func (t TracerOptionsFactory) Observer(observer Observer) TracerOption { return t.ContribObserver(&oldObserver{obs: observer}) } -func (tracerOptions) ContribObserver(observer ContribObserver) TracerOption { +// ContribObserver registers a ContribObserver. +func (TracerOptionsFactory) ContribObserver(observer ContribObserver) TracerOption { return func(tracer *Tracer) { tracer.observer.append(observer) } } -func (tracerOptions) Gen128Bit(gen128Bit bool) TracerOption { +// Gen128Bit enables generation of 128bit trace IDs. +func (TracerOptionsFactory) Gen128Bit(gen128Bit bool) TracerOption { return func(tracer *Tracer) { tracer.options.gen128Bit = gen128Bit } } -func (tracerOptions) HighTraceIDGenerator(highTraceIDGenerator func() uint64) TracerOption { +// NoDebugFlagOnForcedSampling turns off setting the debug flag in the trace context +// when the trace is force-started via sampling=1 span tag. 
+func (TracerOptionsFactory) NoDebugFlagOnForcedSampling(noDebugFlagOnForcedSampling bool) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.options.noDebugFlagOnForcedSampling = noDebugFlagOnForcedSampling
+	}
+}
+
+// HighTraceIDGenerator allows overriding the generator for the high bits of 128-bit trace IDs.
+func (TracerOptionsFactory) HighTraceIDGenerator(highTraceIDGenerator func() uint64) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.options.highTraceIDGenerator = highTraceIDGenerator
 	}
 }
 
-func (tracerOptions) ZipkinSharedRPCSpan(zipkinSharedRPCSpan bool) TracerOption {
+// MaxTagValueLength sets the limit on the max length of tag values.
+func (TracerOptionsFactory) MaxTagValueLength(maxTagValueLength int) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.options.maxTagValueLength = maxTagValueLength
+	}
+}
+
+// MaxLogsPerSpan limits the number of Logs in a span (if set to a nonzero
+// value). If a span has more logs than this value, logs are dropped as
+// necessary (and replaced with a log describing how many were dropped).
+//
+// About half of the MaxLogsPerSpan logs kept are the oldest logs, and about
+// half are the newest logs.
+func (TracerOptionsFactory) MaxLogsPerSpan(maxLogsPerSpan int) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.options.maxLogsPerSpan = maxLogsPerSpan
+	}
+}
+
+// ZipkinSharedRPCSpan enables a mode where the server-side span shares the span ID
+// of the client span from the incoming request, for compatibility with Zipkin's
+// "one span per RPC" model.
+func (TracerOptionsFactory) ZipkinSharedRPCSpan(zipkinSharedRPCSpan bool) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.options.zipkinSharedRPCSpan = zipkinSharedRPCSpan
 	}
 }
 
-func (tracerOptions) Tag(key string, value interface{}) TracerOption {
+// Tag adds a tracer-level tag that will be added to all spans.
+func (TracerOptionsFactory) Tag(key string, value interface{}) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.tags = append(tracer.tags, Tag{key: key, value: value})
 	}
 }
 
-func (tracerOptions) BaggageRestrictionManager(mgr baggage.RestrictionManager) TracerOption {
+// BaggageRestrictionManager registers a baggage.RestrictionManager.
+func (TracerOptionsFactory) BaggageRestrictionManager(mgr baggage.RestrictionManager) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.baggageRestrictionManager = mgr
 	}
 }
+
+// DebugThrottler registers a Throttler for debug spans.
+func (TracerOptionsFactory) DebugThrottler(throttler throttler.Throttler) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.debugThrottler = throttler
+	}
+}
diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/transport/doc.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/transport/doc.go
new file mode 100644
index 000000000..6b961fb63
--- /dev/null
+++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/transport/doc.go
@@ -0,0 +1,23 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
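The MaxLogsPerSpan policy described above (keep roughly the oldest half and the newest half, drop the middle, and record how many were dropped) can be illustrated with a hypothetical helper; `capLogs` below is not the library's code, just the same bookkeeping applied to a string slice:

```go
package main

import "fmt"

// capLogs keeps about half of the oldest entries and about half of the
// newest, replacing the dropped middle with a single drop notice.
func capLogs(logs []string, max int) []string {
	if max <= 0 || len(logs) <= max {
		return logs
	}
	oldest := max / 2
	newest := max - oldest - 1 // reserve one slot for the drop notice
	dropped := len(logs) - oldest - newest
	out := make([]string, 0, max)
	out = append(out, logs[:oldest]...)
	out = append(out, fmt.Sprintf("(%d logs dropped)", dropped))
	out = append(out, logs[len(logs)-newest:]...)
	return out
}

func main() {
	logs := []string{"a", "b", "c", "d", "e", "f", "g", "h"}
	fmt.Println(capLogs(logs, 5)) // [a b (4 logs dropped) g h]
}
```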
+ +// Package transport defines various transports that can be used with +// RemoteReporter to send spans out of process. Transport is responsible +// for serializing the spans into a specific format suitable for sending +// to the tracing backend. Examples may include Thrift over UDP, Thrift +// or JSON over HTTP, Thrift over Kafka, etc. +// +// Implementations are NOT required to be thread-safe; the RemoteReporter +// is expected to only call methods on the Transport from the same go-routine. +package transport diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/transport/http.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/transport/http.go new file mode 100644 index 000000000..1d6f14d32 --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/transport/http.go @@ -0,0 +1,175 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "net/http" + "time" + + "github.com/uber/jaeger-client-go/thrift" + + "github.com/uber/jaeger-client-go" + j "github.com/uber/jaeger-client-go/thrift-gen/jaeger" +) + +// Default timeout for http request in seconds +const defaultHTTPTimeout = time.Second * 5 + +// HTTPTransport implements Transport by forwarding spans to a http server. +type HTTPTransport struct { + url string + client *http.Client + batchSize int + spans []*j.Span + process *j.Process + httpCredentials *HTTPBasicAuthCredentials + headers map[string]string +} + +// HTTPBasicAuthCredentials stores credentials for HTTP basic auth. +type HTTPBasicAuthCredentials struct { + username string + password string +} + +// HTTPOption sets a parameter for the HttpCollector +type HTTPOption func(c *HTTPTransport) + +// HTTPTimeout sets maximum timeout for http request. +func HTTPTimeout(duration time.Duration) HTTPOption { + return func(c *HTTPTransport) { c.client.Timeout = duration } +} + +// HTTPBatchSize sets the maximum batch size, after which a collect will be +// triggered. The default batch size is 100 spans. +func HTTPBatchSize(n int) HTTPOption { + return func(c *HTTPTransport) { c.batchSize = n } +} + +// HTTPBasicAuth sets the credentials required to perform HTTP basic auth +func HTTPBasicAuth(username string, password string) HTTPOption { + return func(c *HTTPTransport) { + c.httpCredentials = &HTTPBasicAuthCredentials{username: username, password: password} + } +} + +// HTTPRoundTripper configures the underlying Transport on the *http.Client +// that is used +func HTTPRoundTripper(transport http.RoundTripper) HTTPOption { + return func(c *HTTPTransport) { + c.client.Transport = transport + } +} + +// HTTPHeaders defines the HTTP headers that will be attached to the jaeger client's HTTP request +func HTTPHeaders(headers map[string]string) HTTPOption { + return func(c *HTTPTransport) { + c.headers = headers + } +} + +// NewHTTPTransport returns a new HTTP-backend transport. 
url should be an http +// url of the collector to handle POST request, typically something like: +// http://hostname:14268/api/traces?format=jaeger.thrift +func NewHTTPTransport(url string, options ...HTTPOption) *HTTPTransport { + c := &HTTPTransport{ + url: url, + client: &http.Client{Timeout: defaultHTTPTimeout}, + batchSize: 100, + spans: []*j.Span{}, + } + + for _, option := range options { + option(c) + } + return c +} + +// Append implements Transport. +func (c *HTTPTransport) Append(span *jaeger.Span) (int, error) { + if c.process == nil { + c.process = jaeger.BuildJaegerProcessThrift(span) + } + jSpan := jaeger.BuildJaegerThrift(span) + c.spans = append(c.spans, jSpan) + if len(c.spans) >= c.batchSize { + return c.Flush() + } + return 0, nil +} + +// Flush implements Transport. +func (c *HTTPTransport) Flush() (int, error) { + count := len(c.spans) + if count == 0 { + return 0, nil + } + err := c.send(c.spans) + c.spans = c.spans[:0] + return count, err +} + +// Close implements Transport. +func (c *HTTPTransport) Close() error { + return nil +} + +func (c *HTTPTransport) send(spans []*j.Span) error { + batch := &j.Batch{ + Spans: spans, + Process: c.process, + } + body, err := serializeThrift(batch) + if err != nil { + return err + } + req, err := http.NewRequest("POST", c.url, body) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/x-thrift") + for k, v := range c.headers { + req.Header.Set(k, v) + } + + if c.httpCredentials != nil { + req.SetBasicAuth(c.httpCredentials.username, c.httpCredentials.password) + } + + resp, err := c.client.Do(req) + if err != nil { + return err + } + io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + if resp.StatusCode >= http.StatusBadRequest { + return fmt.Errorf("error from collector: %d", resp.StatusCode) + } + return nil +} + +func serializeThrift(obj thrift.TStruct) (*bytes.Buffer, error) { + t := thrift.NewTMemoryBuffer() + p := thrift.NewTBinaryProtocolTransport(t) + if err := obj.Write(context.Background(), p); err != nil { + return nil, err + } + return t.Buffer, nil +} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/transport_udp.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/transport_udp.go index cb83cdf9b..00004124c 100644 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/transport_udp.go +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/transport_udp.go @@ -15,10 +15,13 @@ package jaeger import ( + "context" "errors" + "fmt" - "github.com/apache/thrift/lib/go/thrift" - + "github.com/uber/jaeger-client-go/internal/reporterstats" + "github.com/uber/jaeger-client-go/log" + "github.com/uber/jaeger-client-go/thrift" j "github.com/uber/jaeger-client-go/thrift-gen/jaeger" "github.com/uber/jaeger-client-go/utils" ) @@ -26,14 +29,14 @@ import ( // Empirically obtained constant for how many bytes in the message are used for envelope. // The total datagram size is: // sizeof(Span) * numSpans + processByteSize + emitBatchOverhead <= maxPacketSize -// There is a unit test `TestEmitBatchOverhead` that validates this number. +// // Note that due to the use of Compact Thrift protocol, overhead grows with the number of spans // in the batch, because the length of the list is encoded as varint32, as well as SeqId. -const emitBatchOverhead = 30 - -const defaultUDPSpanServerHostPort = "localhost:6831" +// +// There is a unit test `TestEmitBatchOverhead` that validates this number, it fails at <68. 
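Putting the new HTTP transport together, a plausible usage sketch (the collector hostname is a placeholder; `NewHTTPTransport`, `HTTPBatchSize`, `HTTPBasicAuth`, `Flush`, and `Close` are taken from the file above):

```go
package main

import (
	"log"

	"github.com/uber/jaeger-client-go/transport"
)

func main() {
	// Spans are buffered in memory and POSTed as a Thrift batch once the
	// batch size is reached (default 100) or Flush is called.
	sender := transport.NewHTTPTransport(
		"http://jaeger-collector:14268/api/traces?format=jaeger.thrift", // placeholder host
		transport.HTTPBatchSize(50),
		transport.HTTPBasicAuth("user", "secret"),
	)
	defer sender.Close()

	// Normally `sender` would be handed to a RemoteReporter, which drives
	// Append/Flush from its queue and timer (see the package doc above).
	if _, err := sender.Flush(); err != nil {
		log.Printf("flush failed: %v", err)
	}
}
```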
+const emitBatchOverhead = 70 -var errSpanTooLarge = errors.New("Span is too large") +var errSpanTooLarge = errors.New("span is too large") type udpSender struct { client *utils.AgentClientUDP @@ -45,39 +48,76 @@ type udpSender struct { thriftProtocol thrift.TProtocol process *j.Process processByteSize int + + // reporterStats provides access to stats that are only known to Reporter + reporterStats reporterstats.ReporterStats + + // The following counters are always non-negative, but we need to send them in signed i64 Thrift fields, + // so we keep them as signed. At 10k QPS, overflow happens in about 300 million years. + batchSeqNo int64 + tooLargeDroppedSpans int64 + failedToEmitSpans int64 } -// NewUDPTransport creates a reporter that submits spans to jaeger-agent -func NewUDPTransport(hostPort string, maxPacketSize int) (Transport, error) { - if len(hostPort) == 0 { - hostPort = defaultUDPSpanServerHostPort +// UDPTransportParams allows specifying options for initializing a UDPTransport. An instance of this struct should +// be passed to NewUDPTransportWithParams. +type UDPTransportParams struct { + utils.AgentClientUDPParams +} + +// NewUDPTransportWithParams creates a reporter that submits spans to jaeger-agent. +// TODO: (breaking change) move to transport/ package. +func NewUDPTransportWithParams(params UDPTransportParams) (Transport, error) { + if len(params.HostPort) == 0 { + params.HostPort = fmt.Sprintf("%s:%d", DefaultUDPSpanServerHost, DefaultUDPSpanServerPort) } - if maxPacketSize == 0 { - maxPacketSize = utils.UDPPacketMaxLength + + if params.Logger == nil { + params.Logger = log.StdLogger + } + + if params.MaxPacketSize == 0 { + params.MaxPacketSize = utils.UDPPacketMaxLength } protocolFactory := thrift.NewTCompactProtocolFactory() // Each span is first written to thriftBuffer to determine its size in bytes. - thriftBuffer := thrift.NewTMemoryBufferLen(maxPacketSize) + thriftBuffer := thrift.NewTMemoryBufferLen(params.MaxPacketSize) thriftProtocol := protocolFactory.GetProtocol(thriftBuffer) - client, err := utils.NewAgentClientUDP(hostPort, maxPacketSize) + client, err := utils.NewAgentClientUDPWithParams(params.AgentClientUDPParams) if err != nil { return nil, err } - sender := &udpSender{ + return &udpSender{ client: client, - maxSpanBytes: maxPacketSize - emitBatchOverhead, + maxSpanBytes: params.MaxPacketSize - emitBatchOverhead, thriftBuffer: thriftBuffer, - thriftProtocol: thriftProtocol} - return sender, nil + thriftProtocol: thriftProtocol, + }, nil +} + +// NewUDPTransport creates a reporter that submits spans to jaeger-agent. +// TODO: (breaking change) move to transport/ package. +func NewUDPTransport(hostPort string, maxPacketSize int) (Transport, error) { + return NewUDPTransportWithParams(UDPTransportParams{ + AgentClientUDPParams: utils.AgentClientUDPParams{ + HostPort: hostPort, + MaxPacketSize: maxPacketSize, + }, + }) +} + +// SetReporterStats implements reporterstats.Receiver. 
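Constructing the params-based UDP transport above might look like the sketch below. The agent address is a placeholder; with `MaxPacketSize` left at zero the transport falls back to `utils.UDPPacketMaxLength`, and the per-batch span budget becomes `MaxPacketSize - emitBatchOverhead`:

```go
package main

import (
	"log"

	"github.com/uber/jaeger-client-go"
	jlog "github.com/uber/jaeger-client-go/log"
	"github.com/uber/jaeger-client-go/utils"
)

func main() {
	sender, err := jaeger.NewUDPTransportWithParams(jaeger.UDPTransportParams{
		AgentClientUDPParams: utils.AgentClientUDPParams{
			HostPort:      "jaeger-agent:6831", // placeholder agent address
			MaxPacketSize: 0,                   // 0 -> utils.UDPPacketMaxLength
			Logger:        jlog.StdLogger,
		},
	})
	if err != nil {
		log.Fatalf("udp transport: %v", err)
	}
	defer sender.Close()
}
```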
+func (s *udpSender) SetReporterStats(rs reporterstats.ReporterStats) { + s.reporterStats = rs } func (s *udpSender) calcSizeOfSerializedThrift(thriftStruct thrift.TStruct) int { s.thriftBuffer.Reset() - thriftStruct.Write(s.thriftProtocol) + _ = thriftStruct.Write(context.Background(), s.thriftProtocol) return s.thriftBuffer.Len() } @@ -90,6 +130,7 @@ func (s *udpSender) Append(span *Span) (int, error) { jSpan := BuildJaegerThrift(span) spanSize := s.calcSizeOfSerializedThrift(jSpan) if spanSize > s.maxSpanBytes { + s.tooLargeDroppedSpans++ return 1, errSpanTooLarge } @@ -113,9 +154,18 @@ func (s *udpSender) Flush() (int, error) { if n == 0 { return 0, nil } - err := s.client.EmitBatch(&j.Batch{Process: s.process, Spans: s.spanBuffer}) + s.batchSeqNo++ + batchSeqNo := int64(s.batchSeqNo) + err := s.client.EmitBatch(context.Background(), &j.Batch{ + Process: s.process, + Spans: s.spanBuffer, + SeqNo: &batchSeqNo, + Stats: s.makeStats(), + }) s.resetBuffers() - + if err != nil { + s.failedToEmitSpans += int64(n) + } return n, err } @@ -130,3 +180,15 @@ func (s *udpSender) resetBuffers() { s.spanBuffer = s.spanBuffer[:0] s.byteBufferSize = s.processByteSize } + +func (s *udpSender) makeStats() *j.ClientStats { + var dropped int64 + if s.reporterStats != nil { + dropped = s.reporterStats.SpansDroppedFromQueue() + } + return &j.ClientStats{ + FullQueueDroppedSpans: dropped, + TooLargeDroppedSpans: s.tooLargeDroppedSpans, + FailedToEmitSpans: s.failedToEmitSpans, + } +} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go index 1b8db9758..bf2f13165 100644 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go @@ -20,22 +20,15 @@ import ( ) // RateLimiter is a filter used to check if a message that is worth itemCost units is within the rate limits. +// +// TODO (breaking change) remove this interface in favor of public struct below +// +// Deprecated, use ReconfigurableRateLimiter. type RateLimiter interface { CheckCredit(itemCost float64) bool } -type rateLimiter struct { - sync.Mutex - - creditsPerSecond float64 - balance float64 - maxBalance float64 - lastTick time.Time - - timeNow func() time.Time -} - -// NewRateLimiter creates a new rate limiter based on leaky bucket algorithm, formulated in terms of a +// ReconfigurableRateLimiter is a rate limiter based on leaky bucket algorithm, formulated in terms of a // credits balance that is replenished every time CheckCredit() method is called (tick) by the amount proportional // to the time elapsed since the last tick, up to max of creditsPerSecond. A call to CheckCredit() takes a cost // of an item we want to pay with the balance. If the balance exceeds the cost of the item, the item is "purchased" @@ -47,31 +40,73 @@ type rateLimiter struct { // // It can also be used to limit the rate of traffic in bytes, by setting creditsPerSecond to desired throughput // as bytes/second, and calling CheckCredit() with the actual message size. 
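The credit/balance arithmetic described in the rate-limiter comment above, reduced to a toy with an injected clock. This simplified version replenishes before every check, whereas the implementation that follows first tries to spend and only replenishes on a miss:

```go
package main

import "fmt"

// toyLimiter mirrors the described leaky-bucket accounting: the balance grows
// by creditsPerSecond * elapsedSeconds, capped at maxBalance, and checkCredit
// "spends" itemCost whenever the balance covers it.
type toyLimiter struct {
	creditsPerSecond, balance, maxBalance float64
	lastTick                              float64 // seconds, injected clock
}

func (l *toyLimiter) checkCredit(now, itemCost float64) bool {
	l.balance += (now - l.lastTick) * l.creditsPerSecond
	l.lastTick = now
	if l.balance > l.maxBalance {
		l.balance = l.maxBalance
	}
	if l.balance >= itemCost {
		l.balance -= itemCost
		return true
	}
	return false
}

func main() {
	// 2 credits/sec, bucket of 2: two immediate successes, the third must wait.
	l := &toyLimiter{creditsPerSecond: 2, balance: 2, maxBalance: 2}
	fmt.Println(l.checkCredit(0, 1.0))   // true  (balance 2 -> 1)
	fmt.Println(l.checkCredit(0, 1.0))   // true  (balance 1 -> 0)
	fmt.Println(l.checkCredit(0, 1.0))   // false (balance 0)
	fmt.Println(l.checkCredit(0.5, 1.0)) // true  (0 + 0.5s * 2/s = 1 credit)
}
```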
-func NewRateLimiter(creditsPerSecond, maxBalance float64) RateLimiter {
-	return &rateLimiter{
+//
+// TODO (breaking change) rename to RateLimiter once the interface is removed
+type ReconfigurableRateLimiter struct {
+	lock sync.Mutex
+
+	creditsPerSecond float64
+	balance          float64
+	maxBalance       float64
+	lastTick         time.Time
+
+	timeNow func() time.Time
+}
+
+// NewRateLimiter creates a new ReconfigurableRateLimiter.
+func NewRateLimiter(creditsPerSecond, maxBalance float64) *ReconfigurableRateLimiter {
+	return &ReconfigurableRateLimiter{
 		creditsPerSecond: creditsPerSecond,
 		balance:          maxBalance,
 		maxBalance:       maxBalance,
 		lastTick:         time.Now(),
-		timeNow:          time.Now}
+		timeNow:          time.Now,
+	}
 }
 
-func (b *rateLimiter) CheckCredit(itemCost float64) bool {
-	b.Lock()
-	defer b.Unlock()
-	// calculate how much time passed since the last tick, and update current tick
-	currentTime := b.timeNow()
-	elapsedTime := currentTime.Sub(b.lastTick)
-	b.lastTick = currentTime
-	// calculate how much credit have we accumulated since the last tick
-	b.balance += elapsedTime.Seconds() * b.creditsPerSecond
-	if b.balance > b.maxBalance {
-		b.balance = b.maxBalance
-	}
+// CheckCredit tries to reduce the current balance by itemCost provided that the current balance
+// is not less than itemCost.
+func (rl *ReconfigurableRateLimiter) CheckCredit(itemCost float64) bool {
+	rl.lock.Lock()
+	defer rl.lock.Unlock()
+
 	// if we have enough credits to pay for current item, then reduce balance and allow
-	if b.balance >= itemCost {
-		b.balance -= itemCost
+	if rl.balance >= itemCost {
+		rl.balance -= itemCost
+		return true
+	}
+	// otherwise check if balance can be increased due to time elapsed, and try again
+	rl.updateBalance()
+	if rl.balance >= itemCost {
+		rl.balance -= itemCost
 		return true
 	}
 	return false
 }
+
+// updateBalance recalculates current balance based on time elapsed. Must be called while holding a lock.
+func (rl *ReconfigurableRateLimiter) updateBalance() {
+	// calculate how much time passed since the last tick, and update current tick
+	currentTime := rl.timeNow()
+	elapsedTime := currentTime.Sub(rl.lastTick)
+	rl.lastTick = currentTime
+	// calculate how much credit we have accumulated since the last tick
+	rl.balance += elapsedTime.Seconds() * rl.creditsPerSecond
+	if rl.balance > rl.maxBalance {
+		rl.balance = rl.maxBalance
+	}
+}
+
+// Update changes the main parameters of the rate limiter in-place, while retaining
+// the current accumulated balance (pro-rated to the new maxBalance value). Using this method
+// instead of creating a new rate limiter helps avoid a thundering herd when sampling
+// strategies are updated.
+func (rl *ReconfigurableRateLimiter) Update(creditsPerSecond, maxBalance float64) {
+	rl.lock.Lock()
+	defer rl.lock.Unlock()
+
+	rl.updateBalance() // get up to date balance
+	rl.balance = rl.balance * maxBalance / rl.maxBalance
+	rl.creditsPerSecond = creditsPerSecond
+	rl.maxBalance = maxBalance
+}
diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/utils/reconnecting_udp_conn.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/utils/reconnecting_udp_conn.go
new file mode 100644
index 000000000..0dffc7fa2
--- /dev/null
+++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/utils/reconnecting_udp_conn.go
@@ -0,0 +1,189 @@
+// Copyright (c) 2020 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "fmt" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/uber/jaeger-client-go/log" +) + +// reconnectingUDPConn is an implementation of udpConn that resolves hostPort every resolveTimeout, if the resolved address is +// different than the current conn then the new address is dialed and the conn is swapped. +type reconnectingUDPConn struct { + hostPort string + resolveFunc resolveFunc + dialFunc dialFunc + logger log.Logger + bufferBytes int64 + + connMtx sync.RWMutex + conn *net.UDPConn + destAddr *net.UDPAddr + closeChan chan struct{} +} + +type resolveFunc func(network string, hostPort string) (*net.UDPAddr, error) +type dialFunc func(network string, laddr, raddr *net.UDPAddr) (*net.UDPConn, error) + +// newReconnectingUDPConn returns a new udpConn that resolves hostPort every resolveTimeout, if the resolved address is +// different than the current conn then the new address is dialed and the conn is swapped. +func newReconnectingUDPConn(hostPort string, resolveTimeout time.Duration, resolveFunc resolveFunc, dialFunc dialFunc, logger log.Logger) (*reconnectingUDPConn, error) { + conn := &reconnectingUDPConn{ + hostPort: hostPort, + resolveFunc: resolveFunc, + dialFunc: dialFunc, + logger: logger, + closeChan: make(chan struct{}), + } + + if err := conn.attemptResolveAndDial(); err != nil { + logger.Error(fmt.Sprintf("failed resolving destination address on connection startup, with err: %q. 
retrying in %s", err.Error(), resolveTimeout))
+	}
+
+	go conn.reconnectLoop(resolveTimeout)
+
+	return conn, nil
+}
+
+func (c *reconnectingUDPConn) reconnectLoop(resolveTimeout time.Duration) {
+	ticker := time.NewTicker(resolveTimeout)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-c.closeChan:
+			return
+		case <-ticker.C:
+			if err := c.attemptResolveAndDial(); err != nil {
+				c.logger.Error(err.Error())
+			}
+		}
+	}
+}
+
+func (c *reconnectingUDPConn) attemptResolveAndDial() error {
+	newAddr, err := c.resolveFunc("udp", c.hostPort)
+	if err != nil {
+		return fmt.Errorf("failed to resolve new addr for host %q, with err: %w", c.hostPort, err)
+	}
+
+	c.connMtx.RLock()
+	curAddr := c.destAddr
+	c.connMtx.RUnlock()
+
+	// don't attempt to dial if an addr was successfully dialed previously and the resolved addr is the same as the current conn's
+	if curAddr != nil && newAddr.String() == curAddr.String() {
+		return nil
+	}
+
+	if err := c.attemptDialNewAddr(newAddr); err != nil {
+		return fmt.Errorf("failed to dial newly resolved addr '%s', with err: %w", newAddr, err)
+	}
+
+	return nil
+}
+
+func (c *reconnectingUDPConn) attemptDialNewAddr(newAddr *net.UDPAddr) error {
+	connUDP, err := c.dialFunc(newAddr.Network(), nil, newAddr)
+	if err != nil {
+		return err
+	}
+
+	if bufferBytes := int(atomic.LoadInt64(&c.bufferBytes)); bufferBytes != 0 {
+		if err = connUDP.SetWriteBuffer(bufferBytes); err != nil {
+			return err
+		}
+	}
+
+	c.connMtx.Lock()
+	c.destAddr = newAddr
+	// store prev to close later
+	prevConn := c.conn
+	c.conn = connUDP
+	c.connMtx.Unlock()
+
+	if prevConn != nil {
+		return prevConn.Close()
+	}
+
+	return nil
+}
+
+// Write calls the underlying net.UDPConn's Write; if it fails, an attempt is made to connect to a new addr, and if that succeeds the write is retried before returning
+func (c *reconnectingUDPConn) Write(b []byte) (int, error) {
+	var bytesWritten int
+	var err error
+
+	c.connMtx.RLock()
+	if c.conn == nil {
+		// if the connection is not initialized, indicate this with an error in order to hook into the retry logic
+		err = fmt.Errorf("UDP connection not yet initialized, an address has not been resolved")
+	} else {
+		bytesWritten, err = c.conn.Write(b)
+	}
+	c.connMtx.RUnlock()
+
+	if err == nil {
+		return bytesWritten, nil
+	}
+
+	// attempt to resolve and dial a new address in case that's the problem; if the resolve and dial succeed, try the write again
+	if reconnErr := c.attemptResolveAndDial(); reconnErr == nil {
+		c.connMtx.RLock()
+		defer c.connMtx.RUnlock()
+		return c.conn.Write(b)
+	}
+
+	// return the original error if the reconnect fails
+	return bytesWritten, err
+}
+
+// Close stops the reconnectLoop, then closes the connection via the underlying net.UDPConn's implementation
+func (c *reconnectingUDPConn) Close() error {
+	close(c.closeChan)
+
+	// acquire rw lock before closing conn to ensure calls to Write drain
+	c.connMtx.Lock()
+	defer c.connMtx.Unlock()
+
+	if c.conn != nil {
+		return c.conn.Close()
+	}
+
+	return nil
+}
+
+// SetWriteBuffer defers to the net.UDPConn SetWriteBuffer implementation wrapped with an RLock.
if no conn is currently held +// and SetWriteBuffer is called store bufferBytes to be set for new conns +func (c *reconnectingUDPConn) SetWriteBuffer(bytes int) error { + var err error + + c.connMtx.RLock() + if c.conn != nil { + err = c.conn.SetWriteBuffer(bytes) + } + c.connMtx.RUnlock() + + if err == nil { + atomic.StoreInt64(&c.bufferBytes, int64(bytes)) + } + + return err +} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go index 2374686e5..4c59ae9dd 100644 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go @@ -15,12 +15,15 @@ package utils import ( + "context" "errors" "fmt" "io" "net" + "time" - "github.com/apache/thrift/lib/go/thrift" + "github.com/uber/jaeger-client-go/log" + "github.com/uber/jaeger-client-go/thrift" "github.com/uber/jaeger-client-go/thrift-gen/agent" "github.com/uber/jaeger-client-go/thrift-gen/jaeger" @@ -35,57 +38,105 @@ type AgentClientUDP struct { agent.Agent io.Closer - connUDP *net.UDPConn + connUDP udpConn client *agent.AgentClient maxPacketSize int // max size of datagram in bytes thriftBuffer *thrift.TMemoryBuffer // buffer used to calculate byte size of a span } -// NewAgentClientUDP creates a client that sends spans to Jaeger Agent over UDP. -func NewAgentClientUDP(hostPort string, maxPacketSize int) (*AgentClientUDP, error) { - if maxPacketSize == 0 { - maxPacketSize = UDPPacketMaxLength +type udpConn interface { + Write([]byte) (int, error) + SetWriteBuffer(int) error + Close() error +} + +// AgentClientUDPParams allows specifying options for initializing an AgentClientUDP. An instance of this struct should +// be passed to NewAgentClientUDPWithParams. +type AgentClientUDPParams struct { + HostPort string + MaxPacketSize int + Logger log.Logger + DisableAttemptReconnecting bool + AttemptReconnectInterval time.Duration +} + +// NewAgentClientUDPWithParams creates a client that sends spans to Jaeger Agent over UDP. 
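The small `udpConn` interface above is the seam that lets `NewAgentClientUDPWithParams` hand back either a plain `*net.UDPConn` or the reconnecting wrapper without the caller noticing. A stripped-down illustration of the same seam; `writerConn` and `pick` are hypothetical names, and the reconnecting branch is deliberately stubbed out:

```go
package main

import (
	"fmt"
	"net"
)

// writerConn is the minimal surface the client needs, mirroring udpConn.
type writerConn interface {
	Write([]byte) (int, error)
	SetWriteBuffer(int) error
	Close() error
}

// pick returns either a static UDP connection or, hypothetically, a
// self-reconnecting wrapper satisfying the same interface.
func pick(hostPort string, reconnecting bool) (writerConn, error) {
	if reconnecting {
		// in the real code this branch would build newReconnectingUDPConn(...)
		return nil, fmt.Errorf("reconnecting wrapper not shown in this sketch")
	}
	addr, err := net.ResolveUDPAddr("udp", hostPort)
	if err != nil {
		return nil, err
	}
	return net.DialUDP("udp", nil, addr) // *net.UDPConn satisfies writerConn
}

func main() {
	conn, err := pick("127.0.0.1:6831", false)
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer conn.Close()
	n, err := conn.Write([]byte("datagram"))
	fmt.Println(n, err)
}
```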
+func NewAgentClientUDPWithParams(params AgentClientUDPParams) (*AgentClientUDP, error) { + // validate hostport + if _, _, err := net.SplitHostPort(params.HostPort); err != nil { + return nil, err + } + + if params.MaxPacketSize == 0 { + params.MaxPacketSize = UDPPacketMaxLength + } + + if params.Logger == nil { + params.Logger = log.StdLogger } - thriftBuffer := thrift.NewTMemoryBufferLen(maxPacketSize) + if !params.DisableAttemptReconnecting && params.AttemptReconnectInterval == 0 { + params.AttemptReconnectInterval = time.Second * 30 + } + + thriftBuffer := thrift.NewTMemoryBufferLen(params.MaxPacketSize) protocolFactory := thrift.NewTCompactProtocolFactory() client := agent.NewAgentClientFactory(thriftBuffer, protocolFactory) - destAddr, err := net.ResolveUDPAddr("udp", hostPort) - if err != nil { - return nil, err - } + var connUDP udpConn + var err error - connUDP, err := net.DialUDP(destAddr.Network(), nil, destAddr) - if err != nil { - return nil, err + if params.DisableAttemptReconnecting { + destAddr, err := net.ResolveUDPAddr("udp", params.HostPort) + if err != nil { + return nil, err + } + + connUDP, err = net.DialUDP(destAddr.Network(), nil, destAddr) + if err != nil { + return nil, err + } + } else { + // host is hostname, setup resolver loop in case host record changes during operation + connUDP, err = newReconnectingUDPConn(params.HostPort, params.AttemptReconnectInterval, net.ResolveUDPAddr, net.DialUDP, params.Logger) + if err != nil { + return nil, err + } } - if err := connUDP.SetWriteBuffer(maxPacketSize); err != nil { + + if err := connUDP.SetWriteBuffer(params.MaxPacketSize); err != nil { return nil, err } - clientUDP := &AgentClientUDP{ + return &AgentClientUDP{ connUDP: connUDP, client: client, - maxPacketSize: maxPacketSize, - thriftBuffer: thriftBuffer} - return clientUDP, nil + maxPacketSize: params.MaxPacketSize, + thriftBuffer: thriftBuffer, + }, nil +} + +// NewAgentClientUDP creates a client that sends spans to Jaeger Agent over UDP. 
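A plausible call site for the params constructor above. The agent address is a placeholder; a zero `AttemptReconnectInterval` would default to 30s, and setting `DisableAttemptReconnecting` would select the plain one-shot `net.DialUDP` path instead:

```go
package main

import (
	"log"
	"time"

	"github.com/uber/jaeger-client-go/utils"
)

func main() {
	// With reconnection enabled (the default), the agent's hostname is
	// re-resolved every AttemptReconnectInterval and the conn is redialed
	// if the address changes.
	client, err := utils.NewAgentClientUDPWithParams(utils.AgentClientUDPParams{
		HostPort:                 "jaeger-agent:6831", // placeholder; must be host:port
		AttemptReconnectInterval: 10 * time.Second,
	})
	if err != nil {
		log.Fatalf("agent client: %v", err)
	}
	defer client.Close()
}
```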
+func NewAgentClientUDP(hostPort string, maxPacketSize int) (*AgentClientUDP, error) { + return NewAgentClientUDPWithParams(AgentClientUDPParams{ + HostPort: hostPort, + MaxPacketSize: maxPacketSize, + }) } // EmitZipkinBatch implements EmitZipkinBatch() of Agent interface -func (a *AgentClientUDP) EmitZipkinBatch(spans []*zipkincore.Span) error { +func (a *AgentClientUDP) EmitZipkinBatch(context.Context, []*zipkincore.Span) error { return errors.New("Not implemented") } // EmitBatch implements EmitBatch() of Agent interface -func (a *AgentClientUDP) EmitBatch(batch *jaeger.Batch) error { +func (a *AgentClientUDP) EmitBatch(ctx context.Context, batch *jaeger.Batch) error { a.thriftBuffer.Reset() - a.client.SeqId = 0 // we have no need for distinct SeqIds for our one-way UDP messages - if err := a.client.EmitBatch(batch); err != nil { + if err := a.client.EmitBatch(ctx, batch); err != nil { return err } if a.thriftBuffer.Len() > a.maxPacketSize { - return fmt.Errorf("Data does not fit within one UDP packet; size %d, max %d, spans %d", + return fmt.Errorf("data does not fit within one UDP packet; size %d, max %d, spans %d", a.thriftBuffer.Len(), a.maxPacketSize, len(batch.Spans)) } _, err := a.connUDP.Write(a.thriftBuffer.Bytes()) diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/zipkin.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/zipkin.go index 636952b7f..98cab4b6e 100644 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/zipkin.go +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/zipkin.go @@ -55,7 +55,7 @@ func (p *zipkinPropagator) Inject( carrier.SetTraceID(ctx.TraceID().Low) // TODO this cannot work with 128bit IDs carrier.SetSpanID(uint64(ctx.SpanID())) carrier.SetParentID(uint64(ctx.ParentID())) - carrier.SetFlags(ctx.flags) + carrier.SetFlags(ctx.samplingState.flags()) return nil } @@ -71,6 +71,7 @@ func (p *zipkinPropagator) Extract(abstractCarrier interface{}) (SpanContext, er ctx.traceID.Low = carrier.TraceID() ctx.spanID = SpanID(carrier.SpanID()) ctx.parentID = SpanID(carrier.ParentID()) - ctx.flags = carrier.Flags() + ctx.samplingState = &samplingState{} + ctx.samplingState.setFlags(carrier.Flags()) return ctx, nil } diff --git a/hotelReservation/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go b/hotelReservation/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go index b2e9a3e64..73aeb000f 100644 --- a/hotelReservation/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go +++ b/hotelReservation/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go @@ -27,9 +27,6 @@ import ( ) const ( - // maxAnnotationLength is the max length of byte array or string allowed in the annotations - maxAnnotationLength = 256 - // Zipkin UI does not work well with non-string tag values allowPackedNumbers = false ) @@ -43,6 +40,7 @@ var specialTagHandlers = map[string]func(*zipkinSpan, interface{}){ } // BuildZipkinThrift builds thrift span based on internal span. +// TODO: (breaking change) move to transport/zipkin and make private. 
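The size guard in `EmitBatch` above (serialize into a memory buffer first, then refuse to send anything that would not fit in one datagram) generalizes to any bounded transport. A self-contained sketch of the same pattern, with JSON standing in for Thrift purely to keep it runnable:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// emitOnePacket mirrors the guard in AgentClientUDP.EmitBatch: serialize the
// batch into a buffer, check its size against the budget, only then write.
func emitOnePacket(batch interface{}, maxPacketSize int, write func([]byte) (int, error)) error {
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(batch); err != nil {
		return err
	}
	if buf.Len() > maxPacketSize {
		return fmt.Errorf("data does not fit within one UDP packet; size %d, max %d",
			buf.Len(), maxPacketSize)
	}
	_, err := write(buf.Bytes())
	return err
}

func main() {
	sink := func(b []byte) (int, error) { return len(b), nil } // stand-in for conn.Write
	err := emitOnePacket(map[string]string{"service": "geo"}, 64, sink)
	fmt.Println("64-byte budget:", err) // nil: the payload fits
	err = emitOnePacket(map[string]string{"service": "geo"}, 8, sink)
	fmt.Println("8-byte budget: ", err) // error: would exceed one packet
}
```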
func BuildZipkinThrift(s *Span) *z.Span { span := &zipkinSpan{Span: s} span.handleSpecialTags() @@ -51,13 +49,19 @@ func BuildZipkinThrift(s *Span) *z.Span { if parentID != 0 { ptrParentID = &parentID } + traceIDHigh := int64(span.context.traceID.High) + var ptrTraceIDHigh *int64 + if traceIDHigh != 0 { + ptrTraceIDHigh = &traceIDHigh + } timestamp := utils.TimeToMicrosecondsSinceEpochInt64(span.startTime) duration := span.duration.Nanoseconds() / int64(time.Microsecond) endpoint := &z.Endpoint{ ServiceName: span.tracer.serviceName, Ipv4: int32(span.tracer.hostIPv4)} thriftSpan := &z.Span{ - TraceID: int64(span.context.traceID.Low), // TODO upgrade zipkin thrift and use TraceIdHigh + TraceID: int64(span.context.traceID.Low), + TraceIDHigh: ptrTraceIDHigh, ID: int64(span.context.spanID), ParentID: ptrParentID, Name: span.operationName, @@ -98,7 +102,7 @@ func buildAnnotations(span *zipkinSpan, endpoint *z.Endpoint) []*z.Annotation { Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(log.Timestamp), Host: endpoint} if content, err := spanlog.MaterializeWithJSON(log.Fields); err == nil { - anno.Value = truncateString(string(content)) + anno.Value = truncateString(string(content), span.tracer.options.maxTagValueLength) } else { anno.Value = err.Error() } @@ -148,21 +152,21 @@ func buildBinaryAnnotations(span *zipkinSpan, endpoint *z.Endpoint) []*z.BinaryA if _, ok := specialTagHandlers[tag.key]; ok { continue } - if anno := buildBinaryAnnotation(tag.key, tag.value, nil); anno != nil { + if anno := buildBinaryAnnotation(tag.key, tag.value, span.tracer.options.maxTagValueLength, nil); anno != nil { annotations = append(annotations, anno) } } return annotations } -func buildBinaryAnnotation(key string, val interface{}, endpoint *z.Endpoint) *z.BinaryAnnotation { +func buildBinaryAnnotation(key string, val interface{}, maxTagValueLength int, endpoint *z.Endpoint) *z.BinaryAnnotation { bann := &z.BinaryAnnotation{Key: key, Host: endpoint} if value, ok := val.(string); ok { - bann.Value = []byte(truncateString(value)) + bann.Value = []byte(truncateString(value, maxTagValueLength)) bann.AnnotationType = z.AnnotationType_STRING } else if value, ok := val.([]byte); ok { - if len(value) > maxAnnotationLength { - value = value[:maxAnnotationLength] + if len(value) > maxTagValueLength { + value = value[:maxTagValueLength] } bann.Value = value bann.AnnotationType = z.AnnotationType_BYTES @@ -180,7 +184,7 @@ func buildBinaryAnnotation(key string, val interface{}, endpoint *z.Endpoint) *z bann.AnnotationType = z.AnnotationType_BOOL } else { value := stringify(val) - bann.Value = []byte(truncateString(value)) + bann.Value = []byte(truncateString(value, maxTagValueLength)) bann.AnnotationType = z.AnnotationType_STRING } return bann @@ -193,12 +197,12 @@ func stringify(value interface{}) string { return fmt.Sprintf("%+v", value) } -func truncateString(value string) string { +func truncateString(value string, maxLength int) string { // we ignore the problem of utf8 runes possibly being sliced in the middle, // as it is rather expensive to iterate through each tag just to find rune // boundaries. 
- if len(value) > maxAnnotationLength { - return value[:maxAnnotationLength] + if len(value) > maxLength { + return value[:maxLength] } return value } diff --git a/hotelReservation/vendor/github.com/uber/jaeger-lib/metrics/factory.go b/hotelReservation/vendor/github.com/uber/jaeger-lib/metrics/factory.go index a744a890d..0ead061eb 100644 --- a/hotelReservation/vendor/github.com/uber/jaeger-lib/metrics/factory.go +++ b/hotelReservation/vendor/github.com/uber/jaeger-lib/metrics/factory.go @@ -14,14 +14,48 @@ package metrics +import ( + "time" +) + +// NSOptions defines the name and tags map associated with a factory namespace +type NSOptions struct { + Name string + Tags map[string]string +} + +// Options defines the information associated with a metric +type Options struct { + Name string + Tags map[string]string + Help string +} + +// TimerOptions defines the information associated with a metric +type TimerOptions struct { + Name string + Tags map[string]string + Help string + Buckets []time.Duration +} + +// HistogramOptions defines the information associated with a metric +type HistogramOptions struct { + Name string + Tags map[string]string + Help string + Buckets []float64 +} + // Factory creates new metrics type Factory interface { - Counter(name string, tags map[string]string) Counter - Timer(name string, tags map[string]string) Timer - Gauge(name string, tags map[string]string) Gauge + Counter(metric Options) Counter + Timer(metric TimerOptions) Timer + Gauge(metric Options) Gauge + Histogram(metric HistogramOptions) Histogram // Namespace returns a nested metrics factory. - Namespace(name string, tags map[string]string) Factory + Namespace(scope NSOptions) Factory } // NullFactory is a metrics factory that returns NullCounter, NullTimer, and NullGauge. @@ -29,7 +63,16 @@ var NullFactory Factory = nullFactory{} type nullFactory struct{} -func (nullFactory) Counter(name string, tags map[string]string) Counter { return NullCounter } -func (nullFactory) Timer(name string, tags map[string]string) Timer { return NullTimer } -func (nullFactory) Gauge(name string, tags map[string]string) Gauge { return NullGauge } -func (nullFactory) Namespace(name string, tags map[string]string) Factory { return NullFactory } +func (nullFactory) Counter(options Options) Counter { + return NullCounter +} +func (nullFactory) Timer(options TimerOptions) Timer { + return NullTimer +} +func (nullFactory) Gauge(options Options) Gauge { + return NullGauge +} +func (nullFactory) Histogram(options HistogramOptions) Histogram { + return NullHistogram +} +func (nullFactory) Namespace(scope NSOptions) Factory { return NullFactory } diff --git a/hotelReservation/vendor/github.com/uber/jaeger-lib/metrics/histogram.go b/hotelReservation/vendor/github.com/uber/jaeger-lib/metrics/histogram.go new file mode 100644 index 000000000..d3bd6174f --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-lib/metrics/histogram.go @@ -0,0 +1,28 @@ +// Copyright (c) 2018 The Jaeger Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package metrics + +// Histogram that keeps track of a distribution of values. +type Histogram interface { + // Records the value passed in. + Record(float64) +} + +// NullHistogram that does nothing +var NullHistogram Histogram = nullHistogram{} + +type nullHistogram struct{} + +func (nullHistogram) Record(float64) {} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-lib/metrics/keys.go b/hotelReservation/vendor/github.com/uber/jaeger-lib/metrics/keys.go new file mode 100644 index 000000000..c24445a10 --- /dev/null +++ b/hotelReservation/vendor/github.com/uber/jaeger-lib/metrics/keys.go @@ -0,0 +1,35 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metrics + +import ( + "sort" +) + +// GetKey converts name+tags into a single string of the form +// "name|tag1=value1|...|tagN=valueN", where tag names are +// sorted alphabetically. +func GetKey(name string, tags map[string]string, tagsSep string, tagKVSep string) string { + keys := make([]string, 0, len(tags)) + for k := range tags { + keys = append(keys, k) + } + sort.Strings(keys) + key := name + for _, k := range keys { + key = key + tagsSep + k + tagKVSep + tags[k] + } + return key +} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-lib/metrics/local.go b/hotelReservation/vendor/github.com/uber/jaeger-lib/metrics/local.go deleted file mode 100644 index 8c3624849..000000000 --- a/hotelReservation/vendor/github.com/uber/jaeger-lib/metrics/local.go +++ /dev/null @@ -1,332 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metrics - -import ( - "sort" - "sync" - "sync/atomic" - "time" - - "github.com/codahale/hdrhistogram" -) - -// This is intentionally very similar to github.com/codahale/metrics, the -// main difference being that counters/gauges are scoped to the provider -// rather than being global (to facilitate testing). - -// A LocalBackend is a metrics provider which aggregates data in-vm, and -// allows exporting snapshots to shove the data into a remote collector -type LocalBackend struct { - cm sync.Mutex - gm sync.Mutex - tm sync.Mutex - counters map[string]*int64 - gauges map[string]*int64 - timers map[string]*localBackendTimer - stop chan struct{} - wg sync.WaitGroup - TagsSep string - TagKVSep string -} - -// NewLocalBackend returns a new LocalBackend. 
The collectionInterval is the histogram -// time window for each timer. -func NewLocalBackend(collectionInterval time.Duration) *LocalBackend { - b := &LocalBackend{ - counters: make(map[string]*int64), - gauges: make(map[string]*int64), - timers: make(map[string]*localBackendTimer), - stop: make(chan struct{}), - TagsSep: "|", - TagKVSep: "=", - } - if collectionInterval == 0 { - // Use one histogram time window for all timers - return b - } - b.wg.Add(1) - go b.runLoop(collectionInterval) - return b -} - -// Clear discards accumulated stats -func (b *LocalBackend) Clear() { - b.cm.Lock() - defer b.cm.Unlock() - b.gm.Lock() - defer b.gm.Unlock() - b.tm.Lock() - defer b.tm.Unlock() - b.counters = make(map[string]*int64) - b.gauges = make(map[string]*int64) - b.timers = make(map[string]*localBackendTimer) -} - -func (b *LocalBackend) runLoop(collectionInterval time.Duration) { - defer b.wg.Done() - ticker := time.NewTicker(collectionInterval) - for { - select { - case <-ticker.C: - b.tm.Lock() - timers := make(map[string]*localBackendTimer, len(b.timers)) - for timerName, timer := range b.timers { - timers[timerName] = timer - } - b.tm.Unlock() - - for _, t := range timers { - t.Lock() - t.hist.Rotate() - t.Unlock() - } - case <-b.stop: - ticker.Stop() - return - } - } -} - -// IncCounter increments a counter value -func (b *LocalBackend) IncCounter(name string, tags map[string]string, delta int64) { - name = GetKey(name, tags, b.TagsSep, b.TagKVSep) - b.cm.Lock() - defer b.cm.Unlock() - counter := b.counters[name] - if counter == nil { - b.counters[name] = new(int64) - *b.counters[name] = delta - return - } - atomic.AddInt64(counter, delta) -} - -// UpdateGauge updates the value of a gauge -func (b *LocalBackend) UpdateGauge(name string, tags map[string]string, value int64) { - name = GetKey(name, tags, b.TagsSep, b.TagKVSep) - b.gm.Lock() - defer b.gm.Unlock() - gauge := b.gauges[name] - if gauge == nil { - b.gauges[name] = new(int64) - *b.gauges[name] = value - return - } - atomic.StoreInt64(gauge, value) -} - -// RecordTimer records a timing duration -func (b *LocalBackend) RecordTimer(name string, tags map[string]string, d time.Duration) { - name = GetKey(name, tags, b.TagsSep, b.TagKVSep) - timer := b.findOrCreateTimer(name) - timer.Lock() - timer.hist.Current.RecordValue(int64(d / time.Millisecond)) - timer.Unlock() -} - -func (b *LocalBackend) findOrCreateTimer(name string) *localBackendTimer { - b.tm.Lock() - defer b.tm.Unlock() - if t, ok := b.timers[name]; ok { - return t - } - - t := &localBackendTimer{ - hist: hdrhistogram.NewWindowed(5, 0, int64((5*time.Minute)/time.Millisecond), 1), - } - b.timers[name] = t - return t -} - -type localBackendTimer struct { - sync.Mutex - hist *hdrhistogram.WindowedHistogram -} - -var ( - percentiles = map[string]float64{ - "P50": 50, - "P75": 75, - "P90": 90, - "P95": 95, - "P99": 99, - "P999": 99.9, - } -) - -// Snapshot captures a snapshot of the current counter and gauge values -func (b *LocalBackend) Snapshot() (counters, gauges map[string]int64) { - b.cm.Lock() - defer b.cm.Unlock() - - counters = make(map[string]int64, len(b.counters)) - for name, value := range b.counters { - counters[name] = atomic.LoadInt64(value) - } - - b.gm.Lock() - defer b.gm.Unlock() - - gauges = make(map[string]int64, len(b.gauges)) - for name, value := range b.gauges { - gauges[name] = atomic.LoadInt64(value) - } - - b.tm.Lock() - timers := make(map[string]*localBackendTimer) - for timerName, timer := range b.timers { - timers[timerName] = timer - } - 
b.tm.Unlock() - - for timerName, timer := range timers { - timer.Lock() - hist := timer.hist.Merge() - timer.Unlock() - for name, q := range percentiles { - gauges[timerName+"."+name] = hist.ValueAtQuantile(q) - } - } - - return -} - -// Stop cleanly closes the background goroutine spawned by NewLocalBackend. -func (b *LocalBackend) Stop() { - close(b.stop) - b.wg.Wait() -} - -// GetKey converts name+tags into a single string of the form -// "name|tag1=value1|...|tagN=valueN", where tag names are -// sorted alphabetically. -func GetKey(name string, tags map[string]string, tagsSep string, tagKVSep string) string { - keys := make([]string, 0, len(tags)) - for k := range tags { - keys = append(keys, k) - } - sort.Strings(keys) - key := name - for _, k := range keys { - key = key + tagsSep + k + tagKVSep + tags[k] - } - return key -} - -type stats struct { - name string - tags map[string]string - localBackend *LocalBackend -} - -type localTimer struct { - stats -} - -func (l *localTimer) Record(d time.Duration) { - l.localBackend.RecordTimer(l.name, l.tags, d) -} - -type localCounter struct { - stats -} - -func (l *localCounter) Inc(delta int64) { - l.localBackend.IncCounter(l.name, l.tags, delta) -} - -type localGauge struct { - stats -} - -func (l *localGauge) Update(value int64) { - l.localBackend.UpdateGauge(l.name, l.tags, value) -} - -// LocalFactory stats factory that creates metrics that are stored locally -type LocalFactory struct { - *LocalBackend - namespace string - tags map[string]string -} - -// NewLocalFactory returns a new LocalMetricsFactory -func NewLocalFactory(collectionInterval time.Duration) *LocalFactory { - return &LocalFactory{ - LocalBackend: NewLocalBackend(collectionInterval), - } -} - -// appendTags adds the tags to the namespace tags and returns a combined map. -func (l *LocalFactory) appendTags(tags map[string]string) map[string]string { - newTags := make(map[string]string) - for k, v := range l.tags { - newTags[k] = v - } - for k, v := range tags { - newTags[k] = v - } - return newTags -} - -func (l *LocalFactory) newNamespace(name string) string { - if l.namespace == "" { - return name - } - return l.namespace + "." + name -} - -// Counter returns a local stats counter -func (l *LocalFactory) Counter(name string, tags map[string]string) Counter { - return &localCounter{ - stats{ - name: l.newNamespace(name), - tags: l.appendTags(tags), - localBackend: l.LocalBackend, - }, - } -} - -// Timer returns a local stats timer. -func (l *LocalFactory) Timer(name string, tags map[string]string) Timer { - return &localTimer{ - stats{ - name: l.newNamespace(name), - tags: l.appendTags(tags), - localBackend: l.LocalBackend, - }, - } -} - -// Gauge returns a local stats gauge. -func (l *LocalFactory) Gauge(name string, tags map[string]string) Gauge { - return &localGauge{ - stats{ - name: l.newNamespace(name), - tags: l.appendTags(tags), - localBackend: l.LocalBackend, - }, - } -} - -// Namespace returns a new namespace. 
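`GetKey`, deleted here along with the rest of local.go, survives verbatim in the new keys.go above. A quick usage example of the flattening it performs (tag keys are sorted, so the same name and tags always yield the same key regardless of map iteration order):

```go
package main

import (
	"fmt"

	"github.com/uber/jaeger-lib/metrics"
)

func main() {
	key := metrics.GetKey(
		"requests",
		map[string]string{"result": "ok", "endpoint": "geo"},
		"|", "=",
	)
	fmt.Println(key) // requests|endpoint=geo|result=ok
}
```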
-func (l *LocalFactory) Namespace(name string, tags map[string]string) Factory { - return &LocalFactory{ - namespace: l.newNamespace(name), - tags: l.appendTags(tags), - LocalBackend: l.LocalBackend, - } -} diff --git a/hotelReservation/vendor/github.com/uber/jaeger-lib/metrics/metrics.go b/hotelReservation/vendor/github.com/uber/jaeger-lib/metrics/metrics.go index 0b97707b0..0df0c662e 100644 --- a/hotelReservation/vendor/github.com/uber/jaeger-lib/metrics/metrics.go +++ b/hotelReservation/vendor/github.com/uber/jaeger-lib/metrics/metrics.go @@ -17,23 +17,29 @@ package metrics import ( "fmt" "reflect" + "strconv" "strings" ) -// Init initializes the passed in metrics and initializes its fields using the passed in factory. -func Init(metrics interface{}, factory Factory, globalTags map[string]string) { - if err := initMetrics(metrics, factory, globalTags); err != nil { - panic(err.Error()) - } -} - -// initMetrics uses reflection to initialize a struct containing metrics fields +// MustInit initializes the passed in metrics and initializes its fields using the passed in factory. +// +// It uses reflection to initialize a struct containing metrics fields // by assigning new Counter/Gauge/Timer values with the metric name retrieved // from the `metric` tag and stats tags retrieved from the `tags` tag. // // Note: all fields of the struct must be exported, have a `metric` tag, and be // of type Counter or Gauge or Timer. -func initMetrics(m interface{}, factory Factory, globalTags map[string]string) error { +// +// Errors during Init lead to a panic. +func MustInit(metrics interface{}, factory Factory, globalTags map[string]string) { + if err := Init(metrics, factory, globalTags); err != nil { + panic(err.Error()) + } +} + +// Init does the same as MustInit, but returns an error instead of +// panicking. +func Init(m interface{}, factory Factory, globalTags map[string]string) error { // Allow user to opt out of reporting metrics by passing in nil. 
if factory == nil { factory = NullFactory @@ -42,6 +48,7 @@ func initMetrics(m interface{}, factory Factory, globalTags map[string]string) e counterPtrType := reflect.TypeOf((*Counter)(nil)).Elem() gaugePtrType := reflect.TypeOf((*Gauge)(nil)).Elem() timerPtrType := reflect.TypeOf((*Timer)(nil)).Elem() + histogramPtrType := reflect.TypeOf((*Histogram)(nil)).Elem() v := reflect.ValueOf(m).Elem() t := v.Type() @@ -50,6 +57,7 @@ func initMetrics(m interface{}, factory Factory, globalTags map[string]string) e for k, v := range globalTags { tags[k] = v } + var buckets []float64 field := t.Field(i) metric := field.Tag.Get("metric") if metric == "" { @@ -67,13 +75,57 @@ func initMetrics(m interface{}, factory Factory, globalTags map[string]string) e tags[tag[0]] = tag[1] } } + if bucketString := field.Tag.Get("buckets"); bucketString != "" { + if field.Type.AssignableTo(timerPtrType) { + // TODO: Parse timer duration buckets + return fmt.Errorf( + "Field [%s]: Buckets are not currently initialized for timer metrics", + field.Name) + } else if field.Type.AssignableTo(histogramPtrType) { + bucketValues := strings.Split(bucketString, ",") + for _, bucket := range bucketValues { + b, err := strconv.ParseFloat(bucket, 64) + if err != nil { + return fmt.Errorf( + "Field [%s]: Bucket [%s] could not be converted to float64 in 'buckets' string [%s]", + field.Name, bucket, bucketString) + } + buckets = append(buckets, b) + } + } else { + return fmt.Errorf( + "Field [%s]: Buckets should only be defined for Timer and Histogram metric types", + field.Name) + } + } + help := field.Tag.Get("help") var obj interface{} if field.Type.AssignableTo(counterPtrType) { - obj = factory.Counter(metric, tags) + obj = factory.Counter(Options{ + Name: metric, + Tags: tags, + Help: help, + }) } else if field.Type.AssignableTo(gaugePtrType) { - obj = factory.Gauge(metric, tags) + obj = factory.Gauge(Options{ + Name: metric, + Tags: tags, + Help: help, + }) } else if field.Type.AssignableTo(timerPtrType) { - obj = factory.Timer(metric, tags) + // TODO: Add buckets once parsed (see TODO above) + obj = factory.Timer(TimerOptions{ + Name: metric, + Tags: tags, + Help: help, + }) + } else if field.Type.AssignableTo(histogramPtrType) { + obj = factory.Histogram(HistogramOptions{ + Name: metric, + Tags: tags, + Help: help, + Buckets: buckets, + }) } else { return fmt.Errorf( "Field %s is not a pointer to timer, gauge, or counter", diff --git a/hotelReservation/vendor/github.com/xdg-go/pbkdf2/.gitignore b/hotelReservation/vendor/github.com/xdg-go/pbkdf2/.gitignore new file mode 100644 index 000000000..f1c181ec9 --- /dev/null +++ b/hotelReservation/vendor/github.com/xdg-go/pbkdf2/.gitignore @@ -0,0 +1,12 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out diff --git a/hotelReservation/vendor/github.com/xdg-go/pbkdf2/LICENSE b/hotelReservation/vendor/github.com/xdg-go/pbkdf2/LICENSE new file mode 100644 index 000000000..67db85882 --- /dev/null +++ b/hotelReservation/vendor/github.com/xdg-go/pbkdf2/LICENSE @@ -0,0 +1,175 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. diff --git a/hotelReservation/vendor/github.com/xdg-go/pbkdf2/README.md b/hotelReservation/vendor/github.com/xdg-go/pbkdf2/README.md new file mode 100644 index 000000000..d2824e456 --- /dev/null +++ b/hotelReservation/vendor/github.com/xdg-go/pbkdf2/README.md @@ -0,0 +1,17 @@ +[![Go Reference](https://pkg.go.dev/badge/github.com/xdg-go/pbkdf2.svg)](https://pkg.go.dev/github.com/xdg-go/pbkdf2) +[![Go Report Card](https://goreportcard.com/badge/github.com/xdg-go/pbkdf2)](https://goreportcard.com/report/github.com/xdg-go/pbkdf2) +[![Github Actions](https://github.com/xdg-go/pbkdf2/actions/workflows/test.yml/badge.svg)](https://github.com/xdg-go/pbkdf2/actions/workflows/test.yml) + +# pbkdf2 – Go implementation of PBKDF2 + +## Description + +Package pbkdf2 provides password-based key derivation based on +[RFC 8018](https://tools.ietf.org/html/rfc8018). + +## Copyright and License + +Copyright 2021 by David A. Golden. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"). 
You may +obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 diff --git a/hotelReservation/vendor/github.com/xdg-go/pbkdf2/pbkdf2.go b/hotelReservation/vendor/github.com/xdg-go/pbkdf2/pbkdf2.go new file mode 100644 index 000000000..029945ca4 --- /dev/null +++ b/hotelReservation/vendor/github.com/xdg-go/pbkdf2/pbkdf2.go @@ -0,0 +1,76 @@ +// Copyright 2021 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package pbkdf2 implements password-based key derivation using the PBKDF2 +// algorithm described in RFC 2898 and RFC 8018. +// +// It provides a drop-in replacement for `golang.org/x/crypto/pbkdf2`, with +// the following benefits: +// +// - Released as a module with semantic versioning +// +// - Does not pull in dependencies for unrelated `x/crypto/*` packages +// +// - Supports Go 1.9+ +// +// See https://tools.ietf.org/html/rfc8018#section-4 for security considerations +// in the selection of a salt and iteration count. +package pbkdf2 + +import ( + "crypto/hmac" + "encoding/binary" + "hash" +) + +// Key generates a derived key from a password using the PBKDF2 algorithm. The +// inputs include salt bytes, the iteration count, desired key length, and a +// constructor for a hashing function. For example, for a 32-byte key using +// SHA-256: +// +// key := Key([]byte("trustNo1"), salt, 10000, 32, sha256.New) +func Key(password, salt []byte, iterCount, keyLen int, h func() hash.Hash) []byte { + prf := hmac.New(h, password) + hLen := prf.Size() + numBlocks := keyLen / hLen + // Get an extra block if keyLen is not an even number of hLen blocks. + if keyLen%hLen > 0 { + numBlocks++ + } + + Ti := make([]byte, hLen) + Uj := make([]byte, hLen) + dk := make([]byte, 0, hLen*numBlocks) + buf := make([]byte, 4) + + for i := uint32(1); i <= uint32(numBlocks); i++ { + // Initialize Uj for j == 1 from salt and block index. + // Initialize Ti = U1. + binary.BigEndian.PutUint32(buf, i) + prf.Reset() + prf.Write(salt) + prf.Write(buf) + Uj = Uj[:0] + Uj = prf.Sum(Uj) + + // Ti = U1 ^ U2 ^ ... ^ Ux + copy(Ti, Uj) + for j := 2; j <= iterCount; j++ { + prf.Reset() + prf.Write(Uj) + Uj = Uj[:0] + Uj = prf.Sum(Uj) + for k := range Uj { + Ti[k] ^= Uj[k] + } + } + + // DK = concat(T1, T2, ... Tn) + dk = append(dk, Ti...) + } + + return dk[0:keyLen] +} diff --git a/hotelReservation/vendor/github.com/xdg-go/scram/.gitignore b/hotelReservation/vendor/github.com/xdg-go/scram/.gitignore new file mode 100644 index 000000000..e69de29bb diff --git a/hotelReservation/vendor/github.com/xdg-go/scram/CHANGELOG.md b/hotelReservation/vendor/github.com/xdg-go/scram/CHANGELOG.md new file mode 100644 index 000000000..b833be5e2 --- /dev/null +++ b/hotelReservation/vendor/github.com/xdg-go/scram/CHANGELOG.md @@ -0,0 +1,26 @@ +# CHANGELOG + +## v1.1.2 - 2022-12-07 + +- Bump stringprep dependency to v1.0.4 for upstream CVE fix. + +## v1.1.1 - 2022-03-03 + +- Bump stringprep dependency to v1.0.3 for upstream CVE fix. + +## v1.1.0 - 2022-01-16 + +- Add SHA-512 hash generator function for convenience. + +## v1.0.2 - 2021-03-28 + +- Switch PBKDF2 dependency to github.com/xdg-go/pbkdf2 to + minimize transitive dependencies and support Go 1.9+. + +## v1.0.1 - 2021-03-27 + +- Bump stringprep dependency to v1.0.2 for Go 1.11 support. 
+ +## v1.0.0 - 2021-03-27 + +- First release as a Go module diff --git a/hotelReservation/vendor/github.com/xdg-go/scram/LICENSE b/hotelReservation/vendor/github.com/xdg-go/scram/LICENSE new file mode 100644 index 000000000..67db85882 --- /dev/null +++ b/hotelReservation/vendor/github.com/xdg-go/scram/LICENSE @@ -0,0 +1,175 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
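For orientation before the next vendored package: the jaeger-lib metrics hunk at the top of this section adds a `buckets` struct tag and a Histogram branch to initMetrics, and that is easiest to see in use. The sketch below is illustrative and not part of the diff; it assumes the hunk belongs to jaeger-lib's metrics package (import path github.com/uber/jaeger-lib/metrics) and uses its exported Init wrapper around initMetrics, with invented struct, field, and metric names.

```go
package main

import "github.com/uber/jaeger-lib/metrics"

// searchMetrics is a hypothetical metric bundle; every name here is
// invented for illustration.
type searchMetrics struct {
	// Counter with one static tag and help text.
	Requests metrics.Counter `metric:"requests" tags:"phase=search" help:"Number of search requests"`

	// Histogram with explicit bucket boundaries; the new code parses the
	// comma-separated buckets tag into a []float64.
	Latency metrics.Histogram `metric:"latency_seconds" buckets:"0.005,0.01,0.025,0.05" help:"Search latency in seconds"`
}

func main() {
	var m searchMetrics
	// Init reflects over the struct and fills each field from a factory;
	// NullFactory (seen at the top of the hunk) yields no-op metrics.
	if err := metrics.Init(&m, metrics.NullFactory, map[string]string{"service": "search"}); err != nil {
		panic(err)
	}

	m.Requests.Inc(1)
	m.Latency.Record(0.0071)
}
```

Note that, per the hunk, a buckets tag on a Timer field is rejected for now (duration bucket parsing is an open TODO), so bucketed fields must be Histograms.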
diff --git a/hotelReservation/vendor/github.com/xdg-go/scram/README.md b/hotelReservation/vendor/github.com/xdg-go/scram/README.md new file mode 100644 index 000000000..3a46f5ceb --- /dev/null +++ b/hotelReservation/vendor/github.com/xdg-go/scram/README.md @@ -0,0 +1,72 @@ +[![Go Reference](https://pkg.go.dev/badge/github.com/xdg-go/scram.svg)](https://pkg.go.dev/github.com/xdg-go/scram) +[![Go Report Card](https://goreportcard.com/badge/github.com/xdg-go/scram)](https://goreportcard.com/report/github.com/xdg-go/scram) +[![Github Actions](https://github.com/xdg-go/scram/actions/workflows/test.yml/badge.svg)](https://github.com/xdg-go/scram/actions/workflows/test.yml) + +# scram – Go implementation of RFC-5802 + +## Description + +Package scram provides client and server implementations of the Salted +Challenge Response Authentication Mechanism (SCRAM) described in +[RFC-5802](https://tools.ietf.org/html/rfc5802) and +[RFC-7677](https://tools.ietf.org/html/rfc7677). + +It includes both client and server side support. + +Channel binding and extensions are not (yet) supported. + +## Examples + +### Client side + + package main + + import "github.com/xdg-go/scram" + + func main() { + // Get Client with username, password and (optional) authorization ID. + clientSHA1, err := scram.SHA1.NewClient("mulder", "trustno1", "") + if err != nil { + panic(err) + } + + // Prepare the authentication conversation. Use the empty string as the + // initial server message argument to start the conversation. + conv := clientSHA1.NewConversation() + var serverMsg string + + // Get the first message, send it and read the response. + firstMsg, err := conv.Step(serverMsg) + if err != nil { + panic(err) + } + serverMsg = sendClientMsg(firstMsg) + + // Get the second message, send it, and read the response. + secondMsg, err := conv.Step(serverMsg) + if err != nil { + panic(err) + } + serverMsg = sendClientMsg(secondMsg) + + // Validate the server's final message. We have no further message to + // send so ignore that return value. + _, err = conv.Step(serverMsg) + if err != nil { + panic(err) + } + + return + } + + func sendClientMsg(s string) string { + // A real implementation would send this to a server and read a reply. + return "" + } + +## Copyright and License + +Copyright 2018 by David A. Golden. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"). You may +obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 diff --git a/hotelReservation/vendor/github.com/xdg-go/scram/client.go b/hotelReservation/vendor/github.com/xdg-go/scram/client.go new file mode 100644 index 000000000..5b53021b3 --- /dev/null +++ b/hotelReservation/vendor/github.com/xdg-go/scram/client.go @@ -0,0 +1,130 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package scram + +import ( + "sync" + + "github.com/xdg-go/pbkdf2" +) + +// Client implements the client side of SCRAM authentication. It holds +// configuration values needed to initialize new client-side conversations for +// a specific username, password and authorization ID tuple. Client caches +// the computationally-expensive parts of a SCRAM conversation as described in +// RFC-5802. If repeated authentication conversations may be required for a +// user (e.g. 
disconnect/reconnect), the user's Client should be preserved.
+//
+// For security reasons, Clients have a default minimum PBKDF2 iteration count
+// of 4096. If a server requests a smaller iteration count, an authentication
+// conversation will error.
+//
+// A Client can also be used by a server application to construct the hashed
+// authentication values to be stored for a new user. See GetStoredCredentials()
+// for more.
+type Client struct {
+	sync.RWMutex
+	username string
+	password string
+	authzID  string
+	minIters int
+	nonceGen NonceGeneratorFcn
+	hashGen  HashGeneratorFcn
+	cache    map[KeyFactors]derivedKeys
+}
+
+func newClient(username, password, authzID string, fcn HashGeneratorFcn) *Client {
+	return &Client{
+		username: username,
+		password: password,
+		authzID:  authzID,
+		minIters: 4096,
+		nonceGen: defaultNonceGenerator,
+		hashGen:  fcn,
+		cache:    make(map[KeyFactors]derivedKeys),
+	}
+}
+
+// WithMinIterations changes minimum required PBKDF2 iteration count.
+func (c *Client) WithMinIterations(n int) *Client {
+	c.Lock()
+	defer c.Unlock()
+	c.minIters = n
+	return c
+}
+
+// WithNonceGenerator replaces the default nonce generator (base64 encoding of
+// 24 bytes from crypto/rand) with a custom generator. This is provided for
+// testing or for users with custom nonce requirements.
+func (c *Client) WithNonceGenerator(ng NonceGeneratorFcn) *Client {
+	c.Lock()
+	defer c.Unlock()
+	c.nonceGen = ng
+	return c
+}
+
+// NewConversation constructs a client-side authentication conversation.
+// Conversations cannot be reused, so this must be called for each new
+// authentication attempt.
+func (c *Client) NewConversation() *ClientConversation {
+	c.RLock()
+	defer c.RUnlock()
+	return &ClientConversation{
+		client:   c,
+		nonceGen: c.nonceGen,
+		hashGen:  c.hashGen,
+		minIters: c.minIters,
+	}
+}
+
+func (c *Client) getDerivedKeys(kf KeyFactors) derivedKeys {
+	dk, ok := c.getCache(kf)
+	if !ok {
+		dk = c.computeKeys(kf)
+		c.setCache(kf, dk)
+	}
+	return dk
+}
+
+// GetStoredCredentials takes a salt and iteration count structure and
+// provides the values that must be stored by a server to authenticate a
+// user. These values are what the Server credential lookup function must
+// return for a given username.
+func (c *Client) GetStoredCredentials(kf KeyFactors) StoredCredentials {
+	dk := c.getDerivedKeys(kf)
+	return StoredCredentials{
+		KeyFactors: kf,
+		StoredKey:  dk.StoredKey,
+		ServerKey:  dk.ServerKey,
+	}
+}
+
+func (c *Client) computeKeys(kf KeyFactors) derivedKeys {
+	h := c.hashGen()
+	saltedPassword := pbkdf2.Key([]byte(c.password), []byte(kf.Salt), kf.Iters, h.Size(), c.hashGen)
+	clientKey := computeHMAC(c.hashGen, saltedPassword, []byte("Client Key"))
+
+	return derivedKeys{
+		ClientKey: clientKey,
+		StoredKey: computeHash(c.hashGen, clientKey),
+		ServerKey: computeHMAC(c.hashGen, saltedPassword, []byte("Server Key")),
+	}
+}
+
+func (c *Client) getCache(kf KeyFactors) (derivedKeys, bool) {
+	c.RLock()
+	defer c.RUnlock()
+	dk, ok := c.cache[kf]
+	return dk, ok
+}
+
+func (c *Client) setCache(kf KeyFactors, dk derivedKeys) {
+	c.Lock()
+	defer c.Unlock()
+	c.cache[kf] = dk
+	return
+}
diff --git a/hotelReservation/vendor/github.com/xdg-go/scram/client_conv.go b/hotelReservation/vendor/github.com/xdg-go/scram/client_conv.go
new file mode 100644
index 000000000..834056889
--- /dev/null
+++ b/hotelReservation/vendor/github.com/xdg-go/scram/client_conv.go
@@ -0,0 +1,149 @@
+// Copyright 2018 by David A. Golden. All rights reserved.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package scram + +import ( + "crypto/hmac" + "encoding/base64" + "errors" + "fmt" + "strings" +) + +type clientState int + +const ( + clientStarting clientState = iota + clientFirst + clientFinal + clientDone +) + +// ClientConversation implements the client-side of an authentication +// conversation with a server. A new conversation must be created for +// each authentication attempt. +type ClientConversation struct { + client *Client + nonceGen NonceGeneratorFcn + hashGen HashGeneratorFcn + minIters int + state clientState + valid bool + gs2 string + nonce string + c1b string + serveSig []byte +} + +// Step takes a string provided from a server (or just an empty string for the +// very first conversation step) and attempts to move the authentication +// conversation forward. It returns a string to be sent to the server or an +// error if the server message is invalid. Calling Step after a conversation +// completes is also an error. +func (cc *ClientConversation) Step(challenge string) (response string, err error) { + switch cc.state { + case clientStarting: + cc.state = clientFirst + response, err = cc.firstMsg() + case clientFirst: + cc.state = clientFinal + response, err = cc.finalMsg(challenge) + case clientFinal: + cc.state = clientDone + response, err = cc.validateServer(challenge) + default: + response, err = "", errors.New("Conversation already completed") + } + return +} + +// Done returns true if the conversation is completed or has errored. +func (cc *ClientConversation) Done() bool { + return cc.state == clientDone +} + +// Valid returns true if the conversation successfully authenticated with the +// server, including counter-validation that the server actually has the +// user's stored credentials. 
+func (cc *ClientConversation) Valid() bool { + return cc.valid +} + +func (cc *ClientConversation) firstMsg() (string, error) { + // Values are cached for use in final message parameters + cc.gs2 = cc.gs2Header() + cc.nonce = cc.client.nonceGen() + cc.c1b = fmt.Sprintf("n=%s,r=%s", encodeName(cc.client.username), cc.nonce) + + return cc.gs2 + cc.c1b, nil +} + +func (cc *ClientConversation) finalMsg(s1 string) (string, error) { + msg, err := parseServerFirst(s1) + if err != nil { + return "", err + } + + // Check nonce prefix and update + if !strings.HasPrefix(msg.nonce, cc.nonce) { + return "", errors.New("server nonce did not extend client nonce") + } + cc.nonce = msg.nonce + + // Check iteration count vs minimum + if msg.iters < cc.minIters { + return "", fmt.Errorf("server requested too few iterations (%d)", msg.iters) + } + + // Create client-final-message-without-proof + c2wop := fmt.Sprintf( + "c=%s,r=%s", + base64.StdEncoding.EncodeToString([]byte(cc.gs2)), + cc.nonce, + ) + + // Create auth message + authMsg := cc.c1b + "," + s1 + "," + c2wop + + // Get derived keys from client cache + dk := cc.client.getDerivedKeys(KeyFactors{Salt: string(msg.salt), Iters: msg.iters}) + + // Create proof as clientkey XOR clientsignature + clientSignature := computeHMAC(cc.hashGen, dk.StoredKey, []byte(authMsg)) + clientProof := xorBytes(dk.ClientKey, clientSignature) + proof := base64.StdEncoding.EncodeToString(clientProof) + + // Cache ServerSignature for later validation + cc.serveSig = computeHMAC(cc.hashGen, dk.ServerKey, []byte(authMsg)) + + return fmt.Sprintf("%s,p=%s", c2wop, proof), nil +} + +func (cc *ClientConversation) validateServer(s2 string) (string, error) { + msg, err := parseServerFinal(s2) + if err != nil { + return "", err + } + + if len(msg.err) > 0 { + return "", fmt.Errorf("server error: %s", msg.err) + } + + if !hmac.Equal(msg.verifier, cc.serveSig) { + return "", errors.New("server validation failed") + } + + cc.valid = true + return "", nil +} + +func (cc *ClientConversation) gs2Header() string { + if cc.client.authzID == "" { + return "n,," + } + return fmt.Sprintf("n,%s,", encodeName(cc.client.authzID)) +} diff --git a/hotelReservation/vendor/github.com/xdg-go/scram/common.go b/hotelReservation/vendor/github.com/xdg-go/scram/common.go new file mode 100644 index 000000000..cb705cb74 --- /dev/null +++ b/hotelReservation/vendor/github.com/xdg-go/scram/common.go @@ -0,0 +1,97 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package scram + +import ( + "crypto/hmac" + "crypto/rand" + "encoding/base64" + "strings" +) + +// NonceGeneratorFcn defines a function that returns a string of high-quality +// random printable ASCII characters EXCLUDING the comma (',') character. The +// default nonce generator provides Base64 encoding of 24 bytes from +// crypto/rand. +type NonceGeneratorFcn func() string + +// derivedKeys collects the three cryptographically derived values +// into one struct for caching. +type derivedKeys struct { + ClientKey []byte + StoredKey []byte + ServerKey []byte +} + +// KeyFactors represent the two server-provided factors needed to compute +// client credentials for authentication. Salt is decoded bytes (i.e. not +// base64), but in string form so that KeyFactors can be used as a map key for +// cached credentials. 
+type KeyFactors struct { + Salt string + Iters int +} + +// StoredCredentials are the values that a server must store for a given +// username to allow authentication. They include the salt and iteration +// count, plus the derived values to authenticate a client and for the server +// to authenticate itself back to the client. +// +// NOTE: these are specific to a given hash function. To allow a user to +// authenticate with either SCRAM-SHA-1 or SCRAM-SHA-256, two sets of +// StoredCredentials must be created and stored, one for each hash function. +type StoredCredentials struct { + KeyFactors + StoredKey []byte + ServerKey []byte +} + +// CredentialLookup is a callback to provide StoredCredentials for a given +// username. This is used to configure Server objects. +// +// NOTE: these are specific to a given hash function. The callback provided +// to a Server with a given hash function must provide the corresponding +// StoredCredentials. +type CredentialLookup func(string) (StoredCredentials, error) + +func defaultNonceGenerator() string { + raw := make([]byte, 24) + nonce := make([]byte, base64.StdEncoding.EncodedLen(len(raw))) + rand.Read(raw) + base64.StdEncoding.Encode(nonce, raw) + return string(nonce) +} + +func encodeName(s string) string { + return strings.Replace(strings.Replace(s, "=", "=3D", -1), ",", "=2C", -1) +} + +func decodeName(s string) (string, error) { + // TODO Check for = not followed by 2C or 3D + return strings.Replace(strings.Replace(s, "=2C", ",", -1), "=3D", "=", -1), nil +} + +func computeHash(hg HashGeneratorFcn, b []byte) []byte { + h := hg() + h.Write(b) + return h.Sum(nil) +} + +func computeHMAC(hg HashGeneratorFcn, key, data []byte) []byte { + mac := hmac.New(hg, key) + mac.Write(data) + return mac.Sum(nil) +} + +func xorBytes(a, b []byte) []byte { + // TODO check a & b are same length, or just xor to smallest + xor := make([]byte, len(a)) + for i := range a { + xor[i] = a[i] ^ b[i] + } + return xor +} diff --git a/hotelReservation/vendor/github.com/xdg-go/scram/doc.go b/hotelReservation/vendor/github.com/xdg-go/scram/doc.go new file mode 100644 index 000000000..82e8aeed8 --- /dev/null +++ b/hotelReservation/vendor/github.com/xdg-go/scram/doc.go @@ -0,0 +1,26 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package scram provides client and server implementations of the Salted +// Challenge Response Authentication Mechanism (SCRAM) described in RFC-5802 +// and RFC-7677. +// +// Usage +// +// The scram package provides variables, `SHA1`, `SHA256`, and `SHA512`, that +// are used to construct Client or Server objects. +// +// clientSHA1, err := scram.SHA1.NewClient(username, password, authID) +// clientSHA256, err := scram.SHA256.NewClient(username, password, authID) +// clientSHA512, err := scram.SHA512.NewClient(username, password, authID) +// +// serverSHA1, err := scram.SHA1.NewServer(credentialLookupFcn) +// serverSHA256, err := scram.SHA256.NewServer(credentialLookupFcn) +// serverSHA512, err := scram.SHA512.NewServer(credentialLookupFcn) +// +// These objects are used to construct ClientConversation or +// ServerConversation objects that are used to carry out authentication. 
+package scram
diff --git a/hotelReservation/vendor/github.com/xdg-go/scram/parse.go b/hotelReservation/vendor/github.com/xdg-go/scram/parse.go
new file mode 100644
index 000000000..722f6043d
--- /dev/null
+++ b/hotelReservation/vendor/github.com/xdg-go/scram/parse.go
@@ -0,0 +1,205 @@
+// Copyright 2018 by David A. Golden. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package scram
+
+import (
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+type c1Msg struct {
+	gs2Header string
+	authzID   string
+	username  string
+	nonce     string
+	c1b       string
+}
+
+type c2Msg struct {
+	cbind []byte
+	nonce string
+	proof []byte
+	c2wop string
+}
+
+type s1Msg struct {
+	nonce string
+	salt  []byte
+	iters int
+}
+
+type s2Msg struct {
+	verifier []byte
+	err      string
+}
+
+func parseField(s, k string) (string, error) {
+	t := strings.TrimPrefix(s, k+"=")
+	if t == s {
+		return "", fmt.Errorf("error parsing '%s' for field '%s'", s, k)
+	}
+	return t, nil
+}
+
+func parseGS2Flag(s string) (string, error) {
+	if s[0] == 'p' {
+		return "", fmt.Errorf("channel binding requested but not supported")
+	}
+
+	if s == "n" || s == "y" {
+		return s, nil
+	}
+
+	return "", fmt.Errorf("error parsing '%s' for gs2 flag", s)
+}
+
+func parseFieldBase64(s, k string) ([]byte, error) {
+	raw, err := parseField(s, k)
+	if err != nil {
+		return nil, err
+	}
+
+	dec, err := base64.StdEncoding.DecodeString(raw)
+	if err != nil {
+		return nil, err
+	}
+
+	return dec, nil
+}
+
+func parseFieldInt(s, k string) (int, error) {
+	raw, err := parseField(s, k)
+	if err != nil {
+		return 0, err
+	}
+
+	num, err := strconv.Atoi(raw)
+	if err != nil {
+		return 0, fmt.Errorf("error parsing field '%s': %v", k, err)
+	}
+
+	return num, nil
+}
+
+func parseClientFirst(c1 string) (msg c1Msg, err error) {
+
+	fields := strings.Split(c1, ",")
+	if len(fields) < 4 {
+		err = errors.New("not enough fields in first client message")
+		return
+	}
+
+	gs2flag, err := parseGS2Flag(fields[0])
+	if err != nil {
+		return
+	}
+
+	// 'a' field is optional
+	if len(fields[1]) > 0 {
+		msg.authzID, err = parseField(fields[1], "a")
+		if err != nil {
+			return
+		}
+	}
+
+	// Recombine and save the gs2 header
+	msg.gs2Header = gs2flag + "," + msg.authzID + ","
+
+	// Check for unsupported extensions field "m".
+	if strings.HasPrefix(fields[2], "m=") {
+		err = errors.New("SCRAM message extensions are not supported")
+		return
+	}
+
+	msg.username, err = parseField(fields[2], "n")
+	if err != nil {
+		return
+	}
+
+	msg.nonce, err = parseField(fields[3], "r")
+	if err != nil {
+		return
+	}
+
+	msg.c1b = strings.Join(fields[2:], ",")
+
+	return
+}
+
+func parseClientFinal(c2 string) (msg c2Msg, err error) {
+	fields := strings.Split(c2, ",")
+	if len(fields) < 3 {
+		err = errors.New("not enough fields in final client message")
+		return
+	}
+
+	msg.cbind, err = parseFieldBase64(fields[0], "c")
+	if err != nil {
+		return
+	}
+
+	msg.nonce, err = parseField(fields[1], "r")
+	if err != nil {
+		return
+	}
+
+	// Extension fields may come between nonce and proof, so we
+	// grab the *last* field as proof.
+ msg.proof, err = parseFieldBase64(fields[len(fields)-1], "p") + if err != nil { + return + } + + msg.c2wop = c2[:strings.LastIndex(c2, ",")] + + return +} + +func parseServerFirst(s1 string) (msg s1Msg, err error) { + + // Check for unsupported extensions field "m". + if strings.HasPrefix(s1, "m=") { + err = errors.New("SCRAM message extensions are not supported") + return + } + + fields := strings.Split(s1, ",") + if len(fields) < 3 { + err = errors.New("not enough fields in first server message") + return + } + + msg.nonce, err = parseField(fields[0], "r") + if err != nil { + return + } + + msg.salt, err = parseFieldBase64(fields[1], "s") + if err != nil { + return + } + + msg.iters, err = parseFieldInt(fields[2], "i") + + return +} + +func parseServerFinal(s2 string) (msg s2Msg, err error) { + fields := strings.Split(s2, ",") + + msg.verifier, err = parseFieldBase64(fields[0], "v") + if err == nil { + return + } + + msg.err, err = parseField(fields[0], "e") + + return +} diff --git a/hotelReservation/vendor/github.com/xdg-go/scram/scram.go b/hotelReservation/vendor/github.com/xdg-go/scram/scram.go new file mode 100644 index 000000000..a7b366027 --- /dev/null +++ b/hotelReservation/vendor/github.com/xdg-go/scram/scram.go @@ -0,0 +1,71 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package scram + +import ( + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "fmt" + "hash" + + "github.com/xdg-go/stringprep" +) + +// HashGeneratorFcn abstracts a factory function that returns a hash.Hash +// value to be used for SCRAM operations. Generally, one would use the +// provided package variables, `scram.SHA1` and `scram.SHA256`, for the most +// common forms of SCRAM. +type HashGeneratorFcn func() hash.Hash + +// SHA1 is a function that returns a crypto/sha1 hasher and should be used to +// create Client objects configured for SHA-1 hashing. +var SHA1 HashGeneratorFcn = func() hash.Hash { return sha1.New() } + +// SHA256 is a function that returns a crypto/sha256 hasher and should be used +// to create Client objects configured for SHA-256 hashing. +var SHA256 HashGeneratorFcn = func() hash.Hash { return sha256.New() } + +// SHA512 is a function that returns a crypto/sha512 hasher and should be used +// to create Client objects configured for SHA-512 hashing. +var SHA512 HashGeneratorFcn = func() hash.Hash { return sha512.New() } + +// NewClient constructs a SCRAM client component based on a given hash.Hash +// factory receiver. This constructor will normalize the username, password +// and authzID via the SASLprep algorithm, as recommended by RFC-5802. If +// SASLprep fails, the method returns an error. 
+func (f HashGeneratorFcn) NewClient(username, password, authzID string) (*Client, error) { + var userprep, passprep, authprep string + var err error + + if userprep, err = stringprep.SASLprep.Prepare(username); err != nil { + return nil, fmt.Errorf("Error SASLprepping username '%s': %v", username, err) + } + if passprep, err = stringprep.SASLprep.Prepare(password); err != nil { + return nil, fmt.Errorf("Error SASLprepping password '%s': %v", password, err) + } + if authprep, err = stringprep.SASLprep.Prepare(authzID); err != nil { + return nil, fmt.Errorf("Error SASLprepping authzID '%s': %v", authzID, err) + } + + return newClient(userprep, passprep, authprep, f), nil +} + +// NewClientUnprepped acts like NewClient, except none of the arguments will +// be normalized via SASLprep. This is not generally recommended, but is +// provided for users that may have custom normalization needs. +func (f HashGeneratorFcn) NewClientUnprepped(username, password, authzID string) (*Client, error) { + return newClient(username, password, authzID, f), nil +} + +// NewServer constructs a SCRAM server component based on a given hash.Hash +// factory receiver. To be maximally generic, it uses dependency injection to +// handle credential lookup, which is the process of turning a username string +// into a struct with stored credentials for authentication. +func (f HashGeneratorFcn) NewServer(cl CredentialLookup) (*Server, error) { + return newServer(cl, f) +} diff --git a/hotelReservation/vendor/github.com/xdg-go/scram/server.go b/hotelReservation/vendor/github.com/xdg-go/scram/server.go new file mode 100644 index 000000000..b119b3615 --- /dev/null +++ b/hotelReservation/vendor/github.com/xdg-go/scram/server.go @@ -0,0 +1,50 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package scram + +import "sync" + +// Server implements the server side of SCRAM authentication. It holds +// configuration values needed to initialize new server-side conversations. +// Generally, this can be persistent within an application. +type Server struct { + sync.RWMutex + credentialCB CredentialLookup + nonceGen NonceGeneratorFcn + hashGen HashGeneratorFcn +} + +func newServer(cl CredentialLookup, fcn HashGeneratorFcn) (*Server, error) { + return &Server{ + credentialCB: cl, + nonceGen: defaultNonceGenerator, + hashGen: fcn, + }, nil +} + +// WithNonceGenerator replaces the default nonce generator (base64 encoding of +// 24 bytes from crypto/rand) with a custom generator. This is provided for +// testing or for users with custom nonce requirements. +func (s *Server) WithNonceGenerator(ng NonceGeneratorFcn) *Server { + s.Lock() + defer s.Unlock() + s.nonceGen = ng + return s +} + +// NewConversation constructs a server-side authentication conversation. +// Conversations cannot be reused, so this must be called for each new +// authentication attempt. 
+func (s *Server) NewConversation() *ServerConversation { + s.RLock() + defer s.RUnlock() + return &ServerConversation{ + nonceGen: s.nonceGen, + hashGen: s.hashGen, + credentialCB: s.credentialCB, + } +} diff --git a/hotelReservation/vendor/github.com/xdg-go/scram/server_conv.go b/hotelReservation/vendor/github.com/xdg-go/scram/server_conv.go new file mode 100644 index 000000000..9c8838c38 --- /dev/null +++ b/hotelReservation/vendor/github.com/xdg-go/scram/server_conv.go @@ -0,0 +1,151 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package scram + +import ( + "crypto/hmac" + "encoding/base64" + "errors" + "fmt" +) + +type serverState int + +const ( + serverFirst serverState = iota + serverFinal + serverDone +) + +// ServerConversation implements the server-side of an authentication +// conversation with a client. A new conversation must be created for +// each authentication attempt. +type ServerConversation struct { + nonceGen NonceGeneratorFcn + hashGen HashGeneratorFcn + credentialCB CredentialLookup + state serverState + credential StoredCredentials + valid bool + gs2Header string + username string + authzID string + nonce string + c1b string + s1 string +} + +// Step takes a string provided from a client and attempts to move the +// authentication conversation forward. It returns a string to be sent to the +// client or an error if the client message is invalid. Calling Step after a +// conversation completes is also an error. +func (sc *ServerConversation) Step(challenge string) (response string, err error) { + switch sc.state { + case serverFirst: + sc.state = serverFinal + response, err = sc.firstMsg(challenge) + case serverFinal: + sc.state = serverDone + response, err = sc.finalMsg(challenge) + default: + response, err = "", errors.New("Conversation already completed") + } + return +} + +// Done returns true if the conversation is completed or has errored. +func (sc *ServerConversation) Done() bool { + return sc.state == serverDone +} + +// Valid returns true if the conversation successfully authenticated the +// client. +func (sc *ServerConversation) Valid() bool { + return sc.valid +} + +// Username returns the client-provided username. This is valid to call +// if the first conversation Step() is successful. +func (sc *ServerConversation) Username() string { + return sc.username +} + +// AuthzID returns the (optional) client-provided authorization identity, if +// any. If one was not provided, it returns the empty string. This is valid +// to call if the first conversation Step() is successful. 
+func (sc *ServerConversation) AuthzID() string { + return sc.authzID +} + +func (sc *ServerConversation) firstMsg(c1 string) (string, error) { + msg, err := parseClientFirst(c1) + if err != nil { + sc.state = serverDone + return "", err + } + + sc.gs2Header = msg.gs2Header + sc.username = msg.username + sc.authzID = msg.authzID + + sc.credential, err = sc.credentialCB(msg.username) + if err != nil { + sc.state = serverDone + return "e=unknown-user", err + } + + sc.nonce = msg.nonce + sc.nonceGen() + sc.c1b = msg.c1b + sc.s1 = fmt.Sprintf("r=%s,s=%s,i=%d", + sc.nonce, + base64.StdEncoding.EncodeToString([]byte(sc.credential.Salt)), + sc.credential.Iters, + ) + + return sc.s1, nil +} + +// For errors, returns server error message as well as non-nil error. Callers +// can choose whether to send server error or not. +func (sc *ServerConversation) finalMsg(c2 string) (string, error) { + msg, err := parseClientFinal(c2) + if err != nil { + return "", err + } + + // Check channel binding matches what we expect; in this case, we expect + // just the gs2 header we received as we don't support channel binding + // with a data payload. If we add binding, we need to independently + // compute the header to match here. + if string(msg.cbind) != sc.gs2Header { + return "e=channel-bindings-dont-match", fmt.Errorf("channel binding received '%s' doesn't match expected '%s'", msg.cbind, sc.gs2Header) + } + + // Check nonce received matches what we sent + if msg.nonce != sc.nonce { + return "e=other-error", errors.New("nonce received did not match nonce sent") + } + + // Create auth message + authMsg := sc.c1b + "," + sc.s1 + "," + msg.c2wop + + // Retrieve ClientKey from proof and verify it + clientSignature := computeHMAC(sc.hashGen, sc.credential.StoredKey, []byte(authMsg)) + clientKey := xorBytes([]byte(msg.proof), clientSignature) + storedKey := computeHash(sc.hashGen, clientKey) + + // Compare with constant-time function + if !hmac.Equal(storedKey, sc.credential.StoredKey) { + return "e=invalid-proof", errors.New("challenge proof invalid") + } + + sc.valid = true + + // Compute and return server verifier + serverSignature := computeHMAC(sc.hashGen, sc.credential.ServerKey, []byte(authMsg)) + return "v=" + base64.StdEncoding.EncodeToString(serverSignature), nil +} diff --git a/hotelReservation/vendor/github.com/xdg-go/stringprep/.gitignore b/hotelReservation/vendor/github.com/xdg-go/stringprep/.gitignore new file mode 100644 index 000000000..e69de29bb diff --git a/hotelReservation/vendor/github.com/xdg-go/stringprep/CHANGELOG.md b/hotelReservation/vendor/github.com/xdg-go/stringprep/CHANGELOG.md new file mode 100644 index 000000000..04b9753cd --- /dev/null +++ b/hotelReservation/vendor/github.com/xdg-go/stringprep/CHANGELOG.md @@ -0,0 +1,36 @@ +# CHANGELOG + + +## [v1.0.4] - 2022-12-07 + +### Maintenance + +- Bump golang.org/x/text to v0.3.8 due to CVE-2022-32149 + + +## [v1.0.3] - 2022-03-01 + +### Maintenance + +- Bump golang.org/x/text to v0.3.7 due to CVE-2021-38561 + + +## [v1.0.2] - 2021-03-27 + +### Maintenance + +- Change minimum Go version to 1.11 + + +## [v1.0.1] - 2021-03-24 + +### Bug Fixes + +- Add go.mod file + + +## [v1.0.0] - 2018-02-21 + +[v1.0.2]: https://github.com/xdg-go/stringprep/releases/tag/v1.0.2 +[v1.0.1]: https://github.com/xdg-go/stringprep/releases/tag/v1.0.1 +[v1.0.0]: https://github.com/xdg-go/stringprep/releases/tag/v1.0.0 diff --git a/hotelReservation/vendor/github.com/xdg-go/stringprep/LICENSE b/hotelReservation/vendor/github.com/xdg-go/stringprep/LICENSE new file mode 
100644 index 000000000..67db85882 --- /dev/null +++ b/hotelReservation/vendor/github.com/xdg-go/stringprep/LICENSE @@ -0,0 +1,175 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
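The diffs that follow vendor github.com/xdg-go/stringprep (RFC-3454 stringprep and the RFC-4013 SASLprep profile), most likely pulled in as a transitive dependency of the MongoDB Go driver's SCRAM authentication. A minimal consumer sketch, assuming only the exported API visible in the vendored files below (`stringprep.SASLprep` and `Profile.Prepare`); the input string and error handling are illustrative:

```go
package main

import (
	"fmt"

	"github.com/xdg-go/stringprep"
)

func main() {
	// SASLprep.Prepare applies the RFC-4013 profile: map (Table B.1 plus
	// non-ASCII space folding), NFKC-normalize, then reject prohibited
	// runes and strings that violate the BiDi rules.
	prepped, err := stringprep.SASLprep.Prepare("TrustNô1")
	if err != nil {
		// Prohibited runes (control characters, unassigned code
		// points, ...) are reported here rather than silently dropped.
		fmt.Println("saslprep failed:", err)
		return
	}
	fmt.Println(prepped) // already NFKC-normalized: prints "TrustNô1"
}
```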
diff --git a/hotelReservation/vendor/github.com/xdg-go/stringprep/README.md b/hotelReservation/vendor/github.com/xdg-go/stringprep/README.md new file mode 100644 index 000000000..83ea5346d --- /dev/null +++ b/hotelReservation/vendor/github.com/xdg-go/stringprep/README.md @@ -0,0 +1,28 @@ +[![Go Reference](https://pkg.go.dev/badge/github.com/xdg-go/stringprep.svg)](https://pkg.go.dev/github.com/xdg-go/stringprep) +[![Go Report Card](https://goreportcard.com/badge/github.com/xdg-go/stringprep)](https://goreportcard.com/report/github.com/xdg-go/stringprep) +[![Github Actions](https://github.com/xdg-go/stringprep/actions/workflows/test.yml/badge.svg)](https://github.com/xdg-go/stringprep/actions/workflows/test.yml) + +# stringprep – Go implementation of RFC-3454 stringprep and RFC-4013 SASLprep + +## Synopsis + +``` + import "github.com/xdg-go/stringprep" + + prepped := stringprep.SASLprep.Prepare("TrustNô1") + +``` + +## Description + +This library provides an implementation of the stringprep algorithm +(RFC-3454) in Go, including all data tables. + +A pre-built SASLprep (RFC-4013) profile is provided as well. + +## Copyright and License + +Copyright 2018 by David A. Golden. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"). You may +obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 diff --git a/hotelReservation/vendor/github.com/xdg-go/stringprep/bidi.go b/hotelReservation/vendor/github.com/xdg-go/stringprep/bidi.go new file mode 100644 index 000000000..6f6d321df --- /dev/null +++ b/hotelReservation/vendor/github.com/xdg-go/stringprep/bidi.go @@ -0,0 +1,73 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package stringprep + +var errHasLCat = "BiDi string can't have runes from category L" +var errFirstRune = "BiDi string first rune must have category R or AL" +var errLastRune = "BiDi string last rune must have category R or AL" + +// Check for prohibited characters from table C.8 +func checkBiDiProhibitedRune(s string) error { + for _, r := range s { + if TableC8.Contains(r) { + return Error{Msg: errProhibited, Rune: r} + } + } + return nil +} + +// Check for LCat characters from table D.2 +func checkBiDiLCat(s string) error { + for _, r := range s { + if TableD2.Contains(r) { + return Error{Msg: errHasLCat, Rune: r} + } + } + return nil +} + +// Check first and last characters are in table D.1; requires non-empty string +func checkBadFirstAndLastRandALCat(s string) error { + rs := []rune(s) + if !TableD1.Contains(rs[0]) { + return Error{Msg: errFirstRune, Rune: rs[0]} + } + n := len(rs) - 1 + if !TableD1.Contains(rs[n]) { + return Error{Msg: errLastRune, Rune: rs[n]} + } + return nil +} + +// Look for RandALCat characters from table D.1 +func hasBiDiRandALCat(s string) bool { + for _, r := range s { + if TableD1.Contains(r) { + return true + } + } + return false +} + +// Check that BiDi rules are satisfied ; let empty string pass this rule +func passesBiDiRules(s string) error { + if len(s) == 0 { + return nil + } + if err := checkBiDiProhibitedRune(s); err != nil { + return err + } + if hasBiDiRandALCat(s) { + if err := checkBiDiLCat(s); err != nil { + return err + } + if err := checkBadFirstAndLastRandALCat(s); err != nil { + return err + } + } + return nil +} diff --git a/hotelReservation/vendor/github.com/xdg-go/stringprep/doc.go b/hotelReservation/vendor/github.com/xdg-go/stringprep/doc.go new file mode 100644 index 000000000..b319e081b --- /dev/null +++ b/hotelReservation/vendor/github.com/xdg-go/stringprep/doc.go @@ -0,0 +1,10 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package stringprep provides data tables and algorithms for RFC-3454, +// including errata (as of 2018-02). It also provides a profile for +// SASLprep as defined in RFC-4013. +package stringprep diff --git a/hotelReservation/vendor/github.com/xdg-go/stringprep/error.go b/hotelReservation/vendor/github.com/xdg-go/stringprep/error.go new file mode 100644 index 000000000..7403e4991 --- /dev/null +++ b/hotelReservation/vendor/github.com/xdg-go/stringprep/error.go @@ -0,0 +1,14 @@ +package stringprep + +import "fmt" + +// Error describes problems encountered during stringprep, including what rune +// was problematic. +type Error struct { + Msg string + Rune rune +} + +func (e Error) Error() string { + return fmt.Sprintf("%s (rune: '\\u%04x')", e.Msg, e.Rune) +} diff --git a/hotelReservation/vendor/github.com/xdg-go/stringprep/map.go b/hotelReservation/vendor/github.com/xdg-go/stringprep/map.go new file mode 100644 index 000000000..e56a0dd43 --- /dev/null +++ b/hotelReservation/vendor/github.com/xdg-go/stringprep/map.go @@ -0,0 +1,21 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package stringprep + +// Mapping represents a stringprep mapping, from a single rune to zero or more +// runes. +type Mapping map[rune][]rune + +// Map maps a rune to a (possibly empty) rune slice via a stringprep Mapping. +// The ok return value is false if the rune was not found. +func (m Mapping) Map(r rune) (replacement []rune, ok bool) { + rs, ok := m[r] + if !ok { + return nil, false + } + return rs, true +} diff --git a/hotelReservation/vendor/github.com/xdg-go/stringprep/profile.go b/hotelReservation/vendor/github.com/xdg-go/stringprep/profile.go new file mode 100644 index 000000000..5a73be9e5 --- /dev/null +++ b/hotelReservation/vendor/github.com/xdg-go/stringprep/profile.go @@ -0,0 +1,75 @@ +package stringprep + +import ( + "golang.org/x/text/unicode/norm" +) + +// Profile represents a stringprep profile. +type Profile struct { + Mappings []Mapping + Normalize bool + Prohibits []Set + CheckBiDi bool +} + +var errProhibited = "prohibited character" + +// Prepare transforms an input string to an output string following +// the rules defined in the profile as defined by RFC-3454. +func (p Profile) Prepare(s string) (string, error) { + // Optimistically, assume output will be same length as input + temp := make([]rune, 0, len(s)) + + // Apply maps + for _, r := range s { + rs, ok := p.applyMaps(r) + if ok { + temp = append(temp, rs...) + } else { + temp = append(temp, r) + } + } + + // Normalize + var out string + if p.Normalize { + out = norm.NFKC.String(string(temp)) + } else { + out = string(temp) + } + + // Check prohibited + for _, r := range out { + if p.runeIsProhibited(r) { + return "", Error{Msg: errProhibited, Rune: r} + } + } + + // Check BiDi allowed + if p.CheckBiDi { + if err := passesBiDiRules(out); err != nil { + return "", err + } + } + + return out, nil +} + +func (p Profile) applyMaps(r rune) ([]rune, bool) { + for _, m := range p.Mappings { + rs, ok := m.Map(r) + if ok { + return rs, true + } + } + return nil, false +} + +func (p Profile) runeIsProhibited(r rune) bool { + for _, s := range p.Prohibits { + if s.Contains(r) { + return true + } + } + return false +} diff --git a/hotelReservation/vendor/github.com/xdg-go/stringprep/saslprep.go b/hotelReservation/vendor/github.com/xdg-go/stringprep/saslprep.go new file mode 100644 index 000000000..40013488b --- /dev/null +++ b/hotelReservation/vendor/github.com/xdg-go/stringprep/saslprep.go @@ -0,0 +1,52 @@ +package stringprep + +var mapNonASCIISpaceToASCIISpace = Mapping{ + 0x00A0: []rune{0x0020}, + 0x1680: []rune{0x0020}, + 0x2000: []rune{0x0020}, + 0x2001: []rune{0x0020}, + 0x2002: []rune{0x0020}, + 0x2003: []rune{0x0020}, + 0x2004: []rune{0x0020}, + 0x2005: []rune{0x0020}, + 0x2006: []rune{0x0020}, + 0x2007: []rune{0x0020}, + 0x2008: []rune{0x0020}, + 0x2009: []rune{0x0020}, + 0x200A: []rune{0x0020}, + 0x200B: []rune{0x0020}, + 0x202F: []rune{0x0020}, + 0x205F: []rune{0x0020}, + 0x3000: []rune{0x0020}, +} + +// SASLprep is a pre-defined stringprep profile for user names and passwords +// as described in RFC-4013. +// +// Because the stringprep distinction between query and stored strings was +// intended for compatibility across profile versions, but SASLprep was never +// updated and is now deprecated, this profile only operates in stored +// strings mode, prohibiting unassigned code points. 
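+// Illustrative behavior of this profile (editor's sketch, not part of the
+// upstream source):
+//
+//	s, err := SASLprep.Prepare("a\u00ADb") // U+00AD (soft hyphen) is in Table B.1
+//	// s == "ab", err == nil: the rune is mapped to nothing, and "ab" is NFKC-stable
+//
+//	_, err = SASLprep.Prepare("a\u0007b") // U+0007 is in Table C.2.1
+//	// err != nil: "prohibited character (rune: '\u0007')"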
+var SASLprep Profile = saslprep + +var saslprep = Profile{ + Mappings: []Mapping{ + TableB1, + mapNonASCIISpaceToASCIISpace, + }, + Normalize: true, + Prohibits: []Set{ + TableA1, + TableC1_2, + TableC2_1, + TableC2_2, + TableC3, + TableC4, + TableC5, + TableC6, + TableC7, + TableC8, + TableC9, + }, + CheckBiDi: true, +} diff --git a/hotelReservation/vendor/github.com/xdg-go/stringprep/set.go b/hotelReservation/vendor/github.com/xdg-go/stringprep/set.go new file mode 100644 index 000000000..c837e28c8 --- /dev/null +++ b/hotelReservation/vendor/github.com/xdg-go/stringprep/set.go @@ -0,0 +1,36 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package stringprep + +import "sort" + +// RuneRange represents a close-ended range of runes: [N,M]. For a range +// consisting of a single rune, N and M will be equal. +type RuneRange [2]rune + +// Contains returns true if a rune is within the bounds of the RuneRange. +func (rr RuneRange) Contains(r rune) bool { + return rr[0] <= r && r <= rr[1] +} + +func (rr RuneRange) isAbove(r rune) bool { + return r <= rr[0] +} + +// Set represents a stringprep data table used to identify runes of a +// particular type. +type Set []RuneRange + +// Contains returns true if a rune is within any of the RuneRanges in the +// Set. +func (s Set) Contains(r rune) bool { + i := sort.Search(len(s), func(i int) bool { return s[i].Contains(r) || s[i].isAbove(r) }) + if i < len(s) && s[i].Contains(r) { + return true + } + return false +} diff --git a/hotelReservation/vendor/github.com/xdg-go/stringprep/tables.go b/hotelReservation/vendor/github.com/xdg-go/stringprep/tables.go new file mode 100644 index 000000000..c3fc1fa0f --- /dev/null +++ b/hotelReservation/vendor/github.com/xdg-go/stringprep/tables.go @@ -0,0 +1,3215 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package stringprep + +var tableA1 = Set{ + RuneRange{0x0221, 0x0221}, + RuneRange{0x0234, 0x024F}, + RuneRange{0x02AE, 0x02AF}, + RuneRange{0x02EF, 0x02FF}, + RuneRange{0x0350, 0x035F}, + RuneRange{0x0370, 0x0373}, + RuneRange{0x0376, 0x0379}, + RuneRange{0x037B, 0x037D}, + RuneRange{0x037F, 0x0383}, + RuneRange{0x038B, 0x038B}, + RuneRange{0x038D, 0x038D}, + RuneRange{0x03A2, 0x03A2}, + RuneRange{0x03CF, 0x03CF}, + RuneRange{0x03F7, 0x03FF}, + RuneRange{0x0487, 0x0487}, + RuneRange{0x04CF, 0x04CF}, + RuneRange{0x04F6, 0x04F7}, + RuneRange{0x04FA, 0x04FF}, + RuneRange{0x0510, 0x0530}, + RuneRange{0x0557, 0x0558}, + RuneRange{0x0560, 0x0560}, + RuneRange{0x0588, 0x0588}, + RuneRange{0x058B, 0x0590}, + RuneRange{0x05A2, 0x05A2}, + RuneRange{0x05BA, 0x05BA}, + RuneRange{0x05C5, 0x05CF}, + RuneRange{0x05EB, 0x05EF}, + RuneRange{0x05F5, 0x060B}, + RuneRange{0x060D, 0x061A}, + RuneRange{0x061C, 0x061E}, + RuneRange{0x0620, 0x0620}, + RuneRange{0x063B, 0x063F}, + RuneRange{0x0656, 0x065F}, + RuneRange{0x06EE, 0x06EF}, + RuneRange{0x06FF, 0x06FF}, + RuneRange{0x070E, 0x070E}, + RuneRange{0x072D, 0x072F}, + RuneRange{0x074B, 0x077F}, + RuneRange{0x07B2, 0x0900}, + RuneRange{0x0904, 0x0904}, + RuneRange{0x093A, 0x093B}, + RuneRange{0x094E, 0x094F}, + RuneRange{0x0955, 0x0957}, + RuneRange{0x0971, 0x0980}, + RuneRange{0x0984, 0x0984}, + RuneRange{0x098D, 0x098E}, + RuneRange{0x0991, 0x0992}, + RuneRange{0x09A9, 0x09A9}, + RuneRange{0x09B1, 0x09B1}, + RuneRange{0x09B3, 0x09B5}, + RuneRange{0x09BA, 0x09BB}, + RuneRange{0x09BD, 0x09BD}, + RuneRange{0x09C5, 0x09C6}, + RuneRange{0x09C9, 0x09CA}, + RuneRange{0x09CE, 0x09D6}, + RuneRange{0x09D8, 0x09DB}, + RuneRange{0x09DE, 0x09DE}, + RuneRange{0x09E4, 0x09E5}, + RuneRange{0x09FB, 0x0A01}, + RuneRange{0x0A03, 0x0A04}, + RuneRange{0x0A0B, 0x0A0E}, + RuneRange{0x0A11, 0x0A12}, + RuneRange{0x0A29, 0x0A29}, + RuneRange{0x0A31, 0x0A31}, + RuneRange{0x0A34, 0x0A34}, + RuneRange{0x0A37, 0x0A37}, + RuneRange{0x0A3A, 0x0A3B}, + RuneRange{0x0A3D, 0x0A3D}, + RuneRange{0x0A43, 0x0A46}, + RuneRange{0x0A49, 0x0A4A}, + RuneRange{0x0A4E, 0x0A58}, + RuneRange{0x0A5D, 0x0A5D}, + RuneRange{0x0A5F, 0x0A65}, + RuneRange{0x0A75, 0x0A80}, + RuneRange{0x0A84, 0x0A84}, + RuneRange{0x0A8C, 0x0A8C}, + RuneRange{0x0A8E, 0x0A8E}, + RuneRange{0x0A92, 0x0A92}, + RuneRange{0x0AA9, 0x0AA9}, + RuneRange{0x0AB1, 0x0AB1}, + RuneRange{0x0AB4, 0x0AB4}, + RuneRange{0x0ABA, 0x0ABB}, + RuneRange{0x0AC6, 0x0AC6}, + RuneRange{0x0ACA, 0x0ACA}, + RuneRange{0x0ACE, 0x0ACF}, + RuneRange{0x0AD1, 0x0ADF}, + RuneRange{0x0AE1, 0x0AE5}, + RuneRange{0x0AF0, 0x0B00}, + RuneRange{0x0B04, 0x0B04}, + RuneRange{0x0B0D, 0x0B0E}, + RuneRange{0x0B11, 0x0B12}, + RuneRange{0x0B29, 0x0B29}, + RuneRange{0x0B31, 0x0B31}, + RuneRange{0x0B34, 0x0B35}, + RuneRange{0x0B3A, 0x0B3B}, + RuneRange{0x0B44, 0x0B46}, + RuneRange{0x0B49, 0x0B4A}, + RuneRange{0x0B4E, 0x0B55}, + RuneRange{0x0B58, 0x0B5B}, + RuneRange{0x0B5E, 0x0B5E}, + RuneRange{0x0B62, 0x0B65}, + RuneRange{0x0B71, 0x0B81}, + RuneRange{0x0B84, 0x0B84}, + RuneRange{0x0B8B, 0x0B8D}, + RuneRange{0x0B91, 0x0B91}, + RuneRange{0x0B96, 0x0B98}, + RuneRange{0x0B9B, 0x0B9B}, + RuneRange{0x0B9D, 0x0B9D}, + RuneRange{0x0BA0, 0x0BA2}, + RuneRange{0x0BA5, 0x0BA7}, + RuneRange{0x0BAB, 0x0BAD}, + RuneRange{0x0BB6, 0x0BB6}, + RuneRange{0x0BBA, 0x0BBD}, + RuneRange{0x0BC3, 0x0BC5}, + RuneRange{0x0BC9, 0x0BC9}, + RuneRange{0x0BCE, 0x0BD6}, + RuneRange{0x0BD8, 0x0BE6}, + RuneRange{0x0BF3, 0x0C00}, + 
RuneRange{0x0C04, 0x0C04}, + RuneRange{0x0C0D, 0x0C0D}, + RuneRange{0x0C11, 0x0C11}, + RuneRange{0x0C29, 0x0C29}, + RuneRange{0x0C34, 0x0C34}, + RuneRange{0x0C3A, 0x0C3D}, + RuneRange{0x0C45, 0x0C45}, + RuneRange{0x0C49, 0x0C49}, + RuneRange{0x0C4E, 0x0C54}, + RuneRange{0x0C57, 0x0C5F}, + RuneRange{0x0C62, 0x0C65}, + RuneRange{0x0C70, 0x0C81}, + RuneRange{0x0C84, 0x0C84}, + RuneRange{0x0C8D, 0x0C8D}, + RuneRange{0x0C91, 0x0C91}, + RuneRange{0x0CA9, 0x0CA9}, + RuneRange{0x0CB4, 0x0CB4}, + RuneRange{0x0CBA, 0x0CBD}, + RuneRange{0x0CC5, 0x0CC5}, + RuneRange{0x0CC9, 0x0CC9}, + RuneRange{0x0CCE, 0x0CD4}, + RuneRange{0x0CD7, 0x0CDD}, + RuneRange{0x0CDF, 0x0CDF}, + RuneRange{0x0CE2, 0x0CE5}, + RuneRange{0x0CF0, 0x0D01}, + RuneRange{0x0D04, 0x0D04}, + RuneRange{0x0D0D, 0x0D0D}, + RuneRange{0x0D11, 0x0D11}, + RuneRange{0x0D29, 0x0D29}, + RuneRange{0x0D3A, 0x0D3D}, + RuneRange{0x0D44, 0x0D45}, + RuneRange{0x0D49, 0x0D49}, + RuneRange{0x0D4E, 0x0D56}, + RuneRange{0x0D58, 0x0D5F}, + RuneRange{0x0D62, 0x0D65}, + RuneRange{0x0D70, 0x0D81}, + RuneRange{0x0D84, 0x0D84}, + RuneRange{0x0D97, 0x0D99}, + RuneRange{0x0DB2, 0x0DB2}, + RuneRange{0x0DBC, 0x0DBC}, + RuneRange{0x0DBE, 0x0DBF}, + RuneRange{0x0DC7, 0x0DC9}, + RuneRange{0x0DCB, 0x0DCE}, + RuneRange{0x0DD5, 0x0DD5}, + RuneRange{0x0DD7, 0x0DD7}, + RuneRange{0x0DE0, 0x0DF1}, + RuneRange{0x0DF5, 0x0E00}, + RuneRange{0x0E3B, 0x0E3E}, + RuneRange{0x0E5C, 0x0E80}, + RuneRange{0x0E83, 0x0E83}, + RuneRange{0x0E85, 0x0E86}, + RuneRange{0x0E89, 0x0E89}, + RuneRange{0x0E8B, 0x0E8C}, + RuneRange{0x0E8E, 0x0E93}, + RuneRange{0x0E98, 0x0E98}, + RuneRange{0x0EA0, 0x0EA0}, + RuneRange{0x0EA4, 0x0EA4}, + RuneRange{0x0EA6, 0x0EA6}, + RuneRange{0x0EA8, 0x0EA9}, + RuneRange{0x0EAC, 0x0EAC}, + RuneRange{0x0EBA, 0x0EBA}, + RuneRange{0x0EBE, 0x0EBF}, + RuneRange{0x0EC5, 0x0EC5}, + RuneRange{0x0EC7, 0x0EC7}, + RuneRange{0x0ECE, 0x0ECF}, + RuneRange{0x0EDA, 0x0EDB}, + RuneRange{0x0EDE, 0x0EFF}, + RuneRange{0x0F48, 0x0F48}, + RuneRange{0x0F6B, 0x0F70}, + RuneRange{0x0F8C, 0x0F8F}, + RuneRange{0x0F98, 0x0F98}, + RuneRange{0x0FBD, 0x0FBD}, + RuneRange{0x0FCD, 0x0FCE}, + RuneRange{0x0FD0, 0x0FFF}, + RuneRange{0x1022, 0x1022}, + RuneRange{0x1028, 0x1028}, + RuneRange{0x102B, 0x102B}, + RuneRange{0x1033, 0x1035}, + RuneRange{0x103A, 0x103F}, + RuneRange{0x105A, 0x109F}, + RuneRange{0x10C6, 0x10CF}, + RuneRange{0x10F9, 0x10FA}, + RuneRange{0x10FC, 0x10FF}, + RuneRange{0x115A, 0x115E}, + RuneRange{0x11A3, 0x11A7}, + RuneRange{0x11FA, 0x11FF}, + RuneRange{0x1207, 0x1207}, + RuneRange{0x1247, 0x1247}, + RuneRange{0x1249, 0x1249}, + RuneRange{0x124E, 0x124F}, + RuneRange{0x1257, 0x1257}, + RuneRange{0x1259, 0x1259}, + RuneRange{0x125E, 0x125F}, + RuneRange{0x1287, 0x1287}, + RuneRange{0x1289, 0x1289}, + RuneRange{0x128E, 0x128F}, + RuneRange{0x12AF, 0x12AF}, + RuneRange{0x12B1, 0x12B1}, + RuneRange{0x12B6, 0x12B7}, + RuneRange{0x12BF, 0x12BF}, + RuneRange{0x12C1, 0x12C1}, + RuneRange{0x12C6, 0x12C7}, + RuneRange{0x12CF, 0x12CF}, + RuneRange{0x12D7, 0x12D7}, + RuneRange{0x12EF, 0x12EF}, + RuneRange{0x130F, 0x130F}, + RuneRange{0x1311, 0x1311}, + RuneRange{0x1316, 0x1317}, + RuneRange{0x131F, 0x131F}, + RuneRange{0x1347, 0x1347}, + RuneRange{0x135B, 0x1360}, + RuneRange{0x137D, 0x139F}, + RuneRange{0x13F5, 0x1400}, + RuneRange{0x1677, 0x167F}, + RuneRange{0x169D, 0x169F}, + RuneRange{0x16F1, 0x16FF}, + RuneRange{0x170D, 0x170D}, + RuneRange{0x1715, 0x171F}, + RuneRange{0x1737, 0x173F}, + RuneRange{0x1754, 0x175F}, + RuneRange{0x176D, 0x176D}, + RuneRange{0x1771, 0x1771}, + 
RuneRange{0x1774, 0x177F}, + RuneRange{0x17DD, 0x17DF}, + RuneRange{0x17EA, 0x17FF}, + RuneRange{0x180F, 0x180F}, + RuneRange{0x181A, 0x181F}, + RuneRange{0x1878, 0x187F}, + RuneRange{0x18AA, 0x1DFF}, + RuneRange{0x1E9C, 0x1E9F}, + RuneRange{0x1EFA, 0x1EFF}, + RuneRange{0x1F16, 0x1F17}, + RuneRange{0x1F1E, 0x1F1F}, + RuneRange{0x1F46, 0x1F47}, + RuneRange{0x1F4E, 0x1F4F}, + RuneRange{0x1F58, 0x1F58}, + RuneRange{0x1F5A, 0x1F5A}, + RuneRange{0x1F5C, 0x1F5C}, + RuneRange{0x1F5E, 0x1F5E}, + RuneRange{0x1F7E, 0x1F7F}, + RuneRange{0x1FB5, 0x1FB5}, + RuneRange{0x1FC5, 0x1FC5}, + RuneRange{0x1FD4, 0x1FD5}, + RuneRange{0x1FDC, 0x1FDC}, + RuneRange{0x1FF0, 0x1FF1}, + RuneRange{0x1FF5, 0x1FF5}, + RuneRange{0x1FFF, 0x1FFF}, + RuneRange{0x2053, 0x2056}, + RuneRange{0x2058, 0x205E}, + RuneRange{0x2064, 0x2069}, + RuneRange{0x2072, 0x2073}, + RuneRange{0x208F, 0x209F}, + RuneRange{0x20B2, 0x20CF}, + RuneRange{0x20EB, 0x20FF}, + RuneRange{0x213B, 0x213C}, + RuneRange{0x214C, 0x2152}, + RuneRange{0x2184, 0x218F}, + RuneRange{0x23CF, 0x23FF}, + RuneRange{0x2427, 0x243F}, + RuneRange{0x244B, 0x245F}, + RuneRange{0x24FF, 0x24FF}, + RuneRange{0x2614, 0x2615}, + RuneRange{0x2618, 0x2618}, + RuneRange{0x267E, 0x267F}, + RuneRange{0x268A, 0x2700}, + RuneRange{0x2705, 0x2705}, + RuneRange{0x270A, 0x270B}, + RuneRange{0x2728, 0x2728}, + RuneRange{0x274C, 0x274C}, + RuneRange{0x274E, 0x274E}, + RuneRange{0x2753, 0x2755}, + RuneRange{0x2757, 0x2757}, + RuneRange{0x275F, 0x2760}, + RuneRange{0x2795, 0x2797}, + RuneRange{0x27B0, 0x27B0}, + RuneRange{0x27BF, 0x27CF}, + RuneRange{0x27EC, 0x27EF}, + RuneRange{0x2B00, 0x2E7F}, + RuneRange{0x2E9A, 0x2E9A}, + RuneRange{0x2EF4, 0x2EFF}, + RuneRange{0x2FD6, 0x2FEF}, + RuneRange{0x2FFC, 0x2FFF}, + RuneRange{0x3040, 0x3040}, + RuneRange{0x3097, 0x3098}, + RuneRange{0x3100, 0x3104}, + RuneRange{0x312D, 0x3130}, + RuneRange{0x318F, 0x318F}, + RuneRange{0x31B8, 0x31EF}, + RuneRange{0x321D, 0x321F}, + RuneRange{0x3244, 0x3250}, + RuneRange{0x327C, 0x327E}, + RuneRange{0x32CC, 0x32CF}, + RuneRange{0x32FF, 0x32FF}, + RuneRange{0x3377, 0x337A}, + RuneRange{0x33DE, 0x33DF}, + RuneRange{0x33FF, 0x33FF}, + RuneRange{0x4DB6, 0x4DFF}, + RuneRange{0x9FA6, 0x9FFF}, + RuneRange{0xA48D, 0xA48F}, + RuneRange{0xA4C7, 0xABFF}, + RuneRange{0xD7A4, 0xD7FF}, + RuneRange{0xFA2E, 0xFA2F}, + RuneRange{0xFA6B, 0xFAFF}, + RuneRange{0xFB07, 0xFB12}, + RuneRange{0xFB18, 0xFB1C}, + RuneRange{0xFB37, 0xFB37}, + RuneRange{0xFB3D, 0xFB3D}, + RuneRange{0xFB3F, 0xFB3F}, + RuneRange{0xFB42, 0xFB42}, + RuneRange{0xFB45, 0xFB45}, + RuneRange{0xFBB2, 0xFBD2}, + RuneRange{0xFD40, 0xFD4F}, + RuneRange{0xFD90, 0xFD91}, + RuneRange{0xFDC8, 0xFDCF}, + RuneRange{0xFDFD, 0xFDFF}, + RuneRange{0xFE10, 0xFE1F}, + RuneRange{0xFE24, 0xFE2F}, + RuneRange{0xFE47, 0xFE48}, + RuneRange{0xFE53, 0xFE53}, + RuneRange{0xFE67, 0xFE67}, + RuneRange{0xFE6C, 0xFE6F}, + RuneRange{0xFE75, 0xFE75}, + RuneRange{0xFEFD, 0xFEFE}, + RuneRange{0xFF00, 0xFF00}, + RuneRange{0xFFBF, 0xFFC1}, + RuneRange{0xFFC8, 0xFFC9}, + RuneRange{0xFFD0, 0xFFD1}, + RuneRange{0xFFD8, 0xFFD9}, + RuneRange{0xFFDD, 0xFFDF}, + RuneRange{0xFFE7, 0xFFE7}, + RuneRange{0xFFEF, 0xFFF8}, + RuneRange{0x10000, 0x102FF}, + RuneRange{0x1031F, 0x1031F}, + RuneRange{0x10324, 0x1032F}, + RuneRange{0x1034B, 0x103FF}, + RuneRange{0x10426, 0x10427}, + RuneRange{0x1044E, 0x1CFFF}, + RuneRange{0x1D0F6, 0x1D0FF}, + RuneRange{0x1D127, 0x1D129}, + RuneRange{0x1D1DE, 0x1D3FF}, + RuneRange{0x1D455, 0x1D455}, + RuneRange{0x1D49D, 0x1D49D}, + RuneRange{0x1D4A0, 0x1D4A1}, + RuneRange{0x1D4A3, 
0x1D4A4}, + RuneRange{0x1D4A7, 0x1D4A8}, + RuneRange{0x1D4AD, 0x1D4AD}, + RuneRange{0x1D4BA, 0x1D4BA}, + RuneRange{0x1D4BC, 0x1D4BC}, + RuneRange{0x1D4C1, 0x1D4C1}, + RuneRange{0x1D4C4, 0x1D4C4}, + RuneRange{0x1D506, 0x1D506}, + RuneRange{0x1D50B, 0x1D50C}, + RuneRange{0x1D515, 0x1D515}, + RuneRange{0x1D51D, 0x1D51D}, + RuneRange{0x1D53A, 0x1D53A}, + RuneRange{0x1D53F, 0x1D53F}, + RuneRange{0x1D545, 0x1D545}, + RuneRange{0x1D547, 0x1D549}, + RuneRange{0x1D551, 0x1D551}, + RuneRange{0x1D6A4, 0x1D6A7}, + RuneRange{0x1D7CA, 0x1D7CD}, + RuneRange{0x1D800, 0x1FFFD}, + RuneRange{0x2A6D7, 0x2F7FF}, + RuneRange{0x2FA1E, 0x2FFFD}, + RuneRange{0x30000, 0x3FFFD}, + RuneRange{0x40000, 0x4FFFD}, + RuneRange{0x50000, 0x5FFFD}, + RuneRange{0x60000, 0x6FFFD}, + RuneRange{0x70000, 0x7FFFD}, + RuneRange{0x80000, 0x8FFFD}, + RuneRange{0x90000, 0x9FFFD}, + RuneRange{0xA0000, 0xAFFFD}, + RuneRange{0xB0000, 0xBFFFD}, + RuneRange{0xC0000, 0xCFFFD}, + RuneRange{0xD0000, 0xDFFFD}, + RuneRange{0xE0000, 0xE0000}, + RuneRange{0xE0002, 0xE001F}, + RuneRange{0xE0080, 0xEFFFD}, +} + +// TableA1 represents RFC-3454 Table A.1. +var TableA1 Set = tableA1 + +var tableB1 = Mapping{ + 0x00AD: []rune{}, // Map to nothing + 0x034F: []rune{}, // Map to nothing + 0x180B: []rune{}, // Map to nothing + 0x180C: []rune{}, // Map to nothing + 0x180D: []rune{}, // Map to nothing + 0x200B: []rune{}, // Map to nothing + 0x200C: []rune{}, // Map to nothing + 0x200D: []rune{}, // Map to nothing + 0x2060: []rune{}, // Map to nothing + 0xFE00: []rune{}, // Map to nothing + 0xFE01: []rune{}, // Map to nothing + 0xFE02: []rune{}, // Map to nothing + 0xFE03: []rune{}, // Map to nothing + 0xFE04: []rune{}, // Map to nothing + 0xFE05: []rune{}, // Map to nothing + 0xFE06: []rune{}, // Map to nothing + 0xFE07: []rune{}, // Map to nothing + 0xFE08: []rune{}, // Map to nothing + 0xFE09: []rune{}, // Map to nothing + 0xFE0A: []rune{}, // Map to nothing + 0xFE0B: []rune{}, // Map to nothing + 0xFE0C: []rune{}, // Map to nothing + 0xFE0D: []rune{}, // Map to nothing + 0xFE0E: []rune{}, // Map to nothing + 0xFE0F: []rune{}, // Map to nothing + 0xFEFF: []rune{}, // Map to nothing +} + +// TableB1 represents RFC-3454 Table B.1. 
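+// An illustrative lookup (editor's sketch, not part of the upstream source):
+//
+//	rs, ok := TableB1.Map(0x00AD) // soft hyphen, "map to nothing"
+//	// ok == true and len(rs) == 0, so Prepare deletes the rune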
+var TableB1 Mapping = tableB1 + +var tableB2 = Mapping{ + 0x0041: []rune{0x0061}, // Case map + 0x0042: []rune{0x0062}, // Case map + 0x0043: []rune{0x0063}, // Case map + 0x0044: []rune{0x0064}, // Case map + 0x0045: []rune{0x0065}, // Case map + 0x0046: []rune{0x0066}, // Case map + 0x0047: []rune{0x0067}, // Case map + 0x0048: []rune{0x0068}, // Case map + 0x0049: []rune{0x0069}, // Case map + 0x004A: []rune{0x006A}, // Case map + 0x004B: []rune{0x006B}, // Case map + 0x004C: []rune{0x006C}, // Case map + 0x004D: []rune{0x006D}, // Case map + 0x004E: []rune{0x006E}, // Case map + 0x004F: []rune{0x006F}, // Case map + 0x0050: []rune{0x0070}, // Case map + 0x0051: []rune{0x0071}, // Case map + 0x0052: []rune{0x0072}, // Case map + 0x0053: []rune{0x0073}, // Case map + 0x0054: []rune{0x0074}, // Case map + 0x0055: []rune{0x0075}, // Case map + 0x0056: []rune{0x0076}, // Case map + 0x0057: []rune{0x0077}, // Case map + 0x0058: []rune{0x0078}, // Case map + 0x0059: []rune{0x0079}, // Case map + 0x005A: []rune{0x007A}, // Case map + 0x00B5: []rune{0x03BC}, // Case map + 0x00C0: []rune{0x00E0}, // Case map + 0x00C1: []rune{0x00E1}, // Case map + 0x00C2: []rune{0x00E2}, // Case map + 0x00C3: []rune{0x00E3}, // Case map + 0x00C4: []rune{0x00E4}, // Case map + 0x00C5: []rune{0x00E5}, // Case map + 0x00C6: []rune{0x00E6}, // Case map + 0x00C7: []rune{0x00E7}, // Case map + 0x00C8: []rune{0x00E8}, // Case map + 0x00C9: []rune{0x00E9}, // Case map + 0x00CA: []rune{0x00EA}, // Case map + 0x00CB: []rune{0x00EB}, // Case map + 0x00CC: []rune{0x00EC}, // Case map + 0x00CD: []rune{0x00ED}, // Case map + 0x00CE: []rune{0x00EE}, // Case map + 0x00CF: []rune{0x00EF}, // Case map + 0x00D0: []rune{0x00F0}, // Case map + 0x00D1: []rune{0x00F1}, // Case map + 0x00D2: []rune{0x00F2}, // Case map + 0x00D3: []rune{0x00F3}, // Case map + 0x00D4: []rune{0x00F4}, // Case map + 0x00D5: []rune{0x00F5}, // Case map + 0x00D6: []rune{0x00F6}, // Case map + 0x00D8: []rune{0x00F8}, // Case map + 0x00D9: []rune{0x00F9}, // Case map + 0x00DA: []rune{0x00FA}, // Case map + 0x00DB: []rune{0x00FB}, // Case map + 0x00DC: []rune{0x00FC}, // Case map + 0x00DD: []rune{0x00FD}, // Case map + 0x00DE: []rune{0x00FE}, // Case map + 0x00DF: []rune{0x0073, 0x0073}, // Case map + 0x0100: []rune{0x0101}, // Case map + 0x0102: []rune{0x0103}, // Case map + 0x0104: []rune{0x0105}, // Case map + 0x0106: []rune{0x0107}, // Case map + 0x0108: []rune{0x0109}, // Case map + 0x010A: []rune{0x010B}, // Case map + 0x010C: []rune{0x010D}, // Case map + 0x010E: []rune{0x010F}, // Case map + 0x0110: []rune{0x0111}, // Case map + 0x0112: []rune{0x0113}, // Case map + 0x0114: []rune{0x0115}, // Case map + 0x0116: []rune{0x0117}, // Case map + 0x0118: []rune{0x0119}, // Case map + 0x011A: []rune{0x011B}, // Case map + 0x011C: []rune{0x011D}, // Case map + 0x011E: []rune{0x011F}, // Case map + 0x0120: []rune{0x0121}, // Case map + 0x0122: []rune{0x0123}, // Case map + 0x0124: []rune{0x0125}, // Case map + 0x0126: []rune{0x0127}, // Case map + 0x0128: []rune{0x0129}, // Case map + 0x012A: []rune{0x012B}, // Case map + 0x012C: []rune{0x012D}, // Case map + 0x012E: []rune{0x012F}, // Case map + 0x0130: []rune{0x0069, 0x0307}, // Case map + 0x0132: []rune{0x0133}, // Case map + 0x0134: []rune{0x0135}, // Case map + 0x0136: []rune{0x0137}, // Case map + 0x0139: []rune{0x013A}, // Case map + 0x013B: []rune{0x013C}, // Case map + 0x013D: []rune{0x013E}, // Case map + 0x013F: []rune{0x0140}, // Case map + 0x0141: []rune{0x0142}, // Case map + 0x0143: 
[]rune{0x0144}, // Case map + 0x0145: []rune{0x0146}, // Case map + 0x0147: []rune{0x0148}, // Case map + 0x0149: []rune{0x02BC, 0x006E}, // Case map + 0x014A: []rune{0x014B}, // Case map + 0x014C: []rune{0x014D}, // Case map + 0x014E: []rune{0x014F}, // Case map + 0x0150: []rune{0x0151}, // Case map + 0x0152: []rune{0x0153}, // Case map + 0x0154: []rune{0x0155}, // Case map + 0x0156: []rune{0x0157}, // Case map + 0x0158: []rune{0x0159}, // Case map + 0x015A: []rune{0x015B}, // Case map + 0x015C: []rune{0x015D}, // Case map + 0x015E: []rune{0x015F}, // Case map + 0x0160: []rune{0x0161}, // Case map + 0x0162: []rune{0x0163}, // Case map + 0x0164: []rune{0x0165}, // Case map + 0x0166: []rune{0x0167}, // Case map + 0x0168: []rune{0x0169}, // Case map + 0x016A: []rune{0x016B}, // Case map + 0x016C: []rune{0x016D}, // Case map + 0x016E: []rune{0x016F}, // Case map + 0x0170: []rune{0x0171}, // Case map + 0x0172: []rune{0x0173}, // Case map + 0x0174: []rune{0x0175}, // Case map + 0x0176: []rune{0x0177}, // Case map + 0x0178: []rune{0x00FF}, // Case map + 0x0179: []rune{0x017A}, // Case map + 0x017B: []rune{0x017C}, // Case map + 0x017D: []rune{0x017E}, // Case map + 0x017F: []rune{0x0073}, // Case map + 0x0181: []rune{0x0253}, // Case map + 0x0182: []rune{0x0183}, // Case map + 0x0184: []rune{0x0185}, // Case map + 0x0186: []rune{0x0254}, // Case map + 0x0187: []rune{0x0188}, // Case map + 0x0189: []rune{0x0256}, // Case map + 0x018A: []rune{0x0257}, // Case map + 0x018B: []rune{0x018C}, // Case map + 0x018E: []rune{0x01DD}, // Case map + 0x018F: []rune{0x0259}, // Case map + 0x0190: []rune{0x025B}, // Case map + 0x0191: []rune{0x0192}, // Case map + 0x0193: []rune{0x0260}, // Case map + 0x0194: []rune{0x0263}, // Case map + 0x0196: []rune{0x0269}, // Case map + 0x0197: []rune{0x0268}, // Case map + 0x0198: []rune{0x0199}, // Case map + 0x019C: []rune{0x026F}, // Case map + 0x019D: []rune{0x0272}, // Case map + 0x019F: []rune{0x0275}, // Case map + 0x01A0: []rune{0x01A1}, // Case map + 0x01A2: []rune{0x01A3}, // Case map + 0x01A4: []rune{0x01A5}, // Case map + 0x01A6: []rune{0x0280}, // Case map + 0x01A7: []rune{0x01A8}, // Case map + 0x01A9: []rune{0x0283}, // Case map + 0x01AC: []rune{0x01AD}, // Case map + 0x01AE: []rune{0x0288}, // Case map + 0x01AF: []rune{0x01B0}, // Case map + 0x01B1: []rune{0x028A}, // Case map + 0x01B2: []rune{0x028B}, // Case map + 0x01B3: []rune{0x01B4}, // Case map + 0x01B5: []rune{0x01B6}, // Case map + 0x01B7: []rune{0x0292}, // Case map + 0x01B8: []rune{0x01B9}, // Case map + 0x01BC: []rune{0x01BD}, // Case map + 0x01C4: []rune{0x01C6}, // Case map + 0x01C5: []rune{0x01C6}, // Case map + 0x01C7: []rune{0x01C9}, // Case map + 0x01C8: []rune{0x01C9}, // Case map + 0x01CA: []rune{0x01CC}, // Case map + 0x01CB: []rune{0x01CC}, // Case map + 0x01CD: []rune{0x01CE}, // Case map + 0x01CF: []rune{0x01D0}, // Case map + 0x01D1: []rune{0x01D2}, // Case map + 0x01D3: []rune{0x01D4}, // Case map + 0x01D5: []rune{0x01D6}, // Case map + 0x01D7: []rune{0x01D8}, // Case map + 0x01D9: []rune{0x01DA}, // Case map + 0x01DB: []rune{0x01DC}, // Case map + 0x01DE: []rune{0x01DF}, // Case map + 0x01E0: []rune{0x01E1}, // Case map + 0x01E2: []rune{0x01E3}, // Case map + 0x01E4: []rune{0x01E5}, // Case map + 0x01E6: []rune{0x01E7}, // Case map + 0x01E8: []rune{0x01E9}, // Case map + 0x01EA: []rune{0x01EB}, // Case map + 0x01EC: []rune{0x01ED}, // Case map + 0x01EE: []rune{0x01EF}, // Case map + 0x01F0: []rune{0x006A, 0x030C}, // Case map + 0x01F1: []rune{0x01F3}, // Case map + 0x01F2: 
[]rune{0x01F3}, // Case map + 0x01F4: []rune{0x01F5}, // Case map + 0x01F6: []rune{0x0195}, // Case map + 0x01F7: []rune{0x01BF}, // Case map + 0x01F8: []rune{0x01F9}, // Case map + 0x01FA: []rune{0x01FB}, // Case map + 0x01FC: []rune{0x01FD}, // Case map + 0x01FE: []rune{0x01FF}, // Case map + 0x0200: []rune{0x0201}, // Case map + 0x0202: []rune{0x0203}, // Case map + 0x0204: []rune{0x0205}, // Case map + 0x0206: []rune{0x0207}, // Case map + 0x0208: []rune{0x0209}, // Case map + 0x020A: []rune{0x020B}, // Case map + 0x020C: []rune{0x020D}, // Case map + 0x020E: []rune{0x020F}, // Case map + 0x0210: []rune{0x0211}, // Case map + 0x0212: []rune{0x0213}, // Case map + 0x0214: []rune{0x0215}, // Case map + 0x0216: []rune{0x0217}, // Case map + 0x0218: []rune{0x0219}, // Case map + 0x021A: []rune{0x021B}, // Case map + 0x021C: []rune{0x021D}, // Case map + 0x021E: []rune{0x021F}, // Case map + 0x0220: []rune{0x019E}, // Case map + 0x0222: []rune{0x0223}, // Case map + 0x0224: []rune{0x0225}, // Case map + 0x0226: []rune{0x0227}, // Case map + 0x0228: []rune{0x0229}, // Case map + 0x022A: []rune{0x022B}, // Case map + 0x022C: []rune{0x022D}, // Case map + 0x022E: []rune{0x022F}, // Case map + 0x0230: []rune{0x0231}, // Case map + 0x0232: []rune{0x0233}, // Case map + 0x0345: []rune{0x03B9}, // Case map + 0x037A: []rune{0x0020, 0x03B9}, // Additional folding + 0x0386: []rune{0x03AC}, // Case map + 0x0388: []rune{0x03AD}, // Case map + 0x0389: []rune{0x03AE}, // Case map + 0x038A: []rune{0x03AF}, // Case map + 0x038C: []rune{0x03CC}, // Case map + 0x038E: []rune{0x03CD}, // Case map + 0x038F: []rune{0x03CE}, // Case map + 0x0390: []rune{0x03B9, 0x0308, 0x0301}, // Case map + 0x0391: []rune{0x03B1}, // Case map + 0x0392: []rune{0x03B2}, // Case map + 0x0393: []rune{0x03B3}, // Case map + 0x0394: []rune{0x03B4}, // Case map + 0x0395: []rune{0x03B5}, // Case map + 0x0396: []rune{0x03B6}, // Case map + 0x0397: []rune{0x03B7}, // Case map + 0x0398: []rune{0x03B8}, // Case map + 0x0399: []rune{0x03B9}, // Case map + 0x039A: []rune{0x03BA}, // Case map + 0x039B: []rune{0x03BB}, // Case map + 0x039C: []rune{0x03BC}, // Case map + 0x039D: []rune{0x03BD}, // Case map + 0x039E: []rune{0x03BE}, // Case map + 0x039F: []rune{0x03BF}, // Case map + 0x03A0: []rune{0x03C0}, // Case map + 0x03A1: []rune{0x03C1}, // Case map + 0x03A3: []rune{0x03C3}, // Case map + 0x03A4: []rune{0x03C4}, // Case map + 0x03A5: []rune{0x03C5}, // Case map + 0x03A6: []rune{0x03C6}, // Case map + 0x03A7: []rune{0x03C7}, // Case map + 0x03A8: []rune{0x03C8}, // Case map + 0x03A9: []rune{0x03C9}, // Case map + 0x03AA: []rune{0x03CA}, // Case map + 0x03AB: []rune{0x03CB}, // Case map + 0x03B0: []rune{0x03C5, 0x0308, 0x0301}, // Case map + 0x03C2: []rune{0x03C3}, // Case map + 0x03D0: []rune{0x03B2}, // Case map + 0x03D1: []rune{0x03B8}, // Case map + 0x03D2: []rune{0x03C5}, // Additional folding + 0x03D3: []rune{0x03CD}, // Additional folding + 0x03D4: []rune{0x03CB}, // Additional folding + 0x03D5: []rune{0x03C6}, // Case map + 0x03D6: []rune{0x03C0}, // Case map + 0x03D8: []rune{0x03D9}, // Case map + 0x03DA: []rune{0x03DB}, // Case map + 0x03DC: []rune{0x03DD}, // Case map + 0x03DE: []rune{0x03DF}, // Case map + 0x03E0: []rune{0x03E1}, // Case map + 0x03E2: []rune{0x03E3}, // Case map + 0x03E4: []rune{0x03E5}, // Case map + 0x03E6: []rune{0x03E7}, // Case map + 0x03E8: []rune{0x03E9}, // Case map + 0x03EA: []rune{0x03EB}, // Case map + 0x03EC: []rune{0x03ED}, // Case map + 0x03EE: []rune{0x03EF}, // Case map + 0x03F0: []rune{0x03BA}, 
// Case map + 0x03F1: []rune{0x03C1}, // Case map + 0x03F2: []rune{0x03C3}, // Case map + 0x03F4: []rune{0x03B8}, // Case map + 0x03F5: []rune{0x03B5}, // Case map + 0x0400: []rune{0x0450}, // Case map + 0x0401: []rune{0x0451}, // Case map + 0x0402: []rune{0x0452}, // Case map + 0x0403: []rune{0x0453}, // Case map + 0x0404: []rune{0x0454}, // Case map + 0x0405: []rune{0x0455}, // Case map + 0x0406: []rune{0x0456}, // Case map + 0x0407: []rune{0x0457}, // Case map + 0x0408: []rune{0x0458}, // Case map + 0x0409: []rune{0x0459}, // Case map + 0x040A: []rune{0x045A}, // Case map + 0x040B: []rune{0x045B}, // Case map + 0x040C: []rune{0x045C}, // Case map + 0x040D: []rune{0x045D}, // Case map + 0x040E: []rune{0x045E}, // Case map + 0x040F: []rune{0x045F}, // Case map + 0x0410: []rune{0x0430}, // Case map + 0x0411: []rune{0x0431}, // Case map + 0x0412: []rune{0x0432}, // Case map + 0x0413: []rune{0x0433}, // Case map + 0x0414: []rune{0x0434}, // Case map + 0x0415: []rune{0x0435}, // Case map + 0x0416: []rune{0x0436}, // Case map + 0x0417: []rune{0x0437}, // Case map + 0x0418: []rune{0x0438}, // Case map + 0x0419: []rune{0x0439}, // Case map + 0x041A: []rune{0x043A}, // Case map + 0x041B: []rune{0x043B}, // Case map + 0x041C: []rune{0x043C}, // Case map + 0x041D: []rune{0x043D}, // Case map + 0x041E: []rune{0x043E}, // Case map + 0x041F: []rune{0x043F}, // Case map + 0x0420: []rune{0x0440}, // Case map + 0x0421: []rune{0x0441}, // Case map + 0x0422: []rune{0x0442}, // Case map + 0x0423: []rune{0x0443}, // Case map + 0x0424: []rune{0x0444}, // Case map + 0x0425: []rune{0x0445}, // Case map + 0x0426: []rune{0x0446}, // Case map + 0x0427: []rune{0x0447}, // Case map + 0x0428: []rune{0x0448}, // Case map + 0x0429: []rune{0x0449}, // Case map + 0x042A: []rune{0x044A}, // Case map + 0x042B: []rune{0x044B}, // Case map + 0x042C: []rune{0x044C}, // Case map + 0x042D: []rune{0x044D}, // Case map + 0x042E: []rune{0x044E}, // Case map + 0x042F: []rune{0x044F}, // Case map + 0x0460: []rune{0x0461}, // Case map + 0x0462: []rune{0x0463}, // Case map + 0x0464: []rune{0x0465}, // Case map + 0x0466: []rune{0x0467}, // Case map + 0x0468: []rune{0x0469}, // Case map + 0x046A: []rune{0x046B}, // Case map + 0x046C: []rune{0x046D}, // Case map + 0x046E: []rune{0x046F}, // Case map + 0x0470: []rune{0x0471}, // Case map + 0x0472: []rune{0x0473}, // Case map + 0x0474: []rune{0x0475}, // Case map + 0x0476: []rune{0x0477}, // Case map + 0x0478: []rune{0x0479}, // Case map + 0x047A: []rune{0x047B}, // Case map + 0x047C: []rune{0x047D}, // Case map + 0x047E: []rune{0x047F}, // Case map + 0x0480: []rune{0x0481}, // Case map + 0x048A: []rune{0x048B}, // Case map + 0x048C: []rune{0x048D}, // Case map + 0x048E: []rune{0x048F}, // Case map + 0x0490: []rune{0x0491}, // Case map + 0x0492: []rune{0x0493}, // Case map + 0x0494: []rune{0x0495}, // Case map + 0x0496: []rune{0x0497}, // Case map + 0x0498: []rune{0x0499}, // Case map + 0x049A: []rune{0x049B}, // Case map + 0x049C: []rune{0x049D}, // Case map + 0x049E: []rune{0x049F}, // Case map + 0x04A0: []rune{0x04A1}, // Case map + 0x04A2: []rune{0x04A3}, // Case map + 0x04A4: []rune{0x04A5}, // Case map + 0x04A6: []rune{0x04A7}, // Case map + 0x04A8: []rune{0x04A9}, // Case map + 0x04AA: []rune{0x04AB}, // Case map + 0x04AC: []rune{0x04AD}, // Case map + 0x04AE: []rune{0x04AF}, // Case map + 0x04B0: []rune{0x04B1}, // Case map + 0x04B2: []rune{0x04B3}, // Case map + 0x04B4: []rune{0x04B5}, // Case map + 0x04B6: []rune{0x04B7}, // Case map + 0x04B8: []rune{0x04B9}, // Case map + 
0x04BA: []rune{0x04BB}, // Case map + 0x04BC: []rune{0x04BD}, // Case map + 0x04BE: []rune{0x04BF}, // Case map + 0x04C1: []rune{0x04C2}, // Case map + 0x04C3: []rune{0x04C4}, // Case map + 0x04C5: []rune{0x04C6}, // Case map + 0x04C7: []rune{0x04C8}, // Case map + 0x04C9: []rune{0x04CA}, // Case map + 0x04CB: []rune{0x04CC}, // Case map + 0x04CD: []rune{0x04CE}, // Case map + 0x04D0: []rune{0x04D1}, // Case map + 0x04D2: []rune{0x04D3}, // Case map + 0x04D4: []rune{0x04D5}, // Case map + 0x04D6: []rune{0x04D7}, // Case map + 0x04D8: []rune{0x04D9}, // Case map + 0x04DA: []rune{0x04DB}, // Case map + 0x04DC: []rune{0x04DD}, // Case map + 0x04DE: []rune{0x04DF}, // Case map + 0x04E0: []rune{0x04E1}, // Case map + 0x04E2: []rune{0x04E3}, // Case map + 0x04E4: []rune{0x04E5}, // Case map + 0x04E6: []rune{0x04E7}, // Case map + 0x04E8: []rune{0x04E9}, // Case map + 0x04EA: []rune{0x04EB}, // Case map + 0x04EC: []rune{0x04ED}, // Case map + 0x04EE: []rune{0x04EF}, // Case map + 0x04F0: []rune{0x04F1}, // Case map + 0x04F2: []rune{0x04F3}, // Case map + 0x04F4: []rune{0x04F5}, // Case map + 0x04F8: []rune{0x04F9}, // Case map + 0x0500: []rune{0x0501}, // Case map + 0x0502: []rune{0x0503}, // Case map + 0x0504: []rune{0x0505}, // Case map + 0x0506: []rune{0x0507}, // Case map + 0x0508: []rune{0x0509}, // Case map + 0x050A: []rune{0x050B}, // Case map + 0x050C: []rune{0x050D}, // Case map + 0x050E: []rune{0x050F}, // Case map + 0x0531: []rune{0x0561}, // Case map + 0x0532: []rune{0x0562}, // Case map + 0x0533: []rune{0x0563}, // Case map + 0x0534: []rune{0x0564}, // Case map + 0x0535: []rune{0x0565}, // Case map + 0x0536: []rune{0x0566}, // Case map + 0x0537: []rune{0x0567}, // Case map + 0x0538: []rune{0x0568}, // Case map + 0x0539: []rune{0x0569}, // Case map + 0x053A: []rune{0x056A}, // Case map + 0x053B: []rune{0x056B}, // Case map + 0x053C: []rune{0x056C}, // Case map + 0x053D: []rune{0x056D}, // Case map + 0x053E: []rune{0x056E}, // Case map + 0x053F: []rune{0x056F}, // Case map + 0x0540: []rune{0x0570}, // Case map + 0x0541: []rune{0x0571}, // Case map + 0x0542: []rune{0x0572}, // Case map + 0x0543: []rune{0x0573}, // Case map + 0x0544: []rune{0x0574}, // Case map + 0x0545: []rune{0x0575}, // Case map + 0x0546: []rune{0x0576}, // Case map + 0x0547: []rune{0x0577}, // Case map + 0x0548: []rune{0x0578}, // Case map + 0x0549: []rune{0x0579}, // Case map + 0x054A: []rune{0x057A}, // Case map + 0x054B: []rune{0x057B}, // Case map + 0x054C: []rune{0x057C}, // Case map + 0x054D: []rune{0x057D}, // Case map + 0x054E: []rune{0x057E}, // Case map + 0x054F: []rune{0x057F}, // Case map + 0x0550: []rune{0x0580}, // Case map + 0x0551: []rune{0x0581}, // Case map + 0x0552: []rune{0x0582}, // Case map + 0x0553: []rune{0x0583}, // Case map + 0x0554: []rune{0x0584}, // Case map + 0x0555: []rune{0x0585}, // Case map + 0x0556: []rune{0x0586}, // Case map + 0x0587: []rune{0x0565, 0x0582}, // Case map + 0x1E00: []rune{0x1E01}, // Case map + 0x1E02: []rune{0x1E03}, // Case map + 0x1E04: []rune{0x1E05}, // Case map + 0x1E06: []rune{0x1E07}, // Case map + 0x1E08: []rune{0x1E09}, // Case map + 0x1E0A: []rune{0x1E0B}, // Case map + 0x1E0C: []rune{0x1E0D}, // Case map + 0x1E0E: []rune{0x1E0F}, // Case map + 0x1E10: []rune{0x1E11}, // Case map + 0x1E12: []rune{0x1E13}, // Case map + 0x1E14: []rune{0x1E15}, // Case map + 0x1E16: []rune{0x1E17}, // Case map + 0x1E18: []rune{0x1E19}, // Case map + 0x1E1A: []rune{0x1E1B}, // Case map + 0x1E1C: []rune{0x1E1D}, // Case map + 0x1E1E: []rune{0x1E1F}, // Case map + 0x1E20: 
[]rune{0x1E21}, // Case map + 0x1E22: []rune{0x1E23}, // Case map + 0x1E24: []rune{0x1E25}, // Case map + 0x1E26: []rune{0x1E27}, // Case map + 0x1E28: []rune{0x1E29}, // Case map + 0x1E2A: []rune{0x1E2B}, // Case map + 0x1E2C: []rune{0x1E2D}, // Case map + 0x1E2E: []rune{0x1E2F}, // Case map + 0x1E30: []rune{0x1E31}, // Case map + 0x1E32: []rune{0x1E33}, // Case map + 0x1E34: []rune{0x1E35}, // Case map + 0x1E36: []rune{0x1E37}, // Case map + 0x1E38: []rune{0x1E39}, // Case map + 0x1E3A: []rune{0x1E3B}, // Case map + 0x1E3C: []rune{0x1E3D}, // Case map + 0x1E3E: []rune{0x1E3F}, // Case map + 0x1E40: []rune{0x1E41}, // Case map + 0x1E42: []rune{0x1E43}, // Case map + 0x1E44: []rune{0x1E45}, // Case map + 0x1E46: []rune{0x1E47}, // Case map + 0x1E48: []rune{0x1E49}, // Case map + 0x1E4A: []rune{0x1E4B}, // Case map + 0x1E4C: []rune{0x1E4D}, // Case map + 0x1E4E: []rune{0x1E4F}, // Case map + 0x1E50: []rune{0x1E51}, // Case map + 0x1E52: []rune{0x1E53}, // Case map + 0x1E54: []rune{0x1E55}, // Case map + 0x1E56: []rune{0x1E57}, // Case map + 0x1E58: []rune{0x1E59}, // Case map + 0x1E5A: []rune{0x1E5B}, // Case map + 0x1E5C: []rune{0x1E5D}, // Case map + 0x1E5E: []rune{0x1E5F}, // Case map + 0x1E60: []rune{0x1E61}, // Case map + 0x1E62: []rune{0x1E63}, // Case map + 0x1E64: []rune{0x1E65}, // Case map + 0x1E66: []rune{0x1E67}, // Case map + 0x1E68: []rune{0x1E69}, // Case map + 0x1E6A: []rune{0x1E6B}, // Case map + 0x1E6C: []rune{0x1E6D}, // Case map + 0x1E6E: []rune{0x1E6F}, // Case map + 0x1E70: []rune{0x1E71}, // Case map + 0x1E72: []rune{0x1E73}, // Case map + 0x1E74: []rune{0x1E75}, // Case map + 0x1E76: []rune{0x1E77}, // Case map + 0x1E78: []rune{0x1E79}, // Case map + 0x1E7A: []rune{0x1E7B}, // Case map + 0x1E7C: []rune{0x1E7D}, // Case map + 0x1E7E: []rune{0x1E7F}, // Case map + 0x1E80: []rune{0x1E81}, // Case map + 0x1E82: []rune{0x1E83}, // Case map + 0x1E84: []rune{0x1E85}, // Case map + 0x1E86: []rune{0x1E87}, // Case map + 0x1E88: []rune{0x1E89}, // Case map + 0x1E8A: []rune{0x1E8B}, // Case map + 0x1E8C: []rune{0x1E8D}, // Case map + 0x1E8E: []rune{0x1E8F}, // Case map + 0x1E90: []rune{0x1E91}, // Case map + 0x1E92: []rune{0x1E93}, // Case map + 0x1E94: []rune{0x1E95}, // Case map + 0x1E96: []rune{0x0068, 0x0331}, // Case map + 0x1E97: []rune{0x0074, 0x0308}, // Case map + 0x1E98: []rune{0x0077, 0x030A}, // Case map + 0x1E99: []rune{0x0079, 0x030A}, // Case map + 0x1E9A: []rune{0x0061, 0x02BE}, // Case map + 0x1E9B: []rune{0x1E61}, // Case map + 0x1EA0: []rune{0x1EA1}, // Case map + 0x1EA2: []rune{0x1EA3}, // Case map + 0x1EA4: []rune{0x1EA5}, // Case map + 0x1EA6: []rune{0x1EA7}, // Case map + 0x1EA8: []rune{0x1EA9}, // Case map + 0x1EAA: []rune{0x1EAB}, // Case map + 0x1EAC: []rune{0x1EAD}, // Case map + 0x1EAE: []rune{0x1EAF}, // Case map + 0x1EB0: []rune{0x1EB1}, // Case map + 0x1EB2: []rune{0x1EB3}, // Case map + 0x1EB4: []rune{0x1EB5}, // Case map + 0x1EB6: []rune{0x1EB7}, // Case map + 0x1EB8: []rune{0x1EB9}, // Case map + 0x1EBA: []rune{0x1EBB}, // Case map + 0x1EBC: []rune{0x1EBD}, // Case map + 0x1EBE: []rune{0x1EBF}, // Case map + 0x1EC0: []rune{0x1EC1}, // Case map + 0x1EC2: []rune{0x1EC3}, // Case map + 0x1EC4: []rune{0x1EC5}, // Case map + 0x1EC6: []rune{0x1EC7}, // Case map + 0x1EC8: []rune{0x1EC9}, // Case map + 0x1ECA: []rune{0x1ECB}, // Case map + 0x1ECC: []rune{0x1ECD}, // Case map + 0x1ECE: []rune{0x1ECF}, // Case map + 0x1ED0: []rune{0x1ED1}, // Case map + 0x1ED2: []rune{0x1ED3}, // Case map + 0x1ED4: []rune{0x1ED5}, // Case map + 0x1ED6: []rune{0x1ED7}, // 
Case map + 0x1ED8: []rune{0x1ED9}, // Case map + 0x1EDA: []rune{0x1EDB}, // Case map + 0x1EDC: []rune{0x1EDD}, // Case map + 0x1EDE: []rune{0x1EDF}, // Case map + 0x1EE0: []rune{0x1EE1}, // Case map + 0x1EE2: []rune{0x1EE3}, // Case map + 0x1EE4: []rune{0x1EE5}, // Case map + 0x1EE6: []rune{0x1EE7}, // Case map + 0x1EE8: []rune{0x1EE9}, // Case map + 0x1EEA: []rune{0x1EEB}, // Case map + 0x1EEC: []rune{0x1EED}, // Case map + 0x1EEE: []rune{0x1EEF}, // Case map + 0x1EF0: []rune{0x1EF1}, // Case map + 0x1EF2: []rune{0x1EF3}, // Case map + 0x1EF4: []rune{0x1EF5}, // Case map + 0x1EF6: []rune{0x1EF7}, // Case map + 0x1EF8: []rune{0x1EF9}, // Case map + 0x1F08: []rune{0x1F00}, // Case map + 0x1F09: []rune{0x1F01}, // Case map + 0x1F0A: []rune{0x1F02}, // Case map + 0x1F0B: []rune{0x1F03}, // Case map + 0x1F0C: []rune{0x1F04}, // Case map + 0x1F0D: []rune{0x1F05}, // Case map + 0x1F0E: []rune{0x1F06}, // Case map + 0x1F0F: []rune{0x1F07}, // Case map + 0x1F18: []rune{0x1F10}, // Case map + 0x1F19: []rune{0x1F11}, // Case map + 0x1F1A: []rune{0x1F12}, // Case map + 0x1F1B: []rune{0x1F13}, // Case map + 0x1F1C: []rune{0x1F14}, // Case map + 0x1F1D: []rune{0x1F15}, // Case map + 0x1F28: []rune{0x1F20}, // Case map + 0x1F29: []rune{0x1F21}, // Case map + 0x1F2A: []rune{0x1F22}, // Case map + 0x1F2B: []rune{0x1F23}, // Case map + 0x1F2C: []rune{0x1F24}, // Case map + 0x1F2D: []rune{0x1F25}, // Case map + 0x1F2E: []rune{0x1F26}, // Case map + 0x1F2F: []rune{0x1F27}, // Case map + 0x1F38: []rune{0x1F30}, // Case map + 0x1F39: []rune{0x1F31}, // Case map + 0x1F3A: []rune{0x1F32}, // Case map + 0x1F3B: []rune{0x1F33}, // Case map + 0x1F3C: []rune{0x1F34}, // Case map + 0x1F3D: []rune{0x1F35}, // Case map + 0x1F3E: []rune{0x1F36}, // Case map + 0x1F3F: []rune{0x1F37}, // Case map + 0x1F48: []rune{0x1F40}, // Case map + 0x1F49: []rune{0x1F41}, // Case map + 0x1F4A: []rune{0x1F42}, // Case map + 0x1F4B: []rune{0x1F43}, // Case map + 0x1F4C: []rune{0x1F44}, // Case map + 0x1F4D: []rune{0x1F45}, // Case map + 0x1F50: []rune{0x03C5, 0x0313}, // Case map + 0x1F52: []rune{0x03C5, 0x0313, 0x0300}, // Case map + 0x1F54: []rune{0x03C5, 0x0313, 0x0301}, // Case map + 0x1F56: []rune{0x03C5, 0x0313, 0x0342}, // Case map + 0x1F59: []rune{0x1F51}, // Case map + 0x1F5B: []rune{0x1F53}, // Case map + 0x1F5D: []rune{0x1F55}, // Case map + 0x1F5F: []rune{0x1F57}, // Case map + 0x1F68: []rune{0x1F60}, // Case map + 0x1F69: []rune{0x1F61}, // Case map + 0x1F6A: []rune{0x1F62}, // Case map + 0x1F6B: []rune{0x1F63}, // Case map + 0x1F6C: []rune{0x1F64}, // Case map + 0x1F6D: []rune{0x1F65}, // Case map + 0x1F6E: []rune{0x1F66}, // Case map + 0x1F6F: []rune{0x1F67}, // Case map + 0x1F80: []rune{0x1F00, 0x03B9}, // Case map + 0x1F81: []rune{0x1F01, 0x03B9}, // Case map + 0x1F82: []rune{0x1F02, 0x03B9}, // Case map + 0x1F83: []rune{0x1F03, 0x03B9}, // Case map + 0x1F84: []rune{0x1F04, 0x03B9}, // Case map + 0x1F85: []rune{0x1F05, 0x03B9}, // Case map + 0x1F86: []rune{0x1F06, 0x03B9}, // Case map + 0x1F87: []rune{0x1F07, 0x03B9}, // Case map + 0x1F88: []rune{0x1F00, 0x03B9}, // Case map + 0x1F89: []rune{0x1F01, 0x03B9}, // Case map + 0x1F8A: []rune{0x1F02, 0x03B9}, // Case map + 0x1F8B: []rune{0x1F03, 0x03B9}, // Case map + 0x1F8C: []rune{0x1F04, 0x03B9}, // Case map + 0x1F8D: []rune{0x1F05, 0x03B9}, // Case map + 0x1F8E: []rune{0x1F06, 0x03B9}, // Case map + 0x1F8F: []rune{0x1F07, 0x03B9}, // Case map + 0x1F90: []rune{0x1F20, 0x03B9}, // Case map + 0x1F91: []rune{0x1F21, 0x03B9}, // Case map + 0x1F92: []rune{0x1F22, 0x03B9}, // 
Case map + 0x1F93: []rune{0x1F23, 0x03B9}, // Case map + 0x1F94: []rune{0x1F24, 0x03B9}, // Case map + 0x1F95: []rune{0x1F25, 0x03B9}, // Case map + 0x1F96: []rune{0x1F26, 0x03B9}, // Case map + 0x1F97: []rune{0x1F27, 0x03B9}, // Case map + 0x1F98: []rune{0x1F20, 0x03B9}, // Case map + 0x1F99: []rune{0x1F21, 0x03B9}, // Case map + 0x1F9A: []rune{0x1F22, 0x03B9}, // Case map + 0x1F9B: []rune{0x1F23, 0x03B9}, // Case map + 0x1F9C: []rune{0x1F24, 0x03B9}, // Case map + 0x1F9D: []rune{0x1F25, 0x03B9}, // Case map + 0x1F9E: []rune{0x1F26, 0x03B9}, // Case map + 0x1F9F: []rune{0x1F27, 0x03B9}, // Case map + 0x1FA0: []rune{0x1F60, 0x03B9}, // Case map + 0x1FA1: []rune{0x1F61, 0x03B9}, // Case map + 0x1FA2: []rune{0x1F62, 0x03B9}, // Case map + 0x1FA3: []rune{0x1F63, 0x03B9}, // Case map + 0x1FA4: []rune{0x1F64, 0x03B9}, // Case map + 0x1FA5: []rune{0x1F65, 0x03B9}, // Case map + 0x1FA6: []rune{0x1F66, 0x03B9}, // Case map + 0x1FA7: []rune{0x1F67, 0x03B9}, // Case map + 0x1FA8: []rune{0x1F60, 0x03B9}, // Case map + 0x1FA9: []rune{0x1F61, 0x03B9}, // Case map + 0x1FAA: []rune{0x1F62, 0x03B9}, // Case map + 0x1FAB: []rune{0x1F63, 0x03B9}, // Case map + 0x1FAC: []rune{0x1F64, 0x03B9}, // Case map + 0x1FAD: []rune{0x1F65, 0x03B9}, // Case map + 0x1FAE: []rune{0x1F66, 0x03B9}, // Case map + 0x1FAF: []rune{0x1F67, 0x03B9}, // Case map + 0x1FB2: []rune{0x1F70, 0x03B9}, // Case map + 0x1FB3: []rune{0x03B1, 0x03B9}, // Case map + 0x1FB4: []rune{0x03AC, 0x03B9}, // Case map + 0x1FB6: []rune{0x03B1, 0x0342}, // Case map + 0x1FB7: []rune{0x03B1, 0x0342, 0x03B9}, // Case map + 0x1FB8: []rune{0x1FB0}, // Case map + 0x1FB9: []rune{0x1FB1}, // Case map + 0x1FBA: []rune{0x1F70}, // Case map + 0x1FBB: []rune{0x1F71}, // Case map + 0x1FBC: []rune{0x03B1, 0x03B9}, // Case map + 0x1FBE: []rune{0x03B9}, // Case map + 0x1FC2: []rune{0x1F74, 0x03B9}, // Case map + 0x1FC3: []rune{0x03B7, 0x03B9}, // Case map + 0x1FC4: []rune{0x03AE, 0x03B9}, // Case map + 0x1FC6: []rune{0x03B7, 0x0342}, // Case map + 0x1FC7: []rune{0x03B7, 0x0342, 0x03B9}, // Case map + 0x1FC8: []rune{0x1F72}, // Case map + 0x1FC9: []rune{0x1F73}, // Case map + 0x1FCA: []rune{0x1F74}, // Case map + 0x1FCB: []rune{0x1F75}, // Case map + 0x1FCC: []rune{0x03B7, 0x03B9}, // Case map + 0x1FD2: []rune{0x03B9, 0x0308, 0x0300}, // Case map + 0x1FD3: []rune{0x03B9, 0x0308, 0x0301}, // Case map + 0x1FD6: []rune{0x03B9, 0x0342}, // Case map + 0x1FD7: []rune{0x03B9, 0x0308, 0x0342}, // Case map + 0x1FD8: []rune{0x1FD0}, // Case map + 0x1FD9: []rune{0x1FD1}, // Case map + 0x1FDA: []rune{0x1F76}, // Case map + 0x1FDB: []rune{0x1F77}, // Case map + 0x1FE2: []rune{0x03C5, 0x0308, 0x0300}, // Case map + 0x1FE3: []rune{0x03C5, 0x0308, 0x0301}, // Case map + 0x1FE4: []rune{0x03C1, 0x0313}, // Case map + 0x1FE6: []rune{0x03C5, 0x0342}, // Case map + 0x1FE7: []rune{0x03C5, 0x0308, 0x0342}, // Case map + 0x1FE8: []rune{0x1FE0}, // Case map + 0x1FE9: []rune{0x1FE1}, // Case map + 0x1FEA: []rune{0x1F7A}, // Case map + 0x1FEB: []rune{0x1F7B}, // Case map + 0x1FEC: []rune{0x1FE5}, // Case map + 0x1FF2: []rune{0x1F7C, 0x03B9}, // Case map + 0x1FF3: []rune{0x03C9, 0x03B9}, // Case map + 0x1FF4: []rune{0x03CE, 0x03B9}, // Case map + 0x1FF6: []rune{0x03C9, 0x0342}, // Case map + 0x1FF7: []rune{0x03C9, 0x0342, 0x03B9}, // Case map + 0x1FF8: []rune{0x1F78}, // Case map + 0x1FF9: []rune{0x1F79}, // Case map + 0x1FFA: []rune{0x1F7C}, // Case map + 0x1FFB: []rune{0x1F7D}, // Case map + 0x1FFC: []rune{0x03C9, 0x03B9}, // Case map + 0x20A8: []rune{0x0072, 0x0073}, // Additional folding + 
0x2102: []rune{0x0063}, // Additional folding + 0x2103: []rune{0x00B0, 0x0063}, // Additional folding + 0x2107: []rune{0x025B}, // Additional folding + 0x2109: []rune{0x00B0, 0x0066}, // Additional folding + 0x210B: []rune{0x0068}, // Additional folding + 0x210C: []rune{0x0068}, // Additional folding + 0x210D: []rune{0x0068}, // Additional folding + 0x2110: []rune{0x0069}, // Additional folding + 0x2111: []rune{0x0069}, // Additional folding + 0x2112: []rune{0x006C}, // Additional folding + 0x2115: []rune{0x006E}, // Additional folding + 0x2116: []rune{0x006E, 0x006F}, // Additional folding + 0x2119: []rune{0x0070}, // Additional folding + 0x211A: []rune{0x0071}, // Additional folding + 0x211B: []rune{0x0072}, // Additional folding + 0x211C: []rune{0x0072}, // Additional folding + 0x211D: []rune{0x0072}, // Additional folding + 0x2120: []rune{0x0073, 0x006D}, // Additional folding + 0x2121: []rune{0x0074, 0x0065, 0x006C}, // Additional folding + 0x2122: []rune{0x0074, 0x006D}, // Additional folding + 0x2124: []rune{0x007A}, // Additional folding + 0x2126: []rune{0x03C9}, // Case map + 0x2128: []rune{0x007A}, // Additional folding + 0x212A: []rune{0x006B}, // Case map + 0x212B: []rune{0x00E5}, // Case map + 0x212C: []rune{0x0062}, // Additional folding + 0x212D: []rune{0x0063}, // Additional folding + 0x2130: []rune{0x0065}, // Additional folding + 0x2131: []rune{0x0066}, // Additional folding + 0x2133: []rune{0x006D}, // Additional folding + 0x213E: []rune{0x03B3}, // Additional folding + 0x213F: []rune{0x03C0}, // Additional folding + 0x2145: []rune{0x0064}, // Additional folding + 0x2160: []rune{0x2170}, // Case map + 0x2161: []rune{0x2171}, // Case map + 0x2162: []rune{0x2172}, // Case map + 0x2163: []rune{0x2173}, // Case map + 0x2164: []rune{0x2174}, // Case map + 0x2165: []rune{0x2175}, // Case map + 0x2166: []rune{0x2176}, // Case map + 0x2167: []rune{0x2177}, // Case map + 0x2168: []rune{0x2178}, // Case map + 0x2169: []rune{0x2179}, // Case map + 0x216A: []rune{0x217A}, // Case map + 0x216B: []rune{0x217B}, // Case map + 0x216C: []rune{0x217C}, // Case map + 0x216D: []rune{0x217D}, // Case map + 0x216E: []rune{0x217E}, // Case map + 0x216F: []rune{0x217F}, // Case map + 0x24B6: []rune{0x24D0}, // Case map + 0x24B7: []rune{0x24D1}, // Case map + 0x24B8: []rune{0x24D2}, // Case map + 0x24B9: []rune{0x24D3}, // Case map + 0x24BA: []rune{0x24D4}, // Case map + 0x24BB: []rune{0x24D5}, // Case map + 0x24BC: []rune{0x24D6}, // Case map + 0x24BD: []rune{0x24D7}, // Case map + 0x24BE: []rune{0x24D8}, // Case map + 0x24BF: []rune{0x24D9}, // Case map + 0x24C0: []rune{0x24DA}, // Case map + 0x24C1: []rune{0x24DB}, // Case map + 0x24C2: []rune{0x24DC}, // Case map + 0x24C3: []rune{0x24DD}, // Case map + 0x24C4: []rune{0x24DE}, // Case map + 0x24C5: []rune{0x24DF}, // Case map + 0x24C6: []rune{0x24E0}, // Case map + 0x24C7: []rune{0x24E1}, // Case map + 0x24C8: []rune{0x24E2}, // Case map + 0x24C9: []rune{0x24E3}, // Case map + 0x24CA: []rune{0x24E4}, // Case map + 0x24CB: []rune{0x24E5}, // Case map + 0x24CC: []rune{0x24E6}, // Case map + 0x24CD: []rune{0x24E7}, // Case map + 0x24CE: []rune{0x24E8}, // Case map + 0x24CF: []rune{0x24E9}, // Case map + 0x3371: []rune{0x0068, 0x0070, 0x0061}, // Additional folding + 0x3373: []rune{0x0061, 0x0075}, // Additional folding + 0x3375: []rune{0x006F, 0x0076}, // Additional folding + 0x3380: []rune{0x0070, 0x0061}, // Additional folding + 0x3381: []rune{0x006E, 0x0061}, // Additional folding + 0x3382: []rune{0x03BC, 0x0061}, // Additional folding + 
0x3383: []rune{0x006D, 0x0061}, // Additional folding + 0x3384: []rune{0x006B, 0x0061}, // Additional folding + 0x3385: []rune{0x006B, 0x0062}, // Additional folding + 0x3386: []rune{0x006D, 0x0062}, // Additional folding + 0x3387: []rune{0x0067, 0x0062}, // Additional folding + 0x338A: []rune{0x0070, 0x0066}, // Additional folding + 0x338B: []rune{0x006E, 0x0066}, // Additional folding + 0x338C: []rune{0x03BC, 0x0066}, // Additional folding + 0x3390: []rune{0x0068, 0x007A}, // Additional folding + 0x3391: []rune{0x006B, 0x0068, 0x007A}, // Additional folding + 0x3392: []rune{0x006D, 0x0068, 0x007A}, // Additional folding + 0x3393: []rune{0x0067, 0x0068, 0x007A}, // Additional folding + 0x3394: []rune{0x0074, 0x0068, 0x007A}, // Additional folding + 0x33A9: []rune{0x0070, 0x0061}, // Additional folding + 0x33AA: []rune{0x006B, 0x0070, 0x0061}, // Additional folding + 0x33AB: []rune{0x006D, 0x0070, 0x0061}, // Additional folding + 0x33AC: []rune{0x0067, 0x0070, 0x0061}, // Additional folding + 0x33B4: []rune{0x0070, 0x0076}, // Additional folding + 0x33B5: []rune{0x006E, 0x0076}, // Additional folding + 0x33B6: []rune{0x03BC, 0x0076}, // Additional folding + 0x33B7: []rune{0x006D, 0x0076}, // Additional folding + 0x33B8: []rune{0x006B, 0x0076}, // Additional folding + 0x33B9: []rune{0x006D, 0x0076}, // Additional folding + 0x33BA: []rune{0x0070, 0x0077}, // Additional folding + 0x33BB: []rune{0x006E, 0x0077}, // Additional folding + 0x33BC: []rune{0x03BC, 0x0077}, // Additional folding + 0x33BD: []rune{0x006D, 0x0077}, // Additional folding + 0x33BE: []rune{0x006B, 0x0077}, // Additional folding + 0x33BF: []rune{0x006D, 0x0077}, // Additional folding + 0x33C0: []rune{0x006B, 0x03C9}, // Additional folding + 0x33C1: []rune{0x006D, 0x03C9}, // Additional folding + 0x33C3: []rune{0x0062, 0x0071}, // Additional folding + 0x33C6: []rune{0x0063, 0x2215, 0x006B, 0x0067}, // Additional folding + 0x33C7: []rune{0x0063, 0x006F, 0x002E}, // Additional folding + 0x33C8: []rune{0x0064, 0x0062}, // Additional folding + 0x33C9: []rune{0x0067, 0x0079}, // Additional folding + 0x33CB: []rune{0x0068, 0x0070}, // Additional folding + 0x33CD: []rune{0x006B, 0x006B}, // Additional folding + 0x33CE: []rune{0x006B, 0x006D}, // Additional folding + 0x33D7: []rune{0x0070, 0x0068}, // Additional folding + 0x33D9: []rune{0x0070, 0x0070, 0x006D}, // Additional folding + 0x33DA: []rune{0x0070, 0x0072}, // Additional folding + 0x33DC: []rune{0x0073, 0x0076}, // Additional folding + 0x33DD: []rune{0x0077, 0x0062}, // Additional folding + 0xFB00: []rune{0x0066, 0x0066}, // Case map + 0xFB01: []rune{0x0066, 0x0069}, // Case map + 0xFB02: []rune{0x0066, 0x006C}, // Case map + 0xFB03: []rune{0x0066, 0x0066, 0x0069}, // Case map + 0xFB04: []rune{0x0066, 0x0066, 0x006C}, // Case map + 0xFB05: []rune{0x0073, 0x0074}, // Case map + 0xFB06: []rune{0x0073, 0x0074}, // Case map + 0xFB13: []rune{0x0574, 0x0576}, // Case map + 0xFB14: []rune{0x0574, 0x0565}, // Case map + 0xFB15: []rune{0x0574, 0x056B}, // Case map + 0xFB16: []rune{0x057E, 0x0576}, // Case map + 0xFB17: []rune{0x0574, 0x056D}, // Case map + 0xFF21: []rune{0xFF41}, // Case map + 0xFF22: []rune{0xFF42}, // Case map + 0xFF23: []rune{0xFF43}, // Case map + 0xFF24: []rune{0xFF44}, // Case map + 0xFF25: []rune{0xFF45}, // Case map + 0xFF26: []rune{0xFF46}, // Case map + 0xFF27: []rune{0xFF47}, // Case map + 0xFF28: []rune{0xFF48}, // Case map + 0xFF29: []rune{0xFF49}, // Case map + 0xFF2A: []rune{0xFF4A}, // Case map + 0xFF2B: []rune{0xFF4B}, // Case map + 0xFF2C: 
[]rune{0xFF4C}, // Case map + 0xFF2D: []rune{0xFF4D}, // Case map + 0xFF2E: []rune{0xFF4E}, // Case map + 0xFF2F: []rune{0xFF4F}, // Case map + 0xFF30: []rune{0xFF50}, // Case map + 0xFF31: []rune{0xFF51}, // Case map + 0xFF32: []rune{0xFF52}, // Case map + 0xFF33: []rune{0xFF53}, // Case map + 0xFF34: []rune{0xFF54}, // Case map + 0xFF35: []rune{0xFF55}, // Case map + 0xFF36: []rune{0xFF56}, // Case map + 0xFF37: []rune{0xFF57}, // Case map + 0xFF38: []rune{0xFF58}, // Case map + 0xFF39: []rune{0xFF59}, // Case map + 0xFF3A: []rune{0xFF5A}, // Case map + 0x10400: []rune{0x10428}, // Case map + 0x10401: []rune{0x10429}, // Case map + 0x10402: []rune{0x1042A}, // Case map + 0x10403: []rune{0x1042B}, // Case map + 0x10404: []rune{0x1042C}, // Case map + 0x10405: []rune{0x1042D}, // Case map + 0x10406: []rune{0x1042E}, // Case map + 0x10407: []rune{0x1042F}, // Case map + 0x10408: []rune{0x10430}, // Case map + 0x10409: []rune{0x10431}, // Case map + 0x1040A: []rune{0x10432}, // Case map + 0x1040B: []rune{0x10433}, // Case map + 0x1040C: []rune{0x10434}, // Case map + 0x1040D: []rune{0x10435}, // Case map + 0x1040E: []rune{0x10436}, // Case map + 0x1040F: []rune{0x10437}, // Case map + 0x10410: []rune{0x10438}, // Case map + 0x10411: []rune{0x10439}, // Case map + 0x10412: []rune{0x1043A}, // Case map + 0x10413: []rune{0x1043B}, // Case map + 0x10414: []rune{0x1043C}, // Case map + 0x10415: []rune{0x1043D}, // Case map + 0x10416: []rune{0x1043E}, // Case map + 0x10417: []rune{0x1043F}, // Case map + 0x10418: []rune{0x10440}, // Case map + 0x10419: []rune{0x10441}, // Case map + 0x1041A: []rune{0x10442}, // Case map + 0x1041B: []rune{0x10443}, // Case map + 0x1041C: []rune{0x10444}, // Case map + 0x1041D: []rune{0x10445}, // Case map + 0x1041E: []rune{0x10446}, // Case map + 0x1041F: []rune{0x10447}, // Case map + 0x10420: []rune{0x10448}, // Case map + 0x10421: []rune{0x10449}, // Case map + 0x10422: []rune{0x1044A}, // Case map + 0x10423: []rune{0x1044B}, // Case map + 0x10424: []rune{0x1044C}, // Case map + 0x10425: []rune{0x1044D}, // Case map + 0x1D400: []rune{0x0061}, // Additional folding + 0x1D401: []rune{0x0062}, // Additional folding + 0x1D402: []rune{0x0063}, // Additional folding + 0x1D403: []rune{0x0064}, // Additional folding + 0x1D404: []rune{0x0065}, // Additional folding + 0x1D405: []rune{0x0066}, // Additional folding + 0x1D406: []rune{0x0067}, // Additional folding + 0x1D407: []rune{0x0068}, // Additional folding + 0x1D408: []rune{0x0069}, // Additional folding + 0x1D409: []rune{0x006A}, // Additional folding + 0x1D40A: []rune{0x006B}, // Additional folding + 0x1D40B: []rune{0x006C}, // Additional folding + 0x1D40C: []rune{0x006D}, // Additional folding + 0x1D40D: []rune{0x006E}, // Additional folding + 0x1D40E: []rune{0x006F}, // Additional folding + 0x1D40F: []rune{0x0070}, // Additional folding + 0x1D410: []rune{0x0071}, // Additional folding + 0x1D411: []rune{0x0072}, // Additional folding + 0x1D412: []rune{0x0073}, // Additional folding + 0x1D413: []rune{0x0074}, // Additional folding + 0x1D414: []rune{0x0075}, // Additional folding + 0x1D415: []rune{0x0076}, // Additional folding + 0x1D416: []rune{0x0077}, // Additional folding + 0x1D417: []rune{0x0078}, // Additional folding + 0x1D418: []rune{0x0079}, // Additional folding + 0x1D419: []rune{0x007A}, // Additional folding + 0x1D434: []rune{0x0061}, // Additional folding + 0x1D435: []rune{0x0062}, // Additional folding + 0x1D436: []rune{0x0063}, // Additional folding + 0x1D437: []rune{0x0064}, // Additional folding + 
0x1D438: []rune{0x0065}, // Additional folding + 0x1D439: []rune{0x0066}, // Additional folding + 0x1D43A: []rune{0x0067}, // Additional folding + 0x1D43B: []rune{0x0068}, // Additional folding + 0x1D43C: []rune{0x0069}, // Additional folding + 0x1D43D: []rune{0x006A}, // Additional folding + 0x1D43E: []rune{0x006B}, // Additional folding + 0x1D43F: []rune{0x006C}, // Additional folding + 0x1D440: []rune{0x006D}, // Additional folding + 0x1D441: []rune{0x006E}, // Additional folding + 0x1D442: []rune{0x006F}, // Additional folding + 0x1D443: []rune{0x0070}, // Additional folding + 0x1D444: []rune{0x0071}, // Additional folding + 0x1D445: []rune{0x0072}, // Additional folding + 0x1D446: []rune{0x0073}, // Additional folding + 0x1D447: []rune{0x0074}, // Additional folding + 0x1D448: []rune{0x0075}, // Additional folding + 0x1D449: []rune{0x0076}, // Additional folding + 0x1D44A: []rune{0x0077}, // Additional folding + 0x1D44B: []rune{0x0078}, // Additional folding + 0x1D44C: []rune{0x0079}, // Additional folding + 0x1D44D: []rune{0x007A}, // Additional folding + 0x1D468: []rune{0x0061}, // Additional folding + 0x1D469: []rune{0x0062}, // Additional folding + 0x1D46A: []rune{0x0063}, // Additional folding + 0x1D46B: []rune{0x0064}, // Additional folding + 0x1D46C: []rune{0x0065}, // Additional folding + 0x1D46D: []rune{0x0066}, // Additional folding + 0x1D46E: []rune{0x0067}, // Additional folding + 0x1D46F: []rune{0x0068}, // Additional folding + 0x1D470: []rune{0x0069}, // Additional folding + 0x1D471: []rune{0x006A}, // Additional folding + 0x1D472: []rune{0x006B}, // Additional folding + 0x1D473: []rune{0x006C}, // Additional folding + 0x1D474: []rune{0x006D}, // Additional folding + 0x1D475: []rune{0x006E}, // Additional folding + 0x1D476: []rune{0x006F}, // Additional folding + 0x1D477: []rune{0x0070}, // Additional folding + 0x1D478: []rune{0x0071}, // Additional folding + 0x1D479: []rune{0x0072}, // Additional folding + 0x1D47A: []rune{0x0073}, // Additional folding + 0x1D47B: []rune{0x0074}, // Additional folding + 0x1D47C: []rune{0x0075}, // Additional folding + 0x1D47D: []rune{0x0076}, // Additional folding + 0x1D47E: []rune{0x0077}, // Additional folding + 0x1D47F: []rune{0x0078}, // Additional folding + 0x1D480: []rune{0x0079}, // Additional folding + 0x1D481: []rune{0x007A}, // Additional folding + 0x1D49C: []rune{0x0061}, // Additional folding + 0x1D49E: []rune{0x0063}, // Additional folding + 0x1D49F: []rune{0x0064}, // Additional folding + 0x1D4A2: []rune{0x0067}, // Additional folding + 0x1D4A5: []rune{0x006A}, // Additional folding + 0x1D4A6: []rune{0x006B}, // Additional folding + 0x1D4A9: []rune{0x006E}, // Additional folding + 0x1D4AA: []rune{0x006F}, // Additional folding + 0x1D4AB: []rune{0x0070}, // Additional folding + 0x1D4AC: []rune{0x0071}, // Additional folding + 0x1D4AE: []rune{0x0073}, // Additional folding + 0x1D4AF: []rune{0x0074}, // Additional folding + 0x1D4B0: []rune{0x0075}, // Additional folding + 0x1D4B1: []rune{0x0076}, // Additional folding + 0x1D4B2: []rune{0x0077}, // Additional folding + 0x1D4B3: []rune{0x0078}, // Additional folding + 0x1D4B4: []rune{0x0079}, // Additional folding + 0x1D4B5: []rune{0x007A}, // Additional folding + 0x1D4D0: []rune{0x0061}, // Additional folding + 0x1D4D1: []rune{0x0062}, // Additional folding + 0x1D4D2: []rune{0x0063}, // Additional folding + 0x1D4D3: []rune{0x0064}, // Additional folding + 0x1D4D4: []rune{0x0065}, // Additional folding + 0x1D4D5: []rune{0x0066}, // Additional folding + 0x1D4D6: []rune{0x0067}, 
// Additional folding + 0x1D4D7: []rune{0x0068}, // Additional folding + 0x1D4D8: []rune{0x0069}, // Additional folding + 0x1D4D9: []rune{0x006A}, // Additional folding + 0x1D4DA: []rune{0x006B}, // Additional folding + 0x1D4DB: []rune{0x006C}, // Additional folding + 0x1D4DC: []rune{0x006D}, // Additional folding + 0x1D4DD: []rune{0x006E}, // Additional folding + 0x1D4DE: []rune{0x006F}, // Additional folding + 0x1D4DF: []rune{0x0070}, // Additional folding + 0x1D4E0: []rune{0x0071}, // Additional folding + 0x1D4E1: []rune{0x0072}, // Additional folding + 0x1D4E2: []rune{0x0073}, // Additional folding + 0x1D4E3: []rune{0x0074}, // Additional folding + 0x1D4E4: []rune{0x0075}, // Additional folding + 0x1D4E5: []rune{0x0076}, // Additional folding + 0x1D4E6: []rune{0x0077}, // Additional folding + 0x1D4E7: []rune{0x0078}, // Additional folding + 0x1D4E8: []rune{0x0079}, // Additional folding + 0x1D4E9: []rune{0x007A}, // Additional folding + 0x1D504: []rune{0x0061}, // Additional folding + 0x1D505: []rune{0x0062}, // Additional folding + 0x1D507: []rune{0x0064}, // Additional folding + 0x1D508: []rune{0x0065}, // Additional folding + 0x1D509: []rune{0x0066}, // Additional folding + 0x1D50A: []rune{0x0067}, // Additional folding + 0x1D50D: []rune{0x006A}, // Additional folding + 0x1D50E: []rune{0x006B}, // Additional folding + 0x1D50F: []rune{0x006C}, // Additional folding + 0x1D510: []rune{0x006D}, // Additional folding + 0x1D511: []rune{0x006E}, // Additional folding + 0x1D512: []rune{0x006F}, // Additional folding + 0x1D513: []rune{0x0070}, // Additional folding + 0x1D514: []rune{0x0071}, // Additional folding + 0x1D516: []rune{0x0073}, // Additional folding + 0x1D517: []rune{0x0074}, // Additional folding + 0x1D518: []rune{0x0075}, // Additional folding + 0x1D519: []rune{0x0076}, // Additional folding + 0x1D51A: []rune{0x0077}, // Additional folding + 0x1D51B: []rune{0x0078}, // Additional folding + 0x1D51C: []rune{0x0079}, // Additional folding + 0x1D538: []rune{0x0061}, // Additional folding + 0x1D539: []rune{0x0062}, // Additional folding + 0x1D53B: []rune{0x0064}, // Additional folding + 0x1D53C: []rune{0x0065}, // Additional folding + 0x1D53D: []rune{0x0066}, // Additional folding + 0x1D53E: []rune{0x0067}, // Additional folding + 0x1D540: []rune{0x0069}, // Additional folding + 0x1D541: []rune{0x006A}, // Additional folding + 0x1D542: []rune{0x006B}, // Additional folding + 0x1D543: []rune{0x006C}, // Additional folding + 0x1D544: []rune{0x006D}, // Additional folding + 0x1D546: []rune{0x006F}, // Additional folding + 0x1D54A: []rune{0x0073}, // Additional folding + 0x1D54B: []rune{0x0074}, // Additional folding + 0x1D54C: []rune{0x0075}, // Additional folding + 0x1D54D: []rune{0x0076}, // Additional folding + 0x1D54E: []rune{0x0077}, // Additional folding + 0x1D54F: []rune{0x0078}, // Additional folding + 0x1D550: []rune{0x0079}, // Additional folding + 0x1D56C: []rune{0x0061}, // Additional folding + 0x1D56D: []rune{0x0062}, // Additional folding + 0x1D56E: []rune{0x0063}, // Additional folding + 0x1D56F: []rune{0x0064}, // Additional folding + 0x1D570: []rune{0x0065}, // Additional folding + 0x1D571: []rune{0x0066}, // Additional folding + 0x1D572: []rune{0x0067}, // Additional folding + 0x1D573: []rune{0x0068}, // Additional folding + 0x1D574: []rune{0x0069}, // Additional folding + 0x1D575: []rune{0x006A}, // Additional folding + 0x1D576: []rune{0x006B}, // Additional folding + 0x1D577: []rune{0x006C}, // Additional folding + 0x1D578: []rune{0x006D}, // Additional folding + 
0x1D579: []rune{0x006E}, // Additional folding + 0x1D57A: []rune{0x006F}, // Additional folding + 0x1D57B: []rune{0x0070}, // Additional folding + 0x1D57C: []rune{0x0071}, // Additional folding + 0x1D57D: []rune{0x0072}, // Additional folding + 0x1D57E: []rune{0x0073}, // Additional folding + 0x1D57F: []rune{0x0074}, // Additional folding + 0x1D580: []rune{0x0075}, // Additional folding + 0x1D581: []rune{0x0076}, // Additional folding + 0x1D582: []rune{0x0077}, // Additional folding + 0x1D583: []rune{0x0078}, // Additional folding + 0x1D584: []rune{0x0079}, // Additional folding + 0x1D585: []rune{0x007A}, // Additional folding + 0x1D5A0: []rune{0x0061}, // Additional folding + 0x1D5A1: []rune{0x0062}, // Additional folding + 0x1D5A2: []rune{0x0063}, // Additional folding + 0x1D5A3: []rune{0x0064}, // Additional folding + 0x1D5A4: []rune{0x0065}, // Additional folding + 0x1D5A5: []rune{0x0066}, // Additional folding + 0x1D5A6: []rune{0x0067}, // Additional folding + 0x1D5A7: []rune{0x0068}, // Additional folding + 0x1D5A8: []rune{0x0069}, // Additional folding + 0x1D5A9: []rune{0x006A}, // Additional folding + 0x1D5AA: []rune{0x006B}, // Additional folding + 0x1D5AB: []rune{0x006C}, // Additional folding + 0x1D5AC: []rune{0x006D}, // Additional folding + 0x1D5AD: []rune{0x006E}, // Additional folding + 0x1D5AE: []rune{0x006F}, // Additional folding + 0x1D5AF: []rune{0x0070}, // Additional folding + 0x1D5B0: []rune{0x0071}, // Additional folding + 0x1D5B1: []rune{0x0072}, // Additional folding + 0x1D5B2: []rune{0x0073}, // Additional folding + 0x1D5B3: []rune{0x0074}, // Additional folding + 0x1D5B4: []rune{0x0075}, // Additional folding + 0x1D5B5: []rune{0x0076}, // Additional folding + 0x1D5B6: []rune{0x0077}, // Additional folding + 0x1D5B7: []rune{0x0078}, // Additional folding + 0x1D5B8: []rune{0x0079}, // Additional folding + 0x1D5B9: []rune{0x007A}, // Additional folding + 0x1D5D4: []rune{0x0061}, // Additional folding + 0x1D5D5: []rune{0x0062}, // Additional folding + 0x1D5D6: []rune{0x0063}, // Additional folding + 0x1D5D7: []rune{0x0064}, // Additional folding + 0x1D5D8: []rune{0x0065}, // Additional folding + 0x1D5D9: []rune{0x0066}, // Additional folding + 0x1D5DA: []rune{0x0067}, // Additional folding + 0x1D5DB: []rune{0x0068}, // Additional folding + 0x1D5DC: []rune{0x0069}, // Additional folding + 0x1D5DD: []rune{0x006A}, // Additional folding + 0x1D5DE: []rune{0x006B}, // Additional folding + 0x1D5DF: []rune{0x006C}, // Additional folding + 0x1D5E0: []rune{0x006D}, // Additional folding + 0x1D5E1: []rune{0x006E}, // Additional folding + 0x1D5E2: []rune{0x006F}, // Additional folding + 0x1D5E3: []rune{0x0070}, // Additional folding + 0x1D5E4: []rune{0x0071}, // Additional folding + 0x1D5E5: []rune{0x0072}, // Additional folding + 0x1D5E6: []rune{0x0073}, // Additional folding + 0x1D5E7: []rune{0x0074}, // Additional folding + 0x1D5E8: []rune{0x0075}, // Additional folding + 0x1D5E9: []rune{0x0076}, // Additional folding + 0x1D5EA: []rune{0x0077}, // Additional folding + 0x1D5EB: []rune{0x0078}, // Additional folding + 0x1D5EC: []rune{0x0079}, // Additional folding + 0x1D5ED: []rune{0x007A}, // Additional folding + 0x1D608: []rune{0x0061}, // Additional folding + 0x1D609: []rune{0x0062}, // Additional folding + 0x1D60A: []rune{0x0063}, // Additional folding + 0x1D60B: []rune{0x0064}, // Additional folding + 0x1D60C: []rune{0x0065}, // Additional folding + 0x1D60D: []rune{0x0066}, // Additional folding + 0x1D60E: []rune{0x0067}, // Additional folding + 0x1D60F: []rune{0x0068}, 
// Additional folding + 0x1D610: []rune{0x0069}, // Additional folding + 0x1D611: []rune{0x006A}, // Additional folding + 0x1D612: []rune{0x006B}, // Additional folding + 0x1D613: []rune{0x006C}, // Additional folding + 0x1D614: []rune{0x006D}, // Additional folding + 0x1D615: []rune{0x006E}, // Additional folding + 0x1D616: []rune{0x006F}, // Additional folding + 0x1D617: []rune{0x0070}, // Additional folding + 0x1D618: []rune{0x0071}, // Additional folding + 0x1D619: []rune{0x0072}, // Additional folding + 0x1D61A: []rune{0x0073}, // Additional folding + 0x1D61B: []rune{0x0074}, // Additional folding + 0x1D61C: []rune{0x0075}, // Additional folding + 0x1D61D: []rune{0x0076}, // Additional folding + 0x1D61E: []rune{0x0077}, // Additional folding + 0x1D61F: []rune{0x0078}, // Additional folding + 0x1D620: []rune{0x0079}, // Additional folding + 0x1D621: []rune{0x007A}, // Additional folding + 0x1D63C: []rune{0x0061}, // Additional folding + 0x1D63D: []rune{0x0062}, // Additional folding + 0x1D63E: []rune{0x0063}, // Additional folding + 0x1D63F: []rune{0x0064}, // Additional folding + 0x1D640: []rune{0x0065}, // Additional folding + 0x1D641: []rune{0x0066}, // Additional folding + 0x1D642: []rune{0x0067}, // Additional folding + 0x1D643: []rune{0x0068}, // Additional folding + 0x1D644: []rune{0x0069}, // Additional folding + 0x1D645: []rune{0x006A}, // Additional folding + 0x1D646: []rune{0x006B}, // Additional folding + 0x1D647: []rune{0x006C}, // Additional folding + 0x1D648: []rune{0x006D}, // Additional folding + 0x1D649: []rune{0x006E}, // Additional folding + 0x1D64A: []rune{0x006F}, // Additional folding + 0x1D64B: []rune{0x0070}, // Additional folding + 0x1D64C: []rune{0x0071}, // Additional folding + 0x1D64D: []rune{0x0072}, // Additional folding + 0x1D64E: []rune{0x0073}, // Additional folding + 0x1D64F: []rune{0x0074}, // Additional folding + 0x1D650: []rune{0x0075}, // Additional folding + 0x1D651: []rune{0x0076}, // Additional folding + 0x1D652: []rune{0x0077}, // Additional folding + 0x1D653: []rune{0x0078}, // Additional folding + 0x1D654: []rune{0x0079}, // Additional folding + 0x1D655: []rune{0x007A}, // Additional folding + 0x1D670: []rune{0x0061}, // Additional folding + 0x1D671: []rune{0x0062}, // Additional folding + 0x1D672: []rune{0x0063}, // Additional folding + 0x1D673: []rune{0x0064}, // Additional folding + 0x1D674: []rune{0x0065}, // Additional folding + 0x1D675: []rune{0x0066}, // Additional folding + 0x1D676: []rune{0x0067}, // Additional folding + 0x1D677: []rune{0x0068}, // Additional folding + 0x1D678: []rune{0x0069}, // Additional folding + 0x1D679: []rune{0x006A}, // Additional folding + 0x1D67A: []rune{0x006B}, // Additional folding + 0x1D67B: []rune{0x006C}, // Additional folding + 0x1D67C: []rune{0x006D}, // Additional folding + 0x1D67D: []rune{0x006E}, // Additional folding + 0x1D67E: []rune{0x006F}, // Additional folding + 0x1D67F: []rune{0x0070}, // Additional folding + 0x1D680: []rune{0x0071}, // Additional folding + 0x1D681: []rune{0x0072}, // Additional folding + 0x1D682: []rune{0x0073}, // Additional folding + 0x1D683: []rune{0x0074}, // Additional folding + 0x1D684: []rune{0x0075}, // Additional folding + 0x1D685: []rune{0x0076}, // Additional folding + 0x1D686: []rune{0x0077}, // Additional folding + 0x1D687: []rune{0x0078}, // Additional folding + 0x1D688: []rune{0x0079}, // Additional folding + 0x1D689: []rune{0x007A}, // Additional folding + 0x1D6A8: []rune{0x03B1}, // Additional folding + 0x1D6A9: []rune{0x03B2}, // Additional folding + 
0x1D6AA: []rune{0x03B3}, // Additional folding + 0x1D6AB: []rune{0x03B4}, // Additional folding + 0x1D6AC: []rune{0x03B5}, // Additional folding + 0x1D6AD: []rune{0x03B6}, // Additional folding + 0x1D6AE: []rune{0x03B7}, // Additional folding + 0x1D6AF: []rune{0x03B8}, // Additional folding + 0x1D6B0: []rune{0x03B9}, // Additional folding + 0x1D6B1: []rune{0x03BA}, // Additional folding + 0x1D6B2: []rune{0x03BB}, // Additional folding + 0x1D6B3: []rune{0x03BC}, // Additional folding + 0x1D6B4: []rune{0x03BD}, // Additional folding + 0x1D6B5: []rune{0x03BE}, // Additional folding + 0x1D6B6: []rune{0x03BF}, // Additional folding + 0x1D6B7: []rune{0x03C0}, // Additional folding + 0x1D6B8: []rune{0x03C1}, // Additional folding + 0x1D6B9: []rune{0x03B8}, // Additional folding + 0x1D6BA: []rune{0x03C3}, // Additional folding + 0x1D6BB: []rune{0x03C4}, // Additional folding + 0x1D6BC: []rune{0x03C5}, // Additional folding + 0x1D6BD: []rune{0x03C6}, // Additional folding + 0x1D6BE: []rune{0x03C7}, // Additional folding + 0x1D6BF: []rune{0x03C8}, // Additional folding + 0x1D6C0: []rune{0x03C9}, // Additional folding + 0x1D6D3: []rune{0x03C3}, // Additional folding + 0x1D6E2: []rune{0x03B1}, // Additional folding + 0x1D6E3: []rune{0x03B2}, // Additional folding + 0x1D6E4: []rune{0x03B3}, // Additional folding + 0x1D6E5: []rune{0x03B4}, // Additional folding + 0x1D6E6: []rune{0x03B5}, // Additional folding + 0x1D6E7: []rune{0x03B6}, // Additional folding + 0x1D6E8: []rune{0x03B7}, // Additional folding + 0x1D6E9: []rune{0x03B8}, // Additional folding + 0x1D6EA: []rune{0x03B9}, // Additional folding + 0x1D6EB: []rune{0x03BA}, // Additional folding + 0x1D6EC: []rune{0x03BB}, // Additional folding + 0x1D6ED: []rune{0x03BC}, // Additional folding + 0x1D6EE: []rune{0x03BD}, // Additional folding + 0x1D6EF: []rune{0x03BE}, // Additional folding + 0x1D6F0: []rune{0x03BF}, // Additional folding + 0x1D6F1: []rune{0x03C0}, // Additional folding + 0x1D6F2: []rune{0x03C1}, // Additional folding + 0x1D6F3: []rune{0x03B8}, // Additional folding + 0x1D6F4: []rune{0x03C3}, // Additional folding + 0x1D6F5: []rune{0x03C4}, // Additional folding + 0x1D6F6: []rune{0x03C5}, // Additional folding + 0x1D6F7: []rune{0x03C6}, // Additional folding + 0x1D6F8: []rune{0x03C7}, // Additional folding + 0x1D6F9: []rune{0x03C8}, // Additional folding + 0x1D6FA: []rune{0x03C9}, // Additional folding + 0x1D70D: []rune{0x03C3}, // Additional folding + 0x1D71C: []rune{0x03B1}, // Additional folding + 0x1D71D: []rune{0x03B2}, // Additional folding + 0x1D71E: []rune{0x03B3}, // Additional folding + 0x1D71F: []rune{0x03B4}, // Additional folding + 0x1D720: []rune{0x03B5}, // Additional folding + 0x1D721: []rune{0x03B6}, // Additional folding + 0x1D722: []rune{0x03B7}, // Additional folding + 0x1D723: []rune{0x03B8}, // Additional folding + 0x1D724: []rune{0x03B9}, // Additional folding + 0x1D725: []rune{0x03BA}, // Additional folding + 0x1D726: []rune{0x03BB}, // Additional folding + 0x1D727: []rune{0x03BC}, // Additional folding + 0x1D728: []rune{0x03BD}, // Additional folding + 0x1D729: []rune{0x03BE}, // Additional folding + 0x1D72A: []rune{0x03BF}, // Additional folding + 0x1D72B: []rune{0x03C0}, // Additional folding + 0x1D72C: []rune{0x03C1}, // Additional folding + 0x1D72D: []rune{0x03B8}, // Additional folding + 0x1D72E: []rune{0x03C3}, // Additional folding + 0x1D72F: []rune{0x03C4}, // Additional folding + 0x1D730: []rune{0x03C5}, // Additional folding + 0x1D731: []rune{0x03C6}, // Additional folding + 0x1D732: []rune{0x03C7}, 
// Additional folding + 0x1D733: []rune{0x03C8}, // Additional folding + 0x1D734: []rune{0x03C9}, // Additional folding + 0x1D747: []rune{0x03C3}, // Additional folding + 0x1D756: []rune{0x03B1}, // Additional folding + 0x1D757: []rune{0x03B2}, // Additional folding + 0x1D758: []rune{0x03B3}, // Additional folding + 0x1D759: []rune{0x03B4}, // Additional folding + 0x1D75A: []rune{0x03B5}, // Additional folding + 0x1D75B: []rune{0x03B6}, // Additional folding + 0x1D75C: []rune{0x03B7}, // Additional folding + 0x1D75D: []rune{0x03B8}, // Additional folding + 0x1D75E: []rune{0x03B9}, // Additional folding + 0x1D75F: []rune{0x03BA}, // Additional folding + 0x1D760: []rune{0x03BB}, // Additional folding + 0x1D761: []rune{0x03BC}, // Additional folding + 0x1D762: []rune{0x03BD}, // Additional folding + 0x1D763: []rune{0x03BE}, // Additional folding + 0x1D764: []rune{0x03BF}, // Additional folding + 0x1D765: []rune{0x03C0}, // Additional folding + 0x1D766: []rune{0x03C1}, // Additional folding + 0x1D767: []rune{0x03B8}, // Additional folding + 0x1D768: []rune{0x03C3}, // Additional folding + 0x1D769: []rune{0x03C4}, // Additional folding + 0x1D76A: []rune{0x03C5}, // Additional folding + 0x1D76B: []rune{0x03C6}, // Additional folding + 0x1D76C: []rune{0x03C7}, // Additional folding + 0x1D76D: []rune{0x03C8}, // Additional folding + 0x1D76E: []rune{0x03C9}, // Additional folding + 0x1D781: []rune{0x03C3}, // Additional folding + 0x1D790: []rune{0x03B1}, // Additional folding + 0x1D791: []rune{0x03B2}, // Additional folding + 0x1D792: []rune{0x03B3}, // Additional folding + 0x1D793: []rune{0x03B4}, // Additional folding + 0x1D794: []rune{0x03B5}, // Additional folding + 0x1D795: []rune{0x03B6}, // Additional folding + 0x1D796: []rune{0x03B7}, // Additional folding + 0x1D797: []rune{0x03B8}, // Additional folding + 0x1D798: []rune{0x03B9}, // Additional folding + 0x1D799: []rune{0x03BA}, // Additional folding + 0x1D79A: []rune{0x03BB}, // Additional folding + 0x1D79B: []rune{0x03BC}, // Additional folding + 0x1D79C: []rune{0x03BD}, // Additional folding + 0x1D79D: []rune{0x03BE}, // Additional folding + 0x1D79E: []rune{0x03BF}, // Additional folding + 0x1D79F: []rune{0x03C0}, // Additional folding + 0x1D7A0: []rune{0x03C1}, // Additional folding + 0x1D7A1: []rune{0x03B8}, // Additional folding + 0x1D7A2: []rune{0x03C3}, // Additional folding + 0x1D7A3: []rune{0x03C4}, // Additional folding + 0x1D7A4: []rune{0x03C5}, // Additional folding + 0x1D7A5: []rune{0x03C6}, // Additional folding + 0x1D7A6: []rune{0x03C7}, // Additional folding + 0x1D7A7: []rune{0x03C8}, // Additional folding + 0x1D7A8: []rune{0x03C9}, // Additional folding + 0x1D7BB: []rune{0x03C3}, // Additional folding +} + +// TableB2 represents RFC-3454 Table B.2. 
+var TableB2 Mapping = tableB2 + +var tableB3 = Mapping{ + 0x0041: []rune{0x0061}, // Case map + 0x0042: []rune{0x0062}, // Case map + 0x0043: []rune{0x0063}, // Case map + 0x0044: []rune{0x0064}, // Case map + 0x0045: []rune{0x0065}, // Case map + 0x0046: []rune{0x0066}, // Case map + 0x0047: []rune{0x0067}, // Case map + 0x0048: []rune{0x0068}, // Case map + 0x0049: []rune{0x0069}, // Case map + 0x004A: []rune{0x006A}, // Case map + 0x004B: []rune{0x006B}, // Case map + 0x004C: []rune{0x006C}, // Case map + 0x004D: []rune{0x006D}, // Case map + 0x004E: []rune{0x006E}, // Case map + 0x004F: []rune{0x006F}, // Case map + 0x0050: []rune{0x0070}, // Case map + 0x0051: []rune{0x0071}, // Case map + 0x0052: []rune{0x0072}, // Case map + 0x0053: []rune{0x0073}, // Case map + 0x0054: []rune{0x0074}, // Case map + 0x0055: []rune{0x0075}, // Case map + 0x0056: []rune{0x0076}, // Case map + 0x0057: []rune{0x0077}, // Case map + 0x0058: []rune{0x0078}, // Case map + 0x0059: []rune{0x0079}, // Case map + 0x005A: []rune{0x007A}, // Case map + 0x00B5: []rune{0x03BC}, // Case map + 0x00C0: []rune{0x00E0}, // Case map + 0x00C1: []rune{0x00E1}, // Case map + 0x00C2: []rune{0x00E2}, // Case map + 0x00C3: []rune{0x00E3}, // Case map + 0x00C4: []rune{0x00E4}, // Case map + 0x00C5: []rune{0x00E5}, // Case map + 0x00C6: []rune{0x00E6}, // Case map + 0x00C7: []rune{0x00E7}, // Case map + 0x00C8: []rune{0x00E8}, // Case map + 0x00C9: []rune{0x00E9}, // Case map + 0x00CA: []rune{0x00EA}, // Case map + 0x00CB: []rune{0x00EB}, // Case map + 0x00CC: []rune{0x00EC}, // Case map + 0x00CD: []rune{0x00ED}, // Case map + 0x00CE: []rune{0x00EE}, // Case map + 0x00CF: []rune{0x00EF}, // Case map + 0x00D0: []rune{0x00F0}, // Case map + 0x00D1: []rune{0x00F1}, // Case map + 0x00D2: []rune{0x00F2}, // Case map + 0x00D3: []rune{0x00F3}, // Case map + 0x00D4: []rune{0x00F4}, // Case map + 0x00D5: []rune{0x00F5}, // Case map + 0x00D6: []rune{0x00F6}, // Case map + 0x00D8: []rune{0x00F8}, // Case map + 0x00D9: []rune{0x00F9}, // Case map + 0x00DA: []rune{0x00FA}, // Case map + 0x00DB: []rune{0x00FB}, // Case map + 0x00DC: []rune{0x00FC}, // Case map + 0x00DD: []rune{0x00FD}, // Case map + 0x00DE: []rune{0x00FE}, // Case map + 0x00DF: []rune{0x0073, 0x0073}, // Case map + 0x0100: []rune{0x0101}, // Case map + 0x0102: []rune{0x0103}, // Case map + 0x0104: []rune{0x0105}, // Case map + 0x0106: []rune{0x0107}, // Case map + 0x0108: []rune{0x0109}, // Case map + 0x010A: []rune{0x010B}, // Case map + 0x010C: []rune{0x010D}, // Case map + 0x010E: []rune{0x010F}, // Case map + 0x0110: []rune{0x0111}, // Case map + 0x0112: []rune{0x0113}, // Case map + 0x0114: []rune{0x0115}, // Case map + 0x0116: []rune{0x0117}, // Case map + 0x0118: []rune{0x0119}, // Case map + 0x011A: []rune{0x011B}, // Case map + 0x011C: []rune{0x011D}, // Case map + 0x011E: []rune{0x011F}, // Case map + 0x0120: []rune{0x0121}, // Case map + 0x0122: []rune{0x0123}, // Case map + 0x0124: []rune{0x0125}, // Case map + 0x0126: []rune{0x0127}, // Case map + 0x0128: []rune{0x0129}, // Case map + 0x012A: []rune{0x012B}, // Case map + 0x012C: []rune{0x012D}, // Case map + 0x012E: []rune{0x012F}, // Case map + 0x0130: []rune{0x0069, 0x0307}, // Case map + 0x0132: []rune{0x0133}, // Case map + 0x0134: []rune{0x0135}, // Case map + 0x0136: []rune{0x0137}, // Case map + 0x0139: []rune{0x013A}, // Case map + 0x013B: []rune{0x013C}, // Case map + 0x013D: []rune{0x013E}, // Case map + 0x013F: []rune{0x0140}, // Case map + 0x0141: []rune{0x0142}, // Case map + 0x0143: 
[]rune{0x0144}, // Case map + 0x0145: []rune{0x0146}, // Case map + 0x0147: []rune{0x0148}, // Case map + 0x0149: []rune{0x02BC, 0x006E}, // Case map + 0x014A: []rune{0x014B}, // Case map + 0x014C: []rune{0x014D}, // Case map + 0x014E: []rune{0x014F}, // Case map + 0x0150: []rune{0x0151}, // Case map + 0x0152: []rune{0x0153}, // Case map + 0x0154: []rune{0x0155}, // Case map + 0x0156: []rune{0x0157}, // Case map + 0x0158: []rune{0x0159}, // Case map + 0x015A: []rune{0x015B}, // Case map + 0x015C: []rune{0x015D}, // Case map + 0x015E: []rune{0x015F}, // Case map + 0x0160: []rune{0x0161}, // Case map + 0x0162: []rune{0x0163}, // Case map + 0x0164: []rune{0x0165}, // Case map + 0x0166: []rune{0x0167}, // Case map + 0x0168: []rune{0x0169}, // Case map + 0x016A: []rune{0x016B}, // Case map + 0x016C: []rune{0x016D}, // Case map + 0x016E: []rune{0x016F}, // Case map + 0x0170: []rune{0x0171}, // Case map + 0x0172: []rune{0x0173}, // Case map + 0x0174: []rune{0x0175}, // Case map + 0x0176: []rune{0x0177}, // Case map + 0x0178: []rune{0x00FF}, // Case map + 0x0179: []rune{0x017A}, // Case map + 0x017B: []rune{0x017C}, // Case map + 0x017D: []rune{0x017E}, // Case map + 0x017F: []rune{0x0073}, // Case map + 0x0181: []rune{0x0253}, // Case map + 0x0182: []rune{0x0183}, // Case map + 0x0184: []rune{0x0185}, // Case map + 0x0186: []rune{0x0254}, // Case map + 0x0187: []rune{0x0188}, // Case map + 0x0189: []rune{0x0256}, // Case map + 0x018A: []rune{0x0257}, // Case map + 0x018B: []rune{0x018C}, // Case map + 0x018E: []rune{0x01DD}, // Case map + 0x018F: []rune{0x0259}, // Case map + 0x0190: []rune{0x025B}, // Case map + 0x0191: []rune{0x0192}, // Case map + 0x0193: []rune{0x0260}, // Case map + 0x0194: []rune{0x0263}, // Case map + 0x0196: []rune{0x0269}, // Case map + 0x0197: []rune{0x0268}, // Case map + 0x0198: []rune{0x0199}, // Case map + 0x019C: []rune{0x026F}, // Case map + 0x019D: []rune{0x0272}, // Case map + 0x019F: []rune{0x0275}, // Case map + 0x01A0: []rune{0x01A1}, // Case map + 0x01A2: []rune{0x01A3}, // Case map + 0x01A4: []rune{0x01A5}, // Case map + 0x01A6: []rune{0x0280}, // Case map + 0x01A7: []rune{0x01A8}, // Case map + 0x01A9: []rune{0x0283}, // Case map + 0x01AC: []rune{0x01AD}, // Case map + 0x01AE: []rune{0x0288}, // Case map + 0x01AF: []rune{0x01B0}, // Case map + 0x01B1: []rune{0x028A}, // Case map + 0x01B2: []rune{0x028B}, // Case map + 0x01B3: []rune{0x01B4}, // Case map + 0x01B5: []rune{0x01B6}, // Case map + 0x01B7: []rune{0x0292}, // Case map + 0x01B8: []rune{0x01B9}, // Case map + 0x01BC: []rune{0x01BD}, // Case map + 0x01C4: []rune{0x01C6}, // Case map + 0x01C5: []rune{0x01C6}, // Case map + 0x01C7: []rune{0x01C9}, // Case map + 0x01C8: []rune{0x01C9}, // Case map + 0x01CA: []rune{0x01CC}, // Case map + 0x01CB: []rune{0x01CC}, // Case map + 0x01CD: []rune{0x01CE}, // Case map + 0x01CF: []rune{0x01D0}, // Case map + 0x01D1: []rune{0x01D2}, // Case map + 0x01D3: []rune{0x01D4}, // Case map + 0x01D5: []rune{0x01D6}, // Case map + 0x01D7: []rune{0x01D8}, // Case map + 0x01D9: []rune{0x01DA}, // Case map + 0x01DB: []rune{0x01DC}, // Case map + 0x01DE: []rune{0x01DF}, // Case map + 0x01E0: []rune{0x01E1}, // Case map + 0x01E2: []rune{0x01E3}, // Case map + 0x01E4: []rune{0x01E5}, // Case map + 0x01E6: []rune{0x01E7}, // Case map + 0x01E8: []rune{0x01E9}, // Case map + 0x01EA: []rune{0x01EB}, // Case map + 0x01EC: []rune{0x01ED}, // Case map + 0x01EE: []rune{0x01EF}, // Case map + 0x01F0: []rune{0x006A, 0x030C}, // Case map + 0x01F1: []rune{0x01F3}, // Case map + 0x01F2: 
[]rune{0x01F3}, // Case map + 0x01F4: []rune{0x01F5}, // Case map + 0x01F6: []rune{0x0195}, // Case map + 0x01F7: []rune{0x01BF}, // Case map + 0x01F8: []rune{0x01F9}, // Case map + 0x01FA: []rune{0x01FB}, // Case map + 0x01FC: []rune{0x01FD}, // Case map + 0x01FE: []rune{0x01FF}, // Case map + 0x0200: []rune{0x0201}, // Case map + 0x0202: []rune{0x0203}, // Case map + 0x0204: []rune{0x0205}, // Case map + 0x0206: []rune{0x0207}, // Case map + 0x0208: []rune{0x0209}, // Case map + 0x020A: []rune{0x020B}, // Case map + 0x020C: []rune{0x020D}, // Case map + 0x020E: []rune{0x020F}, // Case map + 0x0210: []rune{0x0211}, // Case map + 0x0212: []rune{0x0213}, // Case map + 0x0214: []rune{0x0215}, // Case map + 0x0216: []rune{0x0217}, // Case map + 0x0218: []rune{0x0219}, // Case map + 0x021A: []rune{0x021B}, // Case map + 0x021C: []rune{0x021D}, // Case map + 0x021E: []rune{0x021F}, // Case map + 0x0220: []rune{0x019E}, // Case map + 0x0222: []rune{0x0223}, // Case map + 0x0224: []rune{0x0225}, // Case map + 0x0226: []rune{0x0227}, // Case map + 0x0228: []rune{0x0229}, // Case map + 0x022A: []rune{0x022B}, // Case map + 0x022C: []rune{0x022D}, // Case map + 0x022E: []rune{0x022F}, // Case map + 0x0230: []rune{0x0231}, // Case map + 0x0232: []rune{0x0233}, // Case map + 0x0345: []rune{0x03B9}, // Case map + 0x0386: []rune{0x03AC}, // Case map + 0x0388: []rune{0x03AD}, // Case map + 0x0389: []rune{0x03AE}, // Case map + 0x038A: []rune{0x03AF}, // Case map + 0x038C: []rune{0x03CC}, // Case map + 0x038E: []rune{0x03CD}, // Case map + 0x038F: []rune{0x03CE}, // Case map + 0x0390: []rune{0x03B9, 0x0308, 0x0301}, // Case map + 0x0391: []rune{0x03B1}, // Case map + 0x0392: []rune{0x03B2}, // Case map + 0x0393: []rune{0x03B3}, // Case map + 0x0394: []rune{0x03B4}, // Case map + 0x0395: []rune{0x03B5}, // Case map + 0x0396: []rune{0x03B6}, // Case map + 0x0397: []rune{0x03B7}, // Case map + 0x0398: []rune{0x03B8}, // Case map + 0x0399: []rune{0x03B9}, // Case map + 0x039A: []rune{0x03BA}, // Case map + 0x039B: []rune{0x03BB}, // Case map + 0x039C: []rune{0x03BC}, // Case map + 0x039D: []rune{0x03BD}, // Case map + 0x039E: []rune{0x03BE}, // Case map + 0x039F: []rune{0x03BF}, // Case map + 0x03A0: []rune{0x03C0}, // Case map + 0x03A1: []rune{0x03C1}, // Case map + 0x03A3: []rune{0x03C3}, // Case map + 0x03A4: []rune{0x03C4}, // Case map + 0x03A5: []rune{0x03C5}, // Case map + 0x03A6: []rune{0x03C6}, // Case map + 0x03A7: []rune{0x03C7}, // Case map + 0x03A8: []rune{0x03C8}, // Case map + 0x03A9: []rune{0x03C9}, // Case map + 0x03AA: []rune{0x03CA}, // Case map + 0x03AB: []rune{0x03CB}, // Case map + 0x03B0: []rune{0x03C5, 0x0308, 0x0301}, // Case map + 0x03C2: []rune{0x03C3}, // Case map + 0x03D0: []rune{0x03B2}, // Case map + 0x03D1: []rune{0x03B8}, // Case map + 0x03D5: []rune{0x03C6}, // Case map + 0x03D6: []rune{0x03C0}, // Case map + 0x03D8: []rune{0x03D9}, // Case map + 0x03DA: []rune{0x03DB}, // Case map + 0x03DC: []rune{0x03DD}, // Case map + 0x03DE: []rune{0x03DF}, // Case map + 0x03E0: []rune{0x03E1}, // Case map + 0x03E2: []rune{0x03E3}, // Case map + 0x03E4: []rune{0x03E5}, // Case map + 0x03E6: []rune{0x03E7}, // Case map + 0x03E8: []rune{0x03E9}, // Case map + 0x03EA: []rune{0x03EB}, // Case map + 0x03EC: []rune{0x03ED}, // Case map + 0x03EE: []rune{0x03EF}, // Case map + 0x03F0: []rune{0x03BA}, // Case map + 0x03F1: []rune{0x03C1}, // Case map + 0x03F2: []rune{0x03C3}, // Case map + 0x03F4: []rune{0x03B8}, // Case map + 0x03F5: []rune{0x03B5}, // Case map + 0x0400: []rune{0x0450}, // Case 
map + 0x0401: []rune{0x0451}, // Case map + 0x0402: []rune{0x0452}, // Case map + 0x0403: []rune{0x0453}, // Case map + 0x0404: []rune{0x0454}, // Case map + 0x0405: []rune{0x0455}, // Case map + 0x0406: []rune{0x0456}, // Case map + 0x0407: []rune{0x0457}, // Case map + 0x0408: []rune{0x0458}, // Case map + 0x0409: []rune{0x0459}, // Case map + 0x040A: []rune{0x045A}, // Case map + 0x040B: []rune{0x045B}, // Case map + 0x040C: []rune{0x045C}, // Case map + 0x040D: []rune{0x045D}, // Case map + 0x040E: []rune{0x045E}, // Case map + 0x040F: []rune{0x045F}, // Case map + 0x0410: []rune{0x0430}, // Case map + 0x0411: []rune{0x0431}, // Case map + 0x0412: []rune{0x0432}, // Case map + 0x0413: []rune{0x0433}, // Case map + 0x0414: []rune{0x0434}, // Case map + 0x0415: []rune{0x0435}, // Case map + 0x0416: []rune{0x0436}, // Case map + 0x0417: []rune{0x0437}, // Case map + 0x0418: []rune{0x0438}, // Case map + 0x0419: []rune{0x0439}, // Case map + 0x041A: []rune{0x043A}, // Case map + 0x041B: []rune{0x043B}, // Case map + 0x041C: []rune{0x043C}, // Case map + 0x041D: []rune{0x043D}, // Case map + 0x041E: []rune{0x043E}, // Case map + 0x041F: []rune{0x043F}, // Case map + 0x0420: []rune{0x0440}, // Case map + 0x0421: []rune{0x0441}, // Case map + 0x0422: []rune{0x0442}, // Case map + 0x0423: []rune{0x0443}, // Case map + 0x0424: []rune{0x0444}, // Case map + 0x0425: []rune{0x0445}, // Case map + 0x0426: []rune{0x0446}, // Case map + 0x0427: []rune{0x0447}, // Case map + 0x0428: []rune{0x0448}, // Case map + 0x0429: []rune{0x0449}, // Case map + 0x042A: []rune{0x044A}, // Case map + 0x042B: []rune{0x044B}, // Case map + 0x042C: []rune{0x044C}, // Case map + 0x042D: []rune{0x044D}, // Case map + 0x042E: []rune{0x044E}, // Case map + 0x042F: []rune{0x044F}, // Case map + 0x0460: []rune{0x0461}, // Case map + 0x0462: []rune{0x0463}, // Case map + 0x0464: []rune{0x0465}, // Case map + 0x0466: []rune{0x0467}, // Case map + 0x0468: []rune{0x0469}, // Case map + 0x046A: []rune{0x046B}, // Case map + 0x046C: []rune{0x046D}, // Case map + 0x046E: []rune{0x046F}, // Case map + 0x0470: []rune{0x0471}, // Case map + 0x0472: []rune{0x0473}, // Case map + 0x0474: []rune{0x0475}, // Case map + 0x0476: []rune{0x0477}, // Case map + 0x0478: []rune{0x0479}, // Case map + 0x047A: []rune{0x047B}, // Case map + 0x047C: []rune{0x047D}, // Case map + 0x047E: []rune{0x047F}, // Case map + 0x0480: []rune{0x0481}, // Case map + 0x048A: []rune{0x048B}, // Case map + 0x048C: []rune{0x048D}, // Case map + 0x048E: []rune{0x048F}, // Case map + 0x0490: []rune{0x0491}, // Case map + 0x0492: []rune{0x0493}, // Case map + 0x0494: []rune{0x0495}, // Case map + 0x0496: []rune{0x0497}, // Case map + 0x0498: []rune{0x0499}, // Case map + 0x049A: []rune{0x049B}, // Case map + 0x049C: []rune{0x049D}, // Case map + 0x049E: []rune{0x049F}, // Case map + 0x04A0: []rune{0x04A1}, // Case map + 0x04A2: []rune{0x04A3}, // Case map + 0x04A4: []rune{0x04A5}, // Case map + 0x04A6: []rune{0x04A7}, // Case map + 0x04A8: []rune{0x04A9}, // Case map + 0x04AA: []rune{0x04AB}, // Case map + 0x04AC: []rune{0x04AD}, // Case map + 0x04AE: []rune{0x04AF}, // Case map + 0x04B0: []rune{0x04B1}, // Case map + 0x04B2: []rune{0x04B3}, // Case map + 0x04B4: []rune{0x04B5}, // Case map + 0x04B6: []rune{0x04B7}, // Case map + 0x04B8: []rune{0x04B9}, // Case map + 0x04BA: []rune{0x04BB}, // Case map + 0x04BC: []rune{0x04BD}, // Case map + 0x04BE: []rune{0x04BF}, // Case map + 0x04C1: []rune{0x04C2}, // Case map + 0x04C3: []rune{0x04C4}, // Case map + 0x04C5: 
[]rune{0x04C6}, // Case map + 0x04C7: []rune{0x04C8}, // Case map + 0x04C9: []rune{0x04CA}, // Case map + 0x04CB: []rune{0x04CC}, // Case map + 0x04CD: []rune{0x04CE}, // Case map + 0x04D0: []rune{0x04D1}, // Case map + 0x04D2: []rune{0x04D3}, // Case map + 0x04D4: []rune{0x04D5}, // Case map + 0x04D6: []rune{0x04D7}, // Case map + 0x04D8: []rune{0x04D9}, // Case map + 0x04DA: []rune{0x04DB}, // Case map + 0x04DC: []rune{0x04DD}, // Case map + 0x04DE: []rune{0x04DF}, // Case map + 0x04E0: []rune{0x04E1}, // Case map + 0x04E2: []rune{0x04E3}, // Case map + 0x04E4: []rune{0x04E5}, // Case map + 0x04E6: []rune{0x04E7}, // Case map + 0x04E8: []rune{0x04E9}, // Case map + 0x04EA: []rune{0x04EB}, // Case map + 0x04EC: []rune{0x04ED}, // Case map + 0x04EE: []rune{0x04EF}, // Case map + 0x04F0: []rune{0x04F1}, // Case map + 0x04F2: []rune{0x04F3}, // Case map + 0x04F4: []rune{0x04F5}, // Case map + 0x04F8: []rune{0x04F9}, // Case map + 0x0500: []rune{0x0501}, // Case map + 0x0502: []rune{0x0503}, // Case map + 0x0504: []rune{0x0505}, // Case map + 0x0506: []rune{0x0507}, // Case map + 0x0508: []rune{0x0509}, // Case map + 0x050A: []rune{0x050B}, // Case map + 0x050C: []rune{0x050D}, // Case map + 0x050E: []rune{0x050F}, // Case map + 0x0531: []rune{0x0561}, // Case map + 0x0532: []rune{0x0562}, // Case map + 0x0533: []rune{0x0563}, // Case map + 0x0534: []rune{0x0564}, // Case map + 0x0535: []rune{0x0565}, // Case map + 0x0536: []rune{0x0566}, // Case map + 0x0537: []rune{0x0567}, // Case map + 0x0538: []rune{0x0568}, // Case map + 0x0539: []rune{0x0569}, // Case map + 0x053A: []rune{0x056A}, // Case map + 0x053B: []rune{0x056B}, // Case map + 0x053C: []rune{0x056C}, // Case map + 0x053D: []rune{0x056D}, // Case map + 0x053E: []rune{0x056E}, // Case map + 0x053F: []rune{0x056F}, // Case map + 0x0540: []rune{0x0570}, // Case map + 0x0541: []rune{0x0571}, // Case map + 0x0542: []rune{0x0572}, // Case map + 0x0543: []rune{0x0573}, // Case map + 0x0544: []rune{0x0574}, // Case map + 0x0545: []rune{0x0575}, // Case map + 0x0546: []rune{0x0576}, // Case map + 0x0547: []rune{0x0577}, // Case map + 0x0548: []rune{0x0578}, // Case map + 0x0549: []rune{0x0579}, // Case map + 0x054A: []rune{0x057A}, // Case map + 0x054B: []rune{0x057B}, // Case map + 0x054C: []rune{0x057C}, // Case map + 0x054D: []rune{0x057D}, // Case map + 0x054E: []rune{0x057E}, // Case map + 0x054F: []rune{0x057F}, // Case map + 0x0550: []rune{0x0580}, // Case map + 0x0551: []rune{0x0581}, // Case map + 0x0552: []rune{0x0582}, // Case map + 0x0553: []rune{0x0583}, // Case map + 0x0554: []rune{0x0584}, // Case map + 0x0555: []rune{0x0585}, // Case map + 0x0556: []rune{0x0586}, // Case map + 0x0587: []rune{0x0565, 0x0582}, // Case map + 0x1E00: []rune{0x1E01}, // Case map + 0x1E02: []rune{0x1E03}, // Case map + 0x1E04: []rune{0x1E05}, // Case map + 0x1E06: []rune{0x1E07}, // Case map + 0x1E08: []rune{0x1E09}, // Case map + 0x1E0A: []rune{0x1E0B}, // Case map + 0x1E0C: []rune{0x1E0D}, // Case map + 0x1E0E: []rune{0x1E0F}, // Case map + 0x1E10: []rune{0x1E11}, // Case map + 0x1E12: []rune{0x1E13}, // Case map + 0x1E14: []rune{0x1E15}, // Case map + 0x1E16: []rune{0x1E17}, // Case map + 0x1E18: []rune{0x1E19}, // Case map + 0x1E1A: []rune{0x1E1B}, // Case map + 0x1E1C: []rune{0x1E1D}, // Case map + 0x1E1E: []rune{0x1E1F}, // Case map + 0x1E20: []rune{0x1E21}, // Case map + 0x1E22: []rune{0x1E23}, // Case map + 0x1E24: []rune{0x1E25}, // Case map + 0x1E26: []rune{0x1E27}, // Case map + 0x1E28: []rune{0x1E29}, // Case map + 0x1E2A: 
[]rune{0x1E2B}, // Case map + 0x1E2C: []rune{0x1E2D}, // Case map + 0x1E2E: []rune{0x1E2F}, // Case map + 0x1E30: []rune{0x1E31}, // Case map + 0x1E32: []rune{0x1E33}, // Case map + 0x1E34: []rune{0x1E35}, // Case map + 0x1E36: []rune{0x1E37}, // Case map + 0x1E38: []rune{0x1E39}, // Case map + 0x1E3A: []rune{0x1E3B}, // Case map + 0x1E3C: []rune{0x1E3D}, // Case map + 0x1E3E: []rune{0x1E3F}, // Case map + 0x1E40: []rune{0x1E41}, // Case map + 0x1E42: []rune{0x1E43}, // Case map + 0x1E44: []rune{0x1E45}, // Case map + 0x1E46: []rune{0x1E47}, // Case map + 0x1E48: []rune{0x1E49}, // Case map + 0x1E4A: []rune{0x1E4B}, // Case map + 0x1E4C: []rune{0x1E4D}, // Case map + 0x1E4E: []rune{0x1E4F}, // Case map + 0x1E50: []rune{0x1E51}, // Case map + 0x1E52: []rune{0x1E53}, // Case map + 0x1E54: []rune{0x1E55}, // Case map + 0x1E56: []rune{0x1E57}, // Case map + 0x1E58: []rune{0x1E59}, // Case map + 0x1E5A: []rune{0x1E5B}, // Case map + 0x1E5C: []rune{0x1E5D}, // Case map + 0x1E5E: []rune{0x1E5F}, // Case map + 0x1E60: []rune{0x1E61}, // Case map + 0x1E62: []rune{0x1E63}, // Case map + 0x1E64: []rune{0x1E65}, // Case map + 0x1E66: []rune{0x1E67}, // Case map + 0x1E68: []rune{0x1E69}, // Case map + 0x1E6A: []rune{0x1E6B}, // Case map + 0x1E6C: []rune{0x1E6D}, // Case map + 0x1E6E: []rune{0x1E6F}, // Case map + 0x1E70: []rune{0x1E71}, // Case map + 0x1E72: []rune{0x1E73}, // Case map + 0x1E74: []rune{0x1E75}, // Case map + 0x1E76: []rune{0x1E77}, // Case map + 0x1E78: []rune{0x1E79}, // Case map + 0x1E7A: []rune{0x1E7B}, // Case map + 0x1E7C: []rune{0x1E7D}, // Case map + 0x1E7E: []rune{0x1E7F}, // Case map + 0x1E80: []rune{0x1E81}, // Case map + 0x1E82: []rune{0x1E83}, // Case map + 0x1E84: []rune{0x1E85}, // Case map + 0x1E86: []rune{0x1E87}, // Case map + 0x1E88: []rune{0x1E89}, // Case map + 0x1E8A: []rune{0x1E8B}, // Case map + 0x1E8C: []rune{0x1E8D}, // Case map + 0x1E8E: []rune{0x1E8F}, // Case map + 0x1E90: []rune{0x1E91}, // Case map + 0x1E92: []rune{0x1E93}, // Case map + 0x1E94: []rune{0x1E95}, // Case map + 0x1E96: []rune{0x0068, 0x0331}, // Case map + 0x1E97: []rune{0x0074, 0x0308}, // Case map + 0x1E98: []rune{0x0077, 0x030A}, // Case map + 0x1E99: []rune{0x0079, 0x030A}, // Case map + 0x1E9A: []rune{0x0061, 0x02BE}, // Case map + 0x1E9B: []rune{0x1E61}, // Case map + 0x1EA0: []rune{0x1EA1}, // Case map + 0x1EA2: []rune{0x1EA3}, // Case map + 0x1EA4: []rune{0x1EA5}, // Case map + 0x1EA6: []rune{0x1EA7}, // Case map + 0x1EA8: []rune{0x1EA9}, // Case map + 0x1EAA: []rune{0x1EAB}, // Case map + 0x1EAC: []rune{0x1EAD}, // Case map + 0x1EAE: []rune{0x1EAF}, // Case map + 0x1EB0: []rune{0x1EB1}, // Case map + 0x1EB2: []rune{0x1EB3}, // Case map + 0x1EB4: []rune{0x1EB5}, // Case map + 0x1EB6: []rune{0x1EB7}, // Case map + 0x1EB8: []rune{0x1EB9}, // Case map + 0x1EBA: []rune{0x1EBB}, // Case map + 0x1EBC: []rune{0x1EBD}, // Case map + 0x1EBE: []rune{0x1EBF}, // Case map + 0x1EC0: []rune{0x1EC1}, // Case map + 0x1EC2: []rune{0x1EC3}, // Case map + 0x1EC4: []rune{0x1EC5}, // Case map + 0x1EC6: []rune{0x1EC7}, // Case map + 0x1EC8: []rune{0x1EC9}, // Case map + 0x1ECA: []rune{0x1ECB}, // Case map + 0x1ECC: []rune{0x1ECD}, // Case map + 0x1ECE: []rune{0x1ECF}, // Case map + 0x1ED0: []rune{0x1ED1}, // Case map + 0x1ED2: []rune{0x1ED3}, // Case map + 0x1ED4: []rune{0x1ED5}, // Case map + 0x1ED6: []rune{0x1ED7}, // Case map + 0x1ED8: []rune{0x1ED9}, // Case map + 0x1EDA: []rune{0x1EDB}, // Case map + 0x1EDC: []rune{0x1EDD}, // Case map + 0x1EDE: []rune{0x1EDF}, // Case map + 0x1EE0: []rune{0x1EE1}, // 
Case map + 0x1EE2: []rune{0x1EE3}, // Case map + 0x1EE4: []rune{0x1EE5}, // Case map + 0x1EE6: []rune{0x1EE7}, // Case map + 0x1EE8: []rune{0x1EE9}, // Case map + 0x1EEA: []rune{0x1EEB}, // Case map + 0x1EEC: []rune{0x1EED}, // Case map + 0x1EEE: []rune{0x1EEF}, // Case map + 0x1EF0: []rune{0x1EF1}, // Case map + 0x1EF2: []rune{0x1EF3}, // Case map + 0x1EF4: []rune{0x1EF5}, // Case map + 0x1EF6: []rune{0x1EF7}, // Case map + 0x1EF8: []rune{0x1EF9}, // Case map + 0x1F08: []rune{0x1F00}, // Case map + 0x1F09: []rune{0x1F01}, // Case map + 0x1F0A: []rune{0x1F02}, // Case map + 0x1F0B: []rune{0x1F03}, // Case map + 0x1F0C: []rune{0x1F04}, // Case map + 0x1F0D: []rune{0x1F05}, // Case map + 0x1F0E: []rune{0x1F06}, // Case map + 0x1F0F: []rune{0x1F07}, // Case map + 0x1F18: []rune{0x1F10}, // Case map + 0x1F19: []rune{0x1F11}, // Case map + 0x1F1A: []rune{0x1F12}, // Case map + 0x1F1B: []rune{0x1F13}, // Case map + 0x1F1C: []rune{0x1F14}, // Case map + 0x1F1D: []rune{0x1F15}, // Case map + 0x1F28: []rune{0x1F20}, // Case map + 0x1F29: []rune{0x1F21}, // Case map + 0x1F2A: []rune{0x1F22}, // Case map + 0x1F2B: []rune{0x1F23}, // Case map + 0x1F2C: []rune{0x1F24}, // Case map + 0x1F2D: []rune{0x1F25}, // Case map + 0x1F2E: []rune{0x1F26}, // Case map + 0x1F2F: []rune{0x1F27}, // Case map + 0x1F38: []rune{0x1F30}, // Case map + 0x1F39: []rune{0x1F31}, // Case map + 0x1F3A: []rune{0x1F32}, // Case map + 0x1F3B: []rune{0x1F33}, // Case map + 0x1F3C: []rune{0x1F34}, // Case map + 0x1F3D: []rune{0x1F35}, // Case map + 0x1F3E: []rune{0x1F36}, // Case map + 0x1F3F: []rune{0x1F37}, // Case map + 0x1F48: []rune{0x1F40}, // Case map + 0x1F49: []rune{0x1F41}, // Case map + 0x1F4A: []rune{0x1F42}, // Case map + 0x1F4B: []rune{0x1F43}, // Case map + 0x1F4C: []rune{0x1F44}, // Case map + 0x1F4D: []rune{0x1F45}, // Case map + 0x1F50: []rune{0x03C5, 0x0313}, // Case map + 0x1F52: []rune{0x03C5, 0x0313, 0x0300}, // Case map + 0x1F54: []rune{0x03C5, 0x0313, 0x0301}, // Case map + 0x1F56: []rune{0x03C5, 0x0313, 0x0342}, // Case map + 0x1F59: []rune{0x1F51}, // Case map + 0x1F5B: []rune{0x1F53}, // Case map + 0x1F5D: []rune{0x1F55}, // Case map + 0x1F5F: []rune{0x1F57}, // Case map + 0x1F68: []rune{0x1F60}, // Case map + 0x1F69: []rune{0x1F61}, // Case map + 0x1F6A: []rune{0x1F62}, // Case map + 0x1F6B: []rune{0x1F63}, // Case map + 0x1F6C: []rune{0x1F64}, // Case map + 0x1F6D: []rune{0x1F65}, // Case map + 0x1F6E: []rune{0x1F66}, // Case map + 0x1F6F: []rune{0x1F67}, // Case map + 0x1F80: []rune{0x1F00, 0x03B9}, // Case map + 0x1F81: []rune{0x1F01, 0x03B9}, // Case map + 0x1F82: []rune{0x1F02, 0x03B9}, // Case map + 0x1F83: []rune{0x1F03, 0x03B9}, // Case map + 0x1F84: []rune{0x1F04, 0x03B9}, // Case map + 0x1F85: []rune{0x1F05, 0x03B9}, // Case map + 0x1F86: []rune{0x1F06, 0x03B9}, // Case map + 0x1F87: []rune{0x1F07, 0x03B9}, // Case map + 0x1F88: []rune{0x1F00, 0x03B9}, // Case map + 0x1F89: []rune{0x1F01, 0x03B9}, // Case map + 0x1F8A: []rune{0x1F02, 0x03B9}, // Case map + 0x1F8B: []rune{0x1F03, 0x03B9}, // Case map + 0x1F8C: []rune{0x1F04, 0x03B9}, // Case map + 0x1F8D: []rune{0x1F05, 0x03B9}, // Case map + 0x1F8E: []rune{0x1F06, 0x03B9}, // Case map + 0x1F8F: []rune{0x1F07, 0x03B9}, // Case map + 0x1F90: []rune{0x1F20, 0x03B9}, // Case map + 0x1F91: []rune{0x1F21, 0x03B9}, // Case map + 0x1F92: []rune{0x1F22, 0x03B9}, // Case map + 0x1F93: []rune{0x1F23, 0x03B9}, // Case map + 0x1F94: []rune{0x1F24, 0x03B9}, // Case map + 0x1F95: []rune{0x1F25, 0x03B9}, // Case map + 0x1F96: []rune{0x1F26, 0x03B9}, // Case map 
+ 0x1F97: []rune{0x1F27, 0x03B9}, // Case map + 0x1F98: []rune{0x1F20, 0x03B9}, // Case map + 0x1F99: []rune{0x1F21, 0x03B9}, // Case map + 0x1F9A: []rune{0x1F22, 0x03B9}, // Case map + 0x1F9B: []rune{0x1F23, 0x03B9}, // Case map + 0x1F9C: []rune{0x1F24, 0x03B9}, // Case map + 0x1F9D: []rune{0x1F25, 0x03B9}, // Case map + 0x1F9E: []rune{0x1F26, 0x03B9}, // Case map + 0x1F9F: []rune{0x1F27, 0x03B9}, // Case map + 0x1FA0: []rune{0x1F60, 0x03B9}, // Case map + 0x1FA1: []rune{0x1F61, 0x03B9}, // Case map + 0x1FA2: []rune{0x1F62, 0x03B9}, // Case map + 0x1FA3: []rune{0x1F63, 0x03B9}, // Case map + 0x1FA4: []rune{0x1F64, 0x03B9}, // Case map + 0x1FA5: []rune{0x1F65, 0x03B9}, // Case map + 0x1FA6: []rune{0x1F66, 0x03B9}, // Case map + 0x1FA7: []rune{0x1F67, 0x03B9}, // Case map + 0x1FA8: []rune{0x1F60, 0x03B9}, // Case map + 0x1FA9: []rune{0x1F61, 0x03B9}, // Case map + 0x1FAA: []rune{0x1F62, 0x03B9}, // Case map + 0x1FAB: []rune{0x1F63, 0x03B9}, // Case map + 0x1FAC: []rune{0x1F64, 0x03B9}, // Case map + 0x1FAD: []rune{0x1F65, 0x03B9}, // Case map + 0x1FAE: []rune{0x1F66, 0x03B9}, // Case map + 0x1FAF: []rune{0x1F67, 0x03B9}, // Case map + 0x1FB2: []rune{0x1F70, 0x03B9}, // Case map + 0x1FB3: []rune{0x03B1, 0x03B9}, // Case map + 0x1FB4: []rune{0x03AC, 0x03B9}, // Case map + 0x1FB6: []rune{0x03B1, 0x0342}, // Case map + 0x1FB7: []rune{0x03B1, 0x0342, 0x03B9}, // Case map + 0x1FB8: []rune{0x1FB0}, // Case map + 0x1FB9: []rune{0x1FB1}, // Case map + 0x1FBA: []rune{0x1F70}, // Case map + 0x1FBB: []rune{0x1F71}, // Case map + 0x1FBC: []rune{0x03B1, 0x03B9}, // Case map + 0x1FBE: []rune{0x03B9}, // Case map + 0x1FC2: []rune{0x1F74, 0x03B9}, // Case map + 0x1FC3: []rune{0x03B7, 0x03B9}, // Case map + 0x1FC4: []rune{0x03AE, 0x03B9}, // Case map + 0x1FC6: []rune{0x03B7, 0x0342}, // Case map + 0x1FC7: []rune{0x03B7, 0x0342, 0x03B9}, // Case map + 0x1FC8: []rune{0x1F72}, // Case map + 0x1FC9: []rune{0x1F73}, // Case map + 0x1FCA: []rune{0x1F74}, // Case map + 0x1FCB: []rune{0x1F75}, // Case map + 0x1FCC: []rune{0x03B7, 0x03B9}, // Case map + 0x1FD2: []rune{0x03B9, 0x0308, 0x0300}, // Case map + 0x1FD3: []rune{0x03B9, 0x0308, 0x0301}, // Case map + 0x1FD6: []rune{0x03B9, 0x0342}, // Case map + 0x1FD7: []rune{0x03B9, 0x0308, 0x0342}, // Case map + 0x1FD8: []rune{0x1FD0}, // Case map + 0x1FD9: []rune{0x1FD1}, // Case map + 0x1FDA: []rune{0x1F76}, // Case map + 0x1FDB: []rune{0x1F77}, // Case map + 0x1FE2: []rune{0x03C5, 0x0308, 0x0300}, // Case map + 0x1FE3: []rune{0x03C5, 0x0308, 0x0301}, // Case map + 0x1FE4: []rune{0x03C1, 0x0313}, // Case map + 0x1FE6: []rune{0x03C5, 0x0342}, // Case map + 0x1FE7: []rune{0x03C5, 0x0308, 0x0342}, // Case map + 0x1FE8: []rune{0x1FE0}, // Case map + 0x1FE9: []rune{0x1FE1}, // Case map + 0x1FEA: []rune{0x1F7A}, // Case map + 0x1FEB: []rune{0x1F7B}, // Case map + 0x1FEC: []rune{0x1FE5}, // Case map + 0x1FF2: []rune{0x1F7C, 0x03B9}, // Case map + 0x1FF3: []rune{0x03C9, 0x03B9}, // Case map + 0x1FF4: []rune{0x03CE, 0x03B9}, // Case map + 0x1FF6: []rune{0x03C9, 0x0342}, // Case map + 0x1FF7: []rune{0x03C9, 0x0342, 0x03B9}, // Case map + 0x1FF8: []rune{0x1F78}, // Case map + 0x1FF9: []rune{0x1F79}, // Case map + 0x1FFA: []rune{0x1F7C}, // Case map + 0x1FFB: []rune{0x1F7D}, // Case map + 0x1FFC: []rune{0x03C9, 0x03B9}, // Case map + 0x2126: []rune{0x03C9}, // Case map + 0x212A: []rune{0x006B}, // Case map + 0x212B: []rune{0x00E5}, // Case map + 0x2160: []rune{0x2170}, // Case map + 0x2161: []rune{0x2171}, // Case map + 0x2162: []rune{0x2172}, // Case map + 0x2163: []rune{0x2173}, 
// Case map + 0x2164: []rune{0x2174}, // Case map + 0x2165: []rune{0x2175}, // Case map + 0x2166: []rune{0x2176}, // Case map + 0x2167: []rune{0x2177}, // Case map + 0x2168: []rune{0x2178}, // Case map + 0x2169: []rune{0x2179}, // Case map + 0x216A: []rune{0x217A}, // Case map + 0x216B: []rune{0x217B}, // Case map + 0x216C: []rune{0x217C}, // Case map + 0x216D: []rune{0x217D}, // Case map + 0x216E: []rune{0x217E}, // Case map + 0x216F: []rune{0x217F}, // Case map + 0x24B6: []rune{0x24D0}, // Case map + 0x24B7: []rune{0x24D1}, // Case map + 0x24B8: []rune{0x24D2}, // Case map + 0x24B9: []rune{0x24D3}, // Case map + 0x24BA: []rune{0x24D4}, // Case map + 0x24BB: []rune{0x24D5}, // Case map + 0x24BC: []rune{0x24D6}, // Case map + 0x24BD: []rune{0x24D7}, // Case map + 0x24BE: []rune{0x24D8}, // Case map + 0x24BF: []rune{0x24D9}, // Case map + 0x24C0: []rune{0x24DA}, // Case map + 0x24C1: []rune{0x24DB}, // Case map + 0x24C2: []rune{0x24DC}, // Case map + 0x24C3: []rune{0x24DD}, // Case map + 0x24C4: []rune{0x24DE}, // Case map + 0x24C5: []rune{0x24DF}, // Case map + 0x24C6: []rune{0x24E0}, // Case map + 0x24C7: []rune{0x24E1}, // Case map + 0x24C8: []rune{0x24E2}, // Case map + 0x24C9: []rune{0x24E3}, // Case map + 0x24CA: []rune{0x24E4}, // Case map + 0x24CB: []rune{0x24E5}, // Case map + 0x24CC: []rune{0x24E6}, // Case map + 0x24CD: []rune{0x24E7}, // Case map + 0x24CE: []rune{0x24E8}, // Case map + 0x24CF: []rune{0x24E9}, // Case map + 0xFB00: []rune{0x0066, 0x0066}, // Case map + 0xFB01: []rune{0x0066, 0x0069}, // Case map + 0xFB02: []rune{0x0066, 0x006C}, // Case map + 0xFB03: []rune{0x0066, 0x0066, 0x0069}, // Case map + 0xFB04: []rune{0x0066, 0x0066, 0x006C}, // Case map + 0xFB05: []rune{0x0073, 0x0074}, // Case map + 0xFB06: []rune{0x0073, 0x0074}, // Case map + 0xFB13: []rune{0x0574, 0x0576}, // Case map + 0xFB14: []rune{0x0574, 0x0565}, // Case map + 0xFB15: []rune{0x0574, 0x056B}, // Case map + 0xFB16: []rune{0x057E, 0x0576}, // Case map + 0xFB17: []rune{0x0574, 0x056D}, // Case map + 0xFF21: []rune{0xFF41}, // Case map + 0xFF22: []rune{0xFF42}, // Case map + 0xFF23: []rune{0xFF43}, // Case map + 0xFF24: []rune{0xFF44}, // Case map + 0xFF25: []rune{0xFF45}, // Case map + 0xFF26: []rune{0xFF46}, // Case map + 0xFF27: []rune{0xFF47}, // Case map + 0xFF28: []rune{0xFF48}, // Case map + 0xFF29: []rune{0xFF49}, // Case map + 0xFF2A: []rune{0xFF4A}, // Case map + 0xFF2B: []rune{0xFF4B}, // Case map + 0xFF2C: []rune{0xFF4C}, // Case map + 0xFF2D: []rune{0xFF4D}, // Case map + 0xFF2E: []rune{0xFF4E}, // Case map + 0xFF2F: []rune{0xFF4F}, // Case map + 0xFF30: []rune{0xFF50}, // Case map + 0xFF31: []rune{0xFF51}, // Case map + 0xFF32: []rune{0xFF52}, // Case map + 0xFF33: []rune{0xFF53}, // Case map + 0xFF34: []rune{0xFF54}, // Case map + 0xFF35: []rune{0xFF55}, // Case map + 0xFF36: []rune{0xFF56}, // Case map + 0xFF37: []rune{0xFF57}, // Case map + 0xFF38: []rune{0xFF58}, // Case map + 0xFF39: []rune{0xFF59}, // Case map + 0xFF3A: []rune{0xFF5A}, // Case map + 0x10400: []rune{0x10428}, // Case map + 0x10401: []rune{0x10429}, // Case map + 0x10402: []rune{0x1042A}, // Case map + 0x10403: []rune{0x1042B}, // Case map + 0x10404: []rune{0x1042C}, // Case map + 0x10405: []rune{0x1042D}, // Case map + 0x10406: []rune{0x1042E}, // Case map + 0x10407: []rune{0x1042F}, // Case map + 0x10408: []rune{0x10430}, // Case map + 0x10409: []rune{0x10431}, // Case map + 0x1040A: []rune{0x10432}, // Case map + 0x1040B: []rune{0x10433}, // Case map + 0x1040C: []rune{0x10434}, // Case map + 0x1040D: 
[]rune{0x10435}, // Case map + 0x1040E: []rune{0x10436}, // Case map + 0x1040F: []rune{0x10437}, // Case map + 0x10410: []rune{0x10438}, // Case map + 0x10411: []rune{0x10439}, // Case map + 0x10412: []rune{0x1043A}, // Case map + 0x10413: []rune{0x1043B}, // Case map + 0x10414: []rune{0x1043C}, // Case map + 0x10415: []rune{0x1043D}, // Case map + 0x10416: []rune{0x1043E}, // Case map + 0x10417: []rune{0x1043F}, // Case map + 0x10418: []rune{0x10440}, // Case map + 0x10419: []rune{0x10441}, // Case map + 0x1041A: []rune{0x10442}, // Case map + 0x1041B: []rune{0x10443}, // Case map + 0x1041C: []rune{0x10444}, // Case map + 0x1041D: []rune{0x10445}, // Case map + 0x1041E: []rune{0x10446}, // Case map + 0x1041F: []rune{0x10447}, // Case map + 0x10420: []rune{0x10448}, // Case map + 0x10421: []rune{0x10449}, // Case map + 0x10422: []rune{0x1044A}, // Case map + 0x10423: []rune{0x1044B}, // Case map + 0x10424: []rune{0x1044C}, // Case map + 0x10425: []rune{0x1044D}, // Case map +} + +// TableB3 represents RFC-3454 Table B.3. +var TableB3 Mapping = tableB3 + +var tableC1_1 = Set{ + RuneRange{0x0020, 0x0020}, // SPACE +} + +// TableC1_1 represents RFC-3454 Table C.1.1. +var TableC1_1 Set = tableC1_1 + +var tableC1_2 = Set{ + RuneRange{0x00A0, 0x00A0}, // NO-BREAK SPACE + RuneRange{0x1680, 0x1680}, // OGHAM SPACE MARK + RuneRange{0x2000, 0x2000}, // EN QUAD + RuneRange{0x2001, 0x2001}, // EM QUAD + RuneRange{0x2002, 0x2002}, // EN SPACE + RuneRange{0x2003, 0x2003}, // EM SPACE + RuneRange{0x2004, 0x2004}, // THREE-PER-EM SPACE + RuneRange{0x2005, 0x2005}, // FOUR-PER-EM SPACE + RuneRange{0x2006, 0x2006}, // SIX-PER-EM SPACE + RuneRange{0x2007, 0x2007}, // FIGURE SPACE + RuneRange{0x2008, 0x2008}, // PUNCTUATION SPACE + RuneRange{0x2009, 0x2009}, // THIN SPACE + RuneRange{0x200A, 0x200A}, // HAIR SPACE + RuneRange{0x200B, 0x200B}, // ZERO WIDTH SPACE + RuneRange{0x202F, 0x202F}, // NARROW NO-BREAK SPACE + RuneRange{0x205F, 0x205F}, // MEDIUM MATHEMATICAL SPACE + RuneRange{0x3000, 0x3000}, // IDEOGRAPHIC SPACE +} + +// TableC1_2 represents RFC-3454 Table C.1.2. +var TableC1_2 Set = tableC1_2 + +var tableC2_1 = Set{ + RuneRange{0x0000, 0x001F}, // [CONTROL CHARACTERS] + RuneRange{0x007F, 0x007F}, // DELETE +} + +// TableC2_1 represents RFC-3454 Table C.2.1. +var TableC2_1 Set = tableC2_1 + +var tableC2_2 = Set{ + RuneRange{0x0080, 0x009F}, // [CONTROL CHARACTERS] + RuneRange{0x06DD, 0x06DD}, // ARABIC END OF AYAH + RuneRange{0x070F, 0x070F}, // SYRIAC ABBREVIATION MARK + RuneRange{0x180E, 0x180E}, // MONGOLIAN VOWEL SEPARATOR + RuneRange{0x200C, 0x200C}, // ZERO WIDTH NON-JOINER + RuneRange{0x200D, 0x200D}, // ZERO WIDTH JOINER + RuneRange{0x2028, 0x2028}, // LINE SEPARATOR + RuneRange{0x2029, 0x2029}, // PARAGRAPH SEPARATOR + RuneRange{0x2060, 0x2060}, // WORD JOINER + RuneRange{0x2061, 0x2061}, // FUNCTION APPLICATION + RuneRange{0x2062, 0x2062}, // INVISIBLE TIMES + RuneRange{0x2063, 0x2063}, // INVISIBLE SEPARATOR + RuneRange{0x206A, 0x206F}, // [CONTROL CHARACTERS] + RuneRange{0xFEFF, 0xFEFF}, // ZERO WIDTH NO-BREAK SPACE + RuneRange{0xFFF9, 0xFFFC}, // [CONTROL CHARACTERS] + RuneRange{0x1D173, 0x1D17A}, // [MUSICAL CONTROL CHARACTERS] +} + +// TableC2_2 represents RFC-3454 Table C.2.2. +var TableC2_2 Set = tableC2_2 + +var tableC3 = Set{ + RuneRange{0xE000, 0xF8FF}, // [PRIVATE USE, PLANE 0] + RuneRange{0xF0000, 0xFFFFD}, // [PRIVATE USE, PLANE 15] + RuneRange{0x100000, 0x10FFFD}, // [PRIVATE USE, PLANE 16] +} + +// TableC3 represents RFC-3454 Table C.3. 
+var TableC3 Set = tableC3 + +var tableC4 = Set{ + RuneRange{0xFDD0, 0xFDEF}, // [NONCHARACTER CODE POINTS] + RuneRange{0xFFFE, 0xFFFF}, // [NONCHARACTER CODE POINTS] + RuneRange{0x1FFFE, 0x1FFFF}, // [NONCHARACTER CODE POINTS] + RuneRange{0x2FFFE, 0x2FFFF}, // [NONCHARACTER CODE POINTS] + RuneRange{0x3FFFE, 0x3FFFF}, // [NONCHARACTER CODE POINTS] + RuneRange{0x4FFFE, 0x4FFFF}, // [NONCHARACTER CODE POINTS] + RuneRange{0x5FFFE, 0x5FFFF}, // [NONCHARACTER CODE POINTS] + RuneRange{0x6FFFE, 0x6FFFF}, // [NONCHARACTER CODE POINTS] + RuneRange{0x7FFFE, 0x7FFFF}, // [NONCHARACTER CODE POINTS] + RuneRange{0x8FFFE, 0x8FFFF}, // [NONCHARACTER CODE POINTS] + RuneRange{0x9FFFE, 0x9FFFF}, // [NONCHARACTER CODE POINTS] + RuneRange{0xAFFFE, 0xAFFFF}, // [NONCHARACTER CODE POINTS] + RuneRange{0xBFFFE, 0xBFFFF}, // [NONCHARACTER CODE POINTS] + RuneRange{0xCFFFE, 0xCFFFF}, // [NONCHARACTER CODE POINTS] + RuneRange{0xDFFFE, 0xDFFFF}, // [NONCHARACTER CODE POINTS] + RuneRange{0xEFFFE, 0xEFFFF}, // [NONCHARACTER CODE POINTS] + RuneRange{0xFFFFE, 0xFFFFF}, // [NONCHARACTER CODE POINTS] + RuneRange{0x10FFFE, 0x10FFFF}, // [NONCHARACTER CODE POINTS] +} + +// TableC4 represents RFC-3454 Table C.4. +var TableC4 Set = tableC4 + +var tableC5 = Set{ + RuneRange{0xD800, 0xDFFF}, // [SURROGATE CODES] +} + +// TableC5 represents RFC-3454 Table C.5. +var TableC5 Set = tableC5 + +var tableC6 = Set{ + RuneRange{0xFFF9, 0xFFF9}, // INTERLINEAR ANNOTATION ANCHOR + RuneRange{0xFFFA, 0xFFFA}, // INTERLINEAR ANNOTATION SEPARATOR + RuneRange{0xFFFB, 0xFFFB}, // INTERLINEAR ANNOTATION TERMINATOR + RuneRange{0xFFFC, 0xFFFC}, // OBJECT REPLACEMENT CHARACTER + RuneRange{0xFFFD, 0xFFFD}, // REPLACEMENT CHARACTER +} + +// TableC6 represents RFC-3454 Table C.6. +var TableC6 Set = tableC6 + +var tableC7 = Set{ + RuneRange{0x2FF0, 0x2FFB}, // [IDEOGRAPHIC DESCRIPTION CHARACTERS] +} + +// TableC7 represents RFC-3454 Table C.7. +var TableC7 Set = tableC7 + +var tableC8 = Set{ + RuneRange{0x0340, 0x0340}, // COMBINING GRAVE TONE MARK + RuneRange{0x0341, 0x0341}, // COMBINING ACUTE TONE MARK + RuneRange{0x200E, 0x200E}, // LEFT-TO-RIGHT MARK + RuneRange{0x200F, 0x200F}, // RIGHT-TO-LEFT MARK + RuneRange{0x202A, 0x202A}, // LEFT-TO-RIGHT EMBEDDING + RuneRange{0x202B, 0x202B}, // RIGHT-TO-LEFT EMBEDDING + RuneRange{0x202C, 0x202C}, // POP DIRECTIONAL FORMATTING + RuneRange{0x202D, 0x202D}, // LEFT-TO-RIGHT OVERRIDE + RuneRange{0x202E, 0x202E}, // RIGHT-TO-LEFT OVERRIDE + RuneRange{0x206A, 0x206A}, // INHIBIT SYMMETRIC SWAPPING + RuneRange{0x206B, 0x206B}, // ACTIVATE SYMMETRIC SWAPPING + RuneRange{0x206C, 0x206C}, // INHIBIT ARABIC FORM SHAPING + RuneRange{0x206D, 0x206D}, // ACTIVATE ARABIC FORM SHAPING + RuneRange{0x206E, 0x206E}, // NATIONAL DIGIT SHAPES + RuneRange{0x206F, 0x206F}, // NOMINAL DIGIT SHAPES +} + +// TableC8 represents RFC-3454 Table C.8. +var TableC8 Set = tableC8 + +var tableC9 = Set{ + RuneRange{0xE0001, 0xE0001}, // LANGUAGE TAG + RuneRange{0xE0020, 0xE007F}, // [TAGGING CHARACTERS] +} + +// TableC9 represents RFC-3454 Table C.9. 
+var TableC9 Set = tableC9 + +var tableD1 = Set{ + RuneRange{0x05BE, 0x05BE}, + RuneRange{0x05C0, 0x05C0}, + RuneRange{0x05C3, 0x05C3}, + RuneRange{0x05D0, 0x05EA}, + RuneRange{0x05F0, 0x05F4}, + RuneRange{0x061B, 0x061B}, + RuneRange{0x061F, 0x061F}, + RuneRange{0x0621, 0x063A}, + RuneRange{0x0640, 0x064A}, + RuneRange{0x066D, 0x066F}, + RuneRange{0x0671, 0x06D5}, + RuneRange{0x06DD, 0x06DD}, + RuneRange{0x06E5, 0x06E6}, + RuneRange{0x06FA, 0x06FE}, + RuneRange{0x0700, 0x070D}, + RuneRange{0x0710, 0x0710}, + RuneRange{0x0712, 0x072C}, + RuneRange{0x0780, 0x07A5}, + RuneRange{0x07B1, 0x07B1}, + RuneRange{0x200F, 0x200F}, + RuneRange{0xFB1D, 0xFB1D}, + RuneRange{0xFB1F, 0xFB28}, + RuneRange{0xFB2A, 0xFB36}, + RuneRange{0xFB38, 0xFB3C}, + RuneRange{0xFB3E, 0xFB3E}, + RuneRange{0xFB40, 0xFB41}, + RuneRange{0xFB43, 0xFB44}, + RuneRange{0xFB46, 0xFBB1}, + RuneRange{0xFBD3, 0xFD3D}, + RuneRange{0xFD50, 0xFD8F}, + RuneRange{0xFD92, 0xFDC7}, + RuneRange{0xFDF0, 0xFDFC}, + RuneRange{0xFE70, 0xFE74}, + RuneRange{0xFE76, 0xFEFC}, +} + +// TableD1 represents RFC-3454 Table D.1. +var TableD1 Set = tableD1 + +var tableD2 = Set{ + RuneRange{0x0041, 0x005A}, + RuneRange{0x0061, 0x007A}, + RuneRange{0x00AA, 0x00AA}, + RuneRange{0x00B5, 0x00B5}, + RuneRange{0x00BA, 0x00BA}, + RuneRange{0x00C0, 0x00D6}, + RuneRange{0x00D8, 0x00F6}, + RuneRange{0x00F8, 0x0220}, + RuneRange{0x0222, 0x0233}, + RuneRange{0x0250, 0x02AD}, + RuneRange{0x02B0, 0x02B8}, + RuneRange{0x02BB, 0x02C1}, + RuneRange{0x02D0, 0x02D1}, + RuneRange{0x02E0, 0x02E4}, + RuneRange{0x02EE, 0x02EE}, + RuneRange{0x037A, 0x037A}, + RuneRange{0x0386, 0x0386}, + RuneRange{0x0388, 0x038A}, + RuneRange{0x038C, 0x038C}, + RuneRange{0x038E, 0x03A1}, + RuneRange{0x03A3, 0x03CE}, + RuneRange{0x03D0, 0x03F5}, + RuneRange{0x0400, 0x0482}, + RuneRange{0x048A, 0x04CE}, + RuneRange{0x04D0, 0x04F5}, + RuneRange{0x04F8, 0x04F9}, + RuneRange{0x0500, 0x050F}, + RuneRange{0x0531, 0x0556}, + RuneRange{0x0559, 0x055F}, + RuneRange{0x0561, 0x0587}, + RuneRange{0x0589, 0x0589}, + RuneRange{0x0903, 0x0903}, + RuneRange{0x0905, 0x0939}, + RuneRange{0x093D, 0x0940}, + RuneRange{0x0949, 0x094C}, + RuneRange{0x0950, 0x0950}, + RuneRange{0x0958, 0x0961}, + RuneRange{0x0964, 0x0970}, + RuneRange{0x0982, 0x0983}, + RuneRange{0x0985, 0x098C}, + RuneRange{0x098F, 0x0990}, + RuneRange{0x0993, 0x09A8}, + RuneRange{0x09AA, 0x09B0}, + RuneRange{0x09B2, 0x09B2}, + RuneRange{0x09B6, 0x09B9}, + RuneRange{0x09BE, 0x09C0}, + RuneRange{0x09C7, 0x09C8}, + RuneRange{0x09CB, 0x09CC}, + RuneRange{0x09D7, 0x09D7}, + RuneRange{0x09DC, 0x09DD}, + RuneRange{0x09DF, 0x09E1}, + RuneRange{0x09E6, 0x09F1}, + RuneRange{0x09F4, 0x09FA}, + RuneRange{0x0A05, 0x0A0A}, + RuneRange{0x0A0F, 0x0A10}, + RuneRange{0x0A13, 0x0A28}, + RuneRange{0x0A2A, 0x0A30}, + RuneRange{0x0A32, 0x0A33}, + RuneRange{0x0A35, 0x0A36}, + RuneRange{0x0A38, 0x0A39}, + RuneRange{0x0A3E, 0x0A40}, + RuneRange{0x0A59, 0x0A5C}, + RuneRange{0x0A5E, 0x0A5E}, + RuneRange{0x0A66, 0x0A6F}, + RuneRange{0x0A72, 0x0A74}, + RuneRange{0x0A83, 0x0A83}, + RuneRange{0x0A85, 0x0A8B}, + RuneRange{0x0A8D, 0x0A8D}, + RuneRange{0x0A8F, 0x0A91}, + RuneRange{0x0A93, 0x0AA8}, + RuneRange{0x0AAA, 0x0AB0}, + RuneRange{0x0AB2, 0x0AB3}, + RuneRange{0x0AB5, 0x0AB9}, + RuneRange{0x0ABD, 0x0AC0}, + RuneRange{0x0AC9, 0x0AC9}, + RuneRange{0x0ACB, 0x0ACC}, + RuneRange{0x0AD0, 0x0AD0}, + RuneRange{0x0AE0, 0x0AE0}, + RuneRange{0x0AE6, 0x0AEF}, + RuneRange{0x0B02, 0x0B03}, + RuneRange{0x0B05, 0x0B0C}, + RuneRange{0x0B0F, 0x0B10}, + RuneRange{0x0B13, 0x0B28}, + 
RuneRange{0x0B2A, 0x0B30}, + RuneRange{0x0B32, 0x0B33}, + RuneRange{0x0B36, 0x0B39}, + RuneRange{0x0B3D, 0x0B3E}, + RuneRange{0x0B40, 0x0B40}, + RuneRange{0x0B47, 0x0B48}, + RuneRange{0x0B4B, 0x0B4C}, + RuneRange{0x0B57, 0x0B57}, + RuneRange{0x0B5C, 0x0B5D}, + RuneRange{0x0B5F, 0x0B61}, + RuneRange{0x0B66, 0x0B70}, + RuneRange{0x0B83, 0x0B83}, + RuneRange{0x0B85, 0x0B8A}, + RuneRange{0x0B8E, 0x0B90}, + RuneRange{0x0B92, 0x0B95}, + RuneRange{0x0B99, 0x0B9A}, + RuneRange{0x0B9C, 0x0B9C}, + RuneRange{0x0B9E, 0x0B9F}, + RuneRange{0x0BA3, 0x0BA4}, + RuneRange{0x0BA8, 0x0BAA}, + RuneRange{0x0BAE, 0x0BB5}, + RuneRange{0x0BB7, 0x0BB9}, + RuneRange{0x0BBE, 0x0BBF}, + RuneRange{0x0BC1, 0x0BC2}, + RuneRange{0x0BC6, 0x0BC8}, + RuneRange{0x0BCA, 0x0BCC}, + RuneRange{0x0BD7, 0x0BD7}, + RuneRange{0x0BE7, 0x0BF2}, + RuneRange{0x0C01, 0x0C03}, + RuneRange{0x0C05, 0x0C0C}, + RuneRange{0x0C0E, 0x0C10}, + RuneRange{0x0C12, 0x0C28}, + RuneRange{0x0C2A, 0x0C33}, + RuneRange{0x0C35, 0x0C39}, + RuneRange{0x0C41, 0x0C44}, + RuneRange{0x0C60, 0x0C61}, + RuneRange{0x0C66, 0x0C6F}, + RuneRange{0x0C82, 0x0C83}, + RuneRange{0x0C85, 0x0C8C}, + RuneRange{0x0C8E, 0x0C90}, + RuneRange{0x0C92, 0x0CA8}, + RuneRange{0x0CAA, 0x0CB3}, + RuneRange{0x0CB5, 0x0CB9}, + RuneRange{0x0CBE, 0x0CBE}, + RuneRange{0x0CC0, 0x0CC4}, + RuneRange{0x0CC7, 0x0CC8}, + RuneRange{0x0CCA, 0x0CCB}, + RuneRange{0x0CD5, 0x0CD6}, + RuneRange{0x0CDE, 0x0CDE}, + RuneRange{0x0CE0, 0x0CE1}, + RuneRange{0x0CE6, 0x0CEF}, + RuneRange{0x0D02, 0x0D03}, + RuneRange{0x0D05, 0x0D0C}, + RuneRange{0x0D0E, 0x0D10}, + RuneRange{0x0D12, 0x0D28}, + RuneRange{0x0D2A, 0x0D39}, + RuneRange{0x0D3E, 0x0D40}, + RuneRange{0x0D46, 0x0D48}, + RuneRange{0x0D4A, 0x0D4C}, + RuneRange{0x0D57, 0x0D57}, + RuneRange{0x0D60, 0x0D61}, + RuneRange{0x0D66, 0x0D6F}, + RuneRange{0x0D82, 0x0D83}, + RuneRange{0x0D85, 0x0D96}, + RuneRange{0x0D9A, 0x0DB1}, + RuneRange{0x0DB3, 0x0DBB}, + RuneRange{0x0DBD, 0x0DBD}, + RuneRange{0x0DC0, 0x0DC6}, + RuneRange{0x0DCF, 0x0DD1}, + RuneRange{0x0DD8, 0x0DDF}, + RuneRange{0x0DF2, 0x0DF4}, + RuneRange{0x0E01, 0x0E30}, + RuneRange{0x0E32, 0x0E33}, + RuneRange{0x0E40, 0x0E46}, + RuneRange{0x0E4F, 0x0E5B}, + RuneRange{0x0E81, 0x0E82}, + RuneRange{0x0E84, 0x0E84}, + RuneRange{0x0E87, 0x0E88}, + RuneRange{0x0E8A, 0x0E8A}, + RuneRange{0x0E8D, 0x0E8D}, + RuneRange{0x0E94, 0x0E97}, + RuneRange{0x0E99, 0x0E9F}, + RuneRange{0x0EA1, 0x0EA3}, + RuneRange{0x0EA5, 0x0EA5}, + RuneRange{0x0EA7, 0x0EA7}, + RuneRange{0x0EAA, 0x0EAB}, + RuneRange{0x0EAD, 0x0EB0}, + RuneRange{0x0EB2, 0x0EB3}, + RuneRange{0x0EBD, 0x0EBD}, + RuneRange{0x0EC0, 0x0EC4}, + RuneRange{0x0EC6, 0x0EC6}, + RuneRange{0x0ED0, 0x0ED9}, + RuneRange{0x0EDC, 0x0EDD}, + RuneRange{0x0F00, 0x0F17}, + RuneRange{0x0F1A, 0x0F34}, + RuneRange{0x0F36, 0x0F36}, + RuneRange{0x0F38, 0x0F38}, + RuneRange{0x0F3E, 0x0F47}, + RuneRange{0x0F49, 0x0F6A}, + RuneRange{0x0F7F, 0x0F7F}, + RuneRange{0x0F85, 0x0F85}, + RuneRange{0x0F88, 0x0F8B}, + RuneRange{0x0FBE, 0x0FC5}, + RuneRange{0x0FC7, 0x0FCC}, + RuneRange{0x0FCF, 0x0FCF}, + RuneRange{0x1000, 0x1021}, + RuneRange{0x1023, 0x1027}, + RuneRange{0x1029, 0x102A}, + RuneRange{0x102C, 0x102C}, + RuneRange{0x1031, 0x1031}, + RuneRange{0x1038, 0x1038}, + RuneRange{0x1040, 0x1057}, + RuneRange{0x10A0, 0x10C5}, + RuneRange{0x10D0, 0x10F8}, + RuneRange{0x10FB, 0x10FB}, + RuneRange{0x1100, 0x1159}, + RuneRange{0x115F, 0x11A2}, + RuneRange{0x11A8, 0x11F9}, + RuneRange{0x1200, 0x1206}, + RuneRange{0x1208, 0x1246}, + RuneRange{0x1248, 0x1248}, + RuneRange{0x124A, 0x124D}, + 
RuneRange{0x1250, 0x1256}, + RuneRange{0x1258, 0x1258}, + RuneRange{0x125A, 0x125D}, + RuneRange{0x1260, 0x1286}, + RuneRange{0x1288, 0x1288}, + RuneRange{0x128A, 0x128D}, + RuneRange{0x1290, 0x12AE}, + RuneRange{0x12B0, 0x12B0}, + RuneRange{0x12B2, 0x12B5}, + RuneRange{0x12B8, 0x12BE}, + RuneRange{0x12C0, 0x12C0}, + RuneRange{0x12C2, 0x12C5}, + RuneRange{0x12C8, 0x12CE}, + RuneRange{0x12D0, 0x12D6}, + RuneRange{0x12D8, 0x12EE}, + RuneRange{0x12F0, 0x130E}, + RuneRange{0x1310, 0x1310}, + RuneRange{0x1312, 0x1315}, + RuneRange{0x1318, 0x131E}, + RuneRange{0x1320, 0x1346}, + RuneRange{0x1348, 0x135A}, + RuneRange{0x1361, 0x137C}, + RuneRange{0x13A0, 0x13F4}, + RuneRange{0x1401, 0x1676}, + RuneRange{0x1681, 0x169A}, + RuneRange{0x16A0, 0x16F0}, + RuneRange{0x1700, 0x170C}, + RuneRange{0x170E, 0x1711}, + RuneRange{0x1720, 0x1731}, + RuneRange{0x1735, 0x1736}, + RuneRange{0x1740, 0x1751}, + RuneRange{0x1760, 0x176C}, + RuneRange{0x176E, 0x1770}, + RuneRange{0x1780, 0x17B6}, + RuneRange{0x17BE, 0x17C5}, + RuneRange{0x17C7, 0x17C8}, + RuneRange{0x17D4, 0x17DA}, + RuneRange{0x17DC, 0x17DC}, + RuneRange{0x17E0, 0x17E9}, + RuneRange{0x1810, 0x1819}, + RuneRange{0x1820, 0x1877}, + RuneRange{0x1880, 0x18A8}, + RuneRange{0x1E00, 0x1E9B}, + RuneRange{0x1EA0, 0x1EF9}, + RuneRange{0x1F00, 0x1F15}, + RuneRange{0x1F18, 0x1F1D}, + RuneRange{0x1F20, 0x1F45}, + RuneRange{0x1F48, 0x1F4D}, + RuneRange{0x1F50, 0x1F57}, + RuneRange{0x1F59, 0x1F59}, + RuneRange{0x1F5B, 0x1F5B}, + RuneRange{0x1F5D, 0x1F5D}, + RuneRange{0x1F5F, 0x1F7D}, + RuneRange{0x1F80, 0x1FB4}, + RuneRange{0x1FB6, 0x1FBC}, + RuneRange{0x1FBE, 0x1FBE}, + RuneRange{0x1FC2, 0x1FC4}, + RuneRange{0x1FC6, 0x1FCC}, + RuneRange{0x1FD0, 0x1FD3}, + RuneRange{0x1FD6, 0x1FDB}, + RuneRange{0x1FE0, 0x1FEC}, + RuneRange{0x1FF2, 0x1FF4}, + RuneRange{0x1FF6, 0x1FFC}, + RuneRange{0x200E, 0x200E}, + RuneRange{0x2071, 0x2071}, + RuneRange{0x207F, 0x207F}, + RuneRange{0x2102, 0x2102}, + RuneRange{0x2107, 0x2107}, + RuneRange{0x210A, 0x2113}, + RuneRange{0x2115, 0x2115}, + RuneRange{0x2119, 0x211D}, + RuneRange{0x2124, 0x2124}, + RuneRange{0x2126, 0x2126}, + RuneRange{0x2128, 0x2128}, + RuneRange{0x212A, 0x212D}, + RuneRange{0x212F, 0x2131}, + RuneRange{0x2133, 0x2139}, + RuneRange{0x213D, 0x213F}, + RuneRange{0x2145, 0x2149}, + RuneRange{0x2160, 0x2183}, + RuneRange{0x2336, 0x237A}, + RuneRange{0x2395, 0x2395}, + RuneRange{0x249C, 0x24E9}, + RuneRange{0x3005, 0x3007}, + RuneRange{0x3021, 0x3029}, + RuneRange{0x3031, 0x3035}, + RuneRange{0x3038, 0x303C}, + RuneRange{0x3041, 0x3096}, + RuneRange{0x309D, 0x309F}, + RuneRange{0x30A1, 0x30FA}, + RuneRange{0x30FC, 0x30FF}, + RuneRange{0x3105, 0x312C}, + RuneRange{0x3131, 0x318E}, + RuneRange{0x3190, 0x31B7}, + RuneRange{0x31F0, 0x321C}, + RuneRange{0x3220, 0x3243}, + RuneRange{0x3260, 0x327B}, + RuneRange{0x327F, 0x32B0}, + RuneRange{0x32C0, 0x32CB}, + RuneRange{0x32D0, 0x32FE}, + RuneRange{0x3300, 0x3376}, + RuneRange{0x337B, 0x33DD}, + RuneRange{0x33E0, 0x33FE}, + RuneRange{0x3400, 0x4DB5}, + RuneRange{0x4E00, 0x9FA5}, + RuneRange{0xA000, 0xA48C}, + RuneRange{0xAC00, 0xD7A3}, + RuneRange{0xD800, 0xFA2D}, + RuneRange{0xFA30, 0xFA6A}, + RuneRange{0xFB00, 0xFB06}, + RuneRange{0xFB13, 0xFB17}, + RuneRange{0xFF21, 0xFF3A}, + RuneRange{0xFF41, 0xFF5A}, + RuneRange{0xFF66, 0xFFBE}, + RuneRange{0xFFC2, 0xFFC7}, + RuneRange{0xFFCA, 0xFFCF}, + RuneRange{0xFFD2, 0xFFD7}, + RuneRange{0xFFDA, 0xFFDC}, + RuneRange{0x10300, 0x1031E}, + RuneRange{0x10320, 0x10323}, + RuneRange{0x10330, 0x1034A}, + RuneRange{0x10400, 0x10425}, + 
RuneRange{0x10428, 0x1044D}, + RuneRange{0x1D000, 0x1D0F5}, + RuneRange{0x1D100, 0x1D126}, + RuneRange{0x1D12A, 0x1D166}, + RuneRange{0x1D16A, 0x1D172}, + RuneRange{0x1D183, 0x1D184}, + RuneRange{0x1D18C, 0x1D1A9}, + RuneRange{0x1D1AE, 0x1D1DD}, + RuneRange{0x1D400, 0x1D454}, + RuneRange{0x1D456, 0x1D49C}, + RuneRange{0x1D49E, 0x1D49F}, + RuneRange{0x1D4A2, 0x1D4A2}, + RuneRange{0x1D4A5, 0x1D4A6}, + RuneRange{0x1D4A9, 0x1D4AC}, + RuneRange{0x1D4AE, 0x1D4B9}, + RuneRange{0x1D4BB, 0x1D4BB}, + RuneRange{0x1D4BD, 0x1D4C0}, + RuneRange{0x1D4C2, 0x1D4C3}, + RuneRange{0x1D4C5, 0x1D505}, + RuneRange{0x1D507, 0x1D50A}, + RuneRange{0x1D50D, 0x1D514}, + RuneRange{0x1D516, 0x1D51C}, + RuneRange{0x1D51E, 0x1D539}, + RuneRange{0x1D53B, 0x1D53E}, + RuneRange{0x1D540, 0x1D544}, + RuneRange{0x1D546, 0x1D546}, + RuneRange{0x1D54A, 0x1D550}, + RuneRange{0x1D552, 0x1D6A3}, + RuneRange{0x1D6A8, 0x1D7C9}, + RuneRange{0x20000, 0x2A6D6}, + RuneRange{0x2F800, 0x2FA1D}, + RuneRange{0xF0000, 0xFFFFD}, + RuneRange{0x100000, 0x10FFFD}, +} + +// TableD2 represents RFC-3454 Table D.2. +var TableD2 Set = tableD2 diff --git a/hotelReservation/vendor/github.com/youmark/pkcs8/.gitignore b/hotelReservation/vendor/github.com/youmark/pkcs8/.gitignore new file mode 100644 index 000000000..836562412 --- /dev/null +++ b/hotelReservation/vendor/github.com/youmark/pkcs8/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/hotelReservation/vendor/github.com/youmark/pkcs8/.travis.yml b/hotelReservation/vendor/github.com/youmark/pkcs8/.travis.yml new file mode 100644 index 000000000..0bceef6fa --- /dev/null +++ b/hotelReservation/vendor/github.com/youmark/pkcs8/.travis.yml @@ -0,0 +1,9 @@ +language: go + +go: + - "1.9.x" + - "1.10.x" + - master + +script: + - go test -v ./... diff --git a/hotelReservation/vendor/github.com/youmark/pkcs8/LICENSE b/hotelReservation/vendor/github.com/youmark/pkcs8/LICENSE new file mode 100644 index 000000000..c939f4481 --- /dev/null +++ b/hotelReservation/vendor/github.com/youmark/pkcs8/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 youmark + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/hotelReservation/vendor/github.com/youmark/pkcs8/README b/hotelReservation/vendor/github.com/youmark/pkcs8/README new file mode 100644 index 000000000..376fcaf64 --- /dev/null +++ b/hotelReservation/vendor/github.com/youmark/pkcs8/README @@ -0,0 +1 @@ +pkcs8 package: implements PKCS#8 private key parsing and conversion as defined in RFC5208 and RFC5958 diff --git a/hotelReservation/vendor/github.com/youmark/pkcs8/README.md b/hotelReservation/vendor/github.com/youmark/pkcs8/README.md new file mode 100644 index 000000000..f2167dbfe --- /dev/null +++ b/hotelReservation/vendor/github.com/youmark/pkcs8/README.md @@ -0,0 +1,21 @@ +pkcs8 +=== +OpenSSL can generate private keys in both "traditional format" and PKCS#8 format. Newer applications are advised to use the more secure PKCS#8 format. The Go standard crypto package provides a [function](http://golang.org/pkg/crypto/x509/#ParsePKCS8PrivateKey) to parse private keys in PKCS#8 format, but it has a limitation: it can only handle unencrypted PKCS#8 private keys. To use it, the user has to save the private key to a file without encryption, which is bad practice because it leaves private keys unprotected on the file system. In addition, the Go standard package lacks functions to convert RSA/ECDSA private keys into PKCS#8 format. + +The pkcs8 package fills this gap. It implements functions to process private keys in PKCS#8 format, as defined in [RFC5208](https://tools.ietf.org/html/rfc5208) and [RFC5958](https://tools.ietf.org/html/rfc5958). It can handle both the unencrypted PKCS#8 PrivateKeyInfo format and the EncryptedPrivateKeyInfo format with PKCS#5 (v2.0) algorithms. + + +[**Godoc**](http://godoc.org/github.com/youmark/pkcs8) + +## Installation +Supports Go 1.9+ + +```text +go get github.com/youmark/pkcs8 +``` +## Dependency +This package depends on the golang.org/x/crypto/pbkdf2 package.
Use the following command to retrieve the pbkdf2 package +```text +go get golang.org/x/crypto/pbkdf2 +``` + diff --git a/hotelReservation/vendor/github.com/youmark/pkcs8/pkcs8.go b/hotelReservation/vendor/github.com/youmark/pkcs8/pkcs8.go new file mode 100644 index 000000000..9270a7970 --- /dev/null +++ b/hotelReservation/vendor/github.com/youmark/pkcs8/pkcs8.go @@ -0,0 +1,305 @@ +// Package pkcs8 implements functions to parse and convert private keys in PKCS#8 format, as defined in RFC5208 and RFC5958 +package pkcs8 + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/des" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "crypto/x509" + "encoding/asn1" + "errors" + + "golang.org/x/crypto/pbkdf2" +) + +// Copied from crypto/x509 +var ( + oidPublicKeyRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1} + oidPublicKeyDSA = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 1} + oidPublicKeyECDSA = asn1.ObjectIdentifier{1, 2, 840, 10045, 2, 1} +) + +// Copied from crypto/x509 +var ( + oidNamedCurveP224 = asn1.ObjectIdentifier{1, 3, 132, 0, 33} + oidNamedCurveP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7} + oidNamedCurveP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34} + oidNamedCurveP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35} +) + +// Copied from crypto/x509 +func oidFromNamedCurve(curve elliptic.Curve) (asn1.ObjectIdentifier, bool) { + switch curve { + case elliptic.P224(): + return oidNamedCurveP224, true + case elliptic.P256(): + return oidNamedCurveP256, true + case elliptic.P384(): + return oidNamedCurveP384, true + case elliptic.P521(): + return oidNamedCurveP521, true + } + + return nil, false } + +// Unencrypted PKCS8 +var ( + oidPKCS5PBKDF2 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 5, 12} + oidPBES2 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 5, 13} + oidAES256CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 42} + oidAES128CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 2} + oidHMACWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 113549, 2, 9} + oidDESEDE3CBC = asn1.ObjectIdentifier{1, 2, 840, 113549, 3, 7} +) + +type ecPrivateKey struct { + Version int + PrivateKey []byte + NamedCurveOID asn1.ObjectIdentifier `asn1:"optional,explicit,tag:0"` + PublicKey asn1.BitString `asn1:"optional,explicit,tag:1"` +} + +type privateKeyInfo struct { + Version int + PrivateKeyAlgorithm []asn1.ObjectIdentifier + PrivateKey []byte +} + +// Encrypted PKCS8 +type prfParam struct { + IdPRF asn1.ObjectIdentifier + NullParam asn1.RawValue +} + +type pbkdf2Params struct { + Salt []byte + IterationCount int + PrfParam prfParam `asn1:"optional"` +} + +type pbkdf2Algorithms struct { + IdPBKDF2 asn1.ObjectIdentifier + PBKDF2Params pbkdf2Params +} + +type pbkdf2Encs struct { + EncryAlgo asn1.ObjectIdentifier + IV []byte +} + +type pbes2Params struct { + KeyDerivationFunc pbkdf2Algorithms + EncryptionScheme pbkdf2Encs +} + +type pbes2Algorithms struct { + IdPBES2 asn1.ObjectIdentifier + PBES2Params pbes2Params +} + +type encryptedPrivateKeyInfo struct { + EncryptionAlgorithm pbes2Algorithms + EncryptedData []byte +} + +// ParsePKCS8PrivateKeyRSA parses encrypted/unencrypted private keys in PKCS#8 format. To parse encrypted private keys, a password of []byte type should be provided to the function as the second parameter. +// +// The function can decrypt the private key encrypted with AES-256-CBC mode, and stored in PKCS #5 v2.0 format.
+func ParsePKCS8PrivateKeyRSA(der []byte, v ...[]byte) (*rsa.PrivateKey, error) { + key, err := ParsePKCS8PrivateKey(der, v...) + if err != nil { + return nil, err + } + typedKey, ok := key.(*rsa.PrivateKey) + if !ok { + return nil, errors.New("key block is not of type RSA") + } + return typedKey, nil +} + +// ParsePKCS8PrivateKeyECDSA parses encrypted/unencrypted private keys in PKCS#8 format. To parse encrypted private keys, a password of []byte type should be provided to the function as the second parameter. +// +// The function can decrypt the private key encrypted with AES-256-CBC mode, and stored in PKCS #5 v2.0 format. +func ParsePKCS8PrivateKeyECDSA(der []byte, v ...[]byte) (*ecdsa.PrivateKey, error) { + key, err := ParsePKCS8PrivateKey(der, v...) + if err != nil { + return nil, err + } + typedKey, ok := key.(*ecdsa.PrivateKey) + if !ok { + return nil, errors.New("key block is not of type ECDSA") + } + return typedKey, nil +} + +// ParsePKCS8PrivateKey parses encrypted/unencrypted private keys in PKCS#8 format. To parse encrypted private keys, a password of []byte type should be provided to the function as the second parameter. +// +// The function can decrypt the private key encrypted with AES-256-CBC mode, and stored in PKCS #5 v2.0 format. +func ParsePKCS8PrivateKey(der []byte, v ...[]byte) (interface{}, error) { + // No password provided, assume the private key is unencrypted + if v == nil { + return x509.ParsePKCS8PrivateKey(der) + } + + // Use the password provided to decrypt the private key + password := v[0] + var privKey encryptedPrivateKeyInfo + if _, err := asn1.Unmarshal(der, &privKey); err != nil { + return nil, errors.New("pkcs8: only PKCS #5 v2.0 supported") + } + + if !privKey.EncryptionAlgorithm.IdPBES2.Equal(oidPBES2) { + return nil, errors.New("pkcs8: only PBES2 supported") + } + + if !privKey.EncryptionAlgorithm.PBES2Params.KeyDerivationFunc.IdPBKDF2.Equal(oidPKCS5PBKDF2) { + return nil, errors.New("pkcs8: only PBKDF2 supported") + } + + encParam := privKey.EncryptionAlgorithm.PBES2Params.EncryptionScheme + kdfParam := privKey.EncryptionAlgorithm.PBES2Params.KeyDerivationFunc.PBKDF2Params + + iv := encParam.IV + salt := kdfParam.Salt + iter := kdfParam.IterationCount + keyHash := sha1.New + if kdfParam.PrfParam.IdPRF.Equal(oidHMACWithSHA256) { + keyHash = sha256.New + } + + encryptedKey := privKey.EncryptedData + var symkey []byte + var block cipher.Block + var err error + switch { + case encParam.EncryAlgo.Equal(oidAES128CBC): + symkey = pbkdf2.Key(password, salt, iter, 16, keyHash) + block, err = aes.NewCipher(symkey) + case encParam.EncryAlgo.Equal(oidAES256CBC): + symkey = pbkdf2.Key(password, salt, iter, 32, keyHash) + block, err = aes.NewCipher(symkey) + case encParam.EncryAlgo.Equal(oidDESEDE3CBC): + symkey = pbkdf2.Key(password, salt, iter, 24, keyHash) + block, err = des.NewTripleDESCipher(symkey) + default: + return nil, errors.New("pkcs8: only AES-256-CBC, AES-128-CBC and DES-EDE3-CBC are supported") + } + if err != nil { + return nil, err + } + mode := cipher.NewCBCDecrypter(block, iv) + mode.CryptBlocks(encryptedKey, encryptedKey) + + key, err := x509.ParsePKCS8PrivateKey(encryptedKey) + if err != nil { + return nil, errors.New("pkcs8: incorrect password") + } + return key, nil +} + +func convertPrivateKeyToPKCS8(priv interface{}) ([]byte, error) { + var pkey privateKeyInfo + + switch priv := priv.(type) { + case *ecdsa.PrivateKey: + eckey, err := x509.MarshalECPrivateKey(priv) + if err != nil { + return nil, err + } + + oidNamedCurve, ok := 
oidFromNamedCurve(priv.Curve) + if !ok { + return nil, errors.New("pkcs8: unknown elliptic curve") + } + + // Per RFC5958, if publicKey is present, then version is set to v2(1); otherwise version is set to v1(0). + // But OpenSSL sets it to v1 even when publicKey is present + pkey.Version = 1 + pkey.PrivateKeyAlgorithm = make([]asn1.ObjectIdentifier, 2) + pkey.PrivateKeyAlgorithm[0] = oidPublicKeyECDSA + pkey.PrivateKeyAlgorithm[1] = oidNamedCurve + pkey.PrivateKey = eckey + case *rsa.PrivateKey: + + // Per RFC5958, if publicKey is present, then version is set to v2(1); otherwise version is set to v1(0). + // But OpenSSL sets it to v1 even when publicKey is present + pkey.Version = 0 + pkey.PrivateKeyAlgorithm = make([]asn1.ObjectIdentifier, 1) + pkey.PrivateKeyAlgorithm[0] = oidPublicKeyRSA + pkey.PrivateKey = x509.MarshalPKCS1PrivateKey(priv) + } + + return asn1.Marshal(pkey) +} + +func convertPrivateKeyToPKCS8Encrypted(priv interface{}, password []byte) ([]byte, error) { + // Convert private key into PKCS8 format + pkey, err := convertPrivateKeyToPKCS8(priv) + if err != nil { + return nil, err + } + + // Calculate key from password based on PKCS5 algorithm + // Use an 8-byte salt, a 16-byte IV, and 2048 iterations + iter := 2048 + salt := make([]byte, 8) + iv := make([]byte, 16) + _, err = rand.Read(salt) + if err != nil { + return nil, err + } + _, err = rand.Read(iv) + if err != nil { + return nil, err + } + + key := pbkdf2.Key(password, salt, iter, 32, sha256.New) + + // Use AES256-CBC mode, pad plaintext with PKCS5 padding scheme + padding := aes.BlockSize - len(pkey)%aes.BlockSize + if padding > 0 { + n := len(pkey) + pkey = append(pkey, make([]byte, padding)...) + for i := 0; i < padding; i++ { + pkey[n+i] = byte(padding) + } + } + + encryptedKey := make([]byte, len(pkey)) + block, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + mode := cipher.NewCBCEncrypter(block, iv) + mode.CryptBlocks(encryptedKey, pkey) + + // pbkdf2algo := pbkdf2Algorithms{oidPKCS5PBKDF2, pbkdf2Params{salt, iter, prfParam{oidHMACWithSHA256}}} + + pbkdf2algo := pbkdf2Algorithms{oidPKCS5PBKDF2, pbkdf2Params{salt, iter, prfParam{oidHMACWithSHA256, asn1.RawValue{Tag: asn1.TagNull}}}} + pbkdf2encs := pbkdf2Encs{oidAES256CBC, iv} + pbes2algo := pbes2Algorithms{oidPBES2, pbes2Params{pbkdf2algo, pbkdf2encs}} + + encryptedPkey := encryptedPrivateKeyInfo{pbes2algo, encryptedKey} + + return asn1.Marshal(encryptedPkey) +} + +// ConvertPrivateKeyToPKCS8 converts the private key into PKCS#8 format. +// To encrypt the private key, a password of []byte type should be provided as the second parameter. +// +// The only supported key types are RSA and ECDSA (*rsa.PrivateKey or *ecdsa.PrivateKey for priv) +func ConvertPrivateKeyToPKCS8(priv interface{}, v ...[]byte) ([]byte, error) { + if v == nil { + return convertPrivateKeyToPKCS8(priv) + } + + password := string(v[0]) + return convertPrivateKeyToPKCS8Encrypted(priv, []byte(password)) +} diff --git a/hotelReservation/vendor/go.mongodb.org/mongo-driver/LICENSE b/hotelReservation/vendor/go.mongodb.org/mongo-driver/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/hotelReservation/vendor/go.mongodb.org/mongo-driver/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document.
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
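Before moving on to the mongo-driver files, here is a minimal, hypothetical usage sketch for the youmark/pkcs8 package vendored above. It round-trips an ECDSA key through the encrypted PKCS#8 path (`ConvertPrivateKeyToPKCS8` with a password, then `ParsePKCS8PrivateKeyECDSA`); the throwaway key, the password literal, and the panic-style error handling are illustrative assumptions, not part of the vendored code.

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	"github.com/youmark/pkcs8"
)

func main() {
	// Generate a throwaway P-256 key for the demonstration.
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	// Encrypt into PKCS#8 DER; the password is passed as the variadic
	// second argument (AES-256-CBC with PBKDF2, per the code above).
	password := []byte("correct horse battery staple") // illustrative only
	der, err := pkcs8.ConvertPrivateKeyToPKCS8(priv, password)
	if err != nil {
		panic(err)
	}

	// Parse it back with the same password and confirm the round trip.
	parsed, err := pkcs8.ParsePKCS8PrivateKeyECDSA(der, password)
	if err != nil {
		panic(err)
	}
	fmt.Println("round trip ok:", parsed.Equal(priv))
}
```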
diff --git a/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bson.go b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bson.go new file mode 100644 index 000000000..a0d818582 --- /dev/null +++ b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bson.go @@ -0,0 +1,50 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// +// Based on gopkg.in/mgo.v2/bson by Gustavo Niemeyer +// See THIRD-PARTY-NOTICES for original license terms. + +package bson // import "go.mongodb.org/mongo-driver/bson" + +import ( + "go.mongodb.org/mongo-driver/bson/primitive" +) + +// Zeroer allows custom struct types to implement a report of zero +// state. All struct types that don't implement Zeroer or where IsZero +// returns false are considered to be not zero. +type Zeroer interface { + IsZero() bool +} + +// D is an ordered representation of a BSON document. This type should be used when the order of the elements matters, +// such as MongoDB command documents. If the order of the elements does not matter, an M should be used instead. +// +// A D should not be constructed with duplicate key names, as that can cause undefined server behavior. +// +// Example usage: +// +// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} +type D = primitive.D + +// E represents a BSON element for a D. It is usually used inside a D. +type E = primitive.E + +// M is an unordered representation of a BSON document. This type should be used when the order of the elements does not +// matter. This type is handled as a regular map[string]interface{} when encoding and decoding. Elements will be +// serialized in an undefined, random order. If the order of the elements matters, a D should be used instead. +// +// Example usage: +// +// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} +type M = primitive.M + +// An A is an ordered representation of a BSON array. +// +// Example usage: +// +// bson.A{"bar", "world", 3.14159, bson.D{{"qux", 12345}}} +type A = primitive.A diff --git a/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go new file mode 100644 index 000000000..6ca8d9ad6 --- /dev/null +++ b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go @@ -0,0 +1,56 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "reflect" + + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" +) + +// ArrayCodec is the Codec used for bsoncore.Array values. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// ArrayCodec registered. +type ArrayCodec struct{} + +var defaultArrayCodec = NewArrayCodec() + +// NewArrayCodec returns an ArrayCodec. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// ArrayCodec registered. +func NewArrayCodec() *ArrayCodec { + return &ArrayCodec{} +} + +// EncodeValue is the ValueEncoder for bsoncore.Array values. 
+func (ac *ArrayCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tCoreArray { + return ValueEncoderError{Name: "CoreArrayEncodeValue", Types: []reflect.Type{tCoreArray}, Received: val} + } + + arr := val.Interface().(bsoncore.Array) + return bsonrw.Copier{}.CopyArrayFromBytes(vw, arr) +} + +// DecodeValue is the ValueDecoder for bsoncore.Array values. +func (ac *ArrayCodec) DecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tCoreArray { + return ValueDecoderError{Name: "CoreArrayDecodeValue", Types: []reflect.Type{tCoreArray}, Received: val} + } + + if val.IsNil() { + val.Set(reflect.MakeSlice(val.Type(), 0, 0)) + } + + val.SetLen(0) + arr, err := bsonrw.Copier{}.AppendArrayBytes(val.Interface().(bsoncore.Array), vr) + val.Set(reflect.ValueOf(arr)) + return err +} diff --git a/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go new file mode 100644 index 000000000..0693bd432 --- /dev/null +++ b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go @@ -0,0 +1,382 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec // import "go.mongodb.org/mongo-driver/bson/bsoncodec" + +import ( + "fmt" + "reflect" + "strings" + + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" +) + +var ( + emptyValue = reflect.Value{} +) + +// Marshaler is an interface implemented by types that can marshal themselves +// into a BSON document represented as bytes. The bytes returned must be a valid +// BSON document if the error is nil. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Marshaler] instead. +type Marshaler interface { + MarshalBSON() ([]byte, error) +} + +// ValueMarshaler is an interface implemented by types that can marshal +// themselves into a BSON value as bytes. The type must be the valid type for +// the bytes returned. The bytes and byte type together must be valid if the +// error is nil. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.ValueMarshaler] instead. +type ValueMarshaler interface { + MarshalBSONValue() (bsontype.Type, []byte, error) +} + +// Unmarshaler is an interface implemented by types that can unmarshal a BSON +// document representation of themselves. The BSON bytes can be assumed to be +// valid. UnmarshalBSON must copy the BSON bytes if it wishes to retain the data +// after returning. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Unmarshaler] instead. +type Unmarshaler interface { + UnmarshalBSON([]byte) error +} + +// ValueUnmarshaler is an interface implemented by types that can unmarshal a +// BSON value representation of themselves. The BSON bytes and type can be +// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it +// wishes to retain the data after returning. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.ValueUnmarshaler] instead. 
+type ValueUnmarshaler interface { + UnmarshalBSONValue(bsontype.Type, []byte) error +} + +// ValueEncoderError is an error returned from a ValueEncoder when the provided value can't be +// encoded by the ValueEncoder. +type ValueEncoderError struct { + Name string + Types []reflect.Type + Kinds []reflect.Kind + Received reflect.Value +} + +func (vee ValueEncoderError) Error() string { + typeKinds := make([]string, 0, len(vee.Types)+len(vee.Kinds)) + for _, t := range vee.Types { + typeKinds = append(typeKinds, t.String()) + } + for _, k := range vee.Kinds { + if k == reflect.Map { + typeKinds = append(typeKinds, "map[string]*") + continue + } + typeKinds = append(typeKinds, k.String()) + } + received := vee.Received.Kind().String() + if vee.Received.IsValid() { + received = vee.Received.Type().String() + } + return fmt.Sprintf("%s can only encode valid %s, but got %s", vee.Name, strings.Join(typeKinds, ", "), received) +} + +// ValueDecoderError is an error returned from a ValueDecoder when the provided value can't be +// decoded by the ValueDecoder. +type ValueDecoderError struct { + Name string + Types []reflect.Type + Kinds []reflect.Kind + Received reflect.Value +} + +func (vde ValueDecoderError) Error() string { + typeKinds := make([]string, 0, len(vde.Types)+len(vde.Kinds)) + for _, t := range vde.Types { + typeKinds = append(typeKinds, t.String()) + } + for _, k := range vde.Kinds { + if k == reflect.Map { + typeKinds = append(typeKinds, "map[string]*") + continue + } + typeKinds = append(typeKinds, k.String()) + } + received := vde.Received.Kind().String() + if vde.Received.IsValid() { + received = vde.Received.Type().String() + } + return fmt.Sprintf("%s can only decode valid and settable %s, but got %s", vde.Name, strings.Join(typeKinds, ", "), received) +} + +// EncodeContext is the contextual information required for a Codec to encode a +// value. +type EncodeContext struct { + *Registry + + // MinSize causes the Encoder to marshal Go integer values (int, int8, int16, int32, int64, + // uint, uint8, uint16, uint32, or uint64) as the minimum BSON int size (either 32 or 64 bits) + // that can represent the integer value. + // + // Deprecated: Use bson.Encoder.IntMinSize instead. + MinSize bool + + errorOnInlineDuplicates bool + stringifyMapKeysWithFmt bool + nilMapAsEmpty bool + nilSliceAsEmpty bool + nilByteSliceAsEmpty bool + omitZeroStruct bool + useJSONStructTags bool +} + +// ErrorOnInlineDuplicates causes the Encoder to return an error if there is a duplicate field in +// the marshaled BSON when the "inline" struct tag option is set. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.ErrorOnInlineDuplicates] instead. +func (ec *EncodeContext) ErrorOnInlineDuplicates() { + ec.errorOnInlineDuplicates = true +} + +// StringifyMapKeysWithFmt causes the Encoder to convert Go map keys to BSON document field name +// strings using fmt.Sprintf() instead of the default string conversion logic. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.StringifyMapKeysWithFmt] instead. +func (ec *EncodeContext) StringifyMapKeysWithFmt() { + ec.stringifyMapKeysWithFmt = true +} + +// NilMapAsEmpty causes the Encoder to marshal nil Go maps as empty BSON documents instead of BSON +// null. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilMapAsEmpty] instead. +func (ec *EncodeContext) NilMapAsEmpty() { + ec.nilMapAsEmpty = true +} + +// NilSliceAsEmpty causes the Encoder to marshal nil Go slices as empty BSON arrays instead of BSON +// null. 
+// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilSliceAsEmpty] instead. +func (ec *EncodeContext) NilSliceAsEmpty() { + ec.nilSliceAsEmpty = true +} + +// NilByteSliceAsEmpty causes the Encoder to marshal nil Go byte slices as empty BSON binary values +// instead of BSON null. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilByteSliceAsEmpty] instead. +func (ec *EncodeContext) NilByteSliceAsEmpty() { + ec.nilByteSliceAsEmpty = true +} + +// OmitZeroStruct causes the Encoder to consider the zero value for a struct (e.g. MyStruct{}) +// as empty and omit it from the marshaled BSON when the "omitempty" struct tag option is set. +// +// Note that the Encoder only examines exported struct fields when determining if a struct is the +// zero value. It considers pointers to a zero struct value (e.g. &MyStruct{}) not empty. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.OmitZeroStruct] instead. +func (ec *EncodeContext) OmitZeroStruct() { + ec.omitZeroStruct = true +} + +// UseJSONStructTags causes the Encoder to fall back to using the "json" struct tag if a "bson" +// struct tag is not specified. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.UseJSONStructTags] instead. +func (ec *EncodeContext) UseJSONStructTags() { + ec.useJSONStructTags = true +} + +// DecodeContext is the contextual information required for a Codec to decode a +// value. +type DecodeContext struct { + *Registry + + // Truncate, if true, instructs decoders to truncate the fractional part of BSON "double" + // values when attempting to unmarshal them into a Go integer (int, int8, int16, int32, int64, + // uint, uint8, uint16, uint32, or uint64) struct field. The truncation logic does not apply to + // BSON "decimal128" values. + // + // Deprecated: Use bson.Decoder.AllowTruncatingDoubles instead. + Truncate bool + + // Ancestor is the type of a containing document. This is mainly used to determine what type + // should be used when decoding an embedded document into an empty interface. For example, if + // Ancestor is a bson.M, BSON embedded document values being decoded into an empty interface + // will be decoded into a bson.M. + // + // Deprecated: Use bson.Decoder.DefaultDocumentM or bson.Decoder.DefaultDocumentD instead. + Ancestor reflect.Type + + // defaultDocumentType specifies the Go type to decode top-level and nested BSON documents into. In particular, the + // usage for this field is restricted to data typed as "interface{}" or "map[string]interface{}". If DocumentType is + // set to a type that a BSON document cannot be unmarshaled into (e.g. "string"), unmarshaling will result in an + // error. DocumentType overrides the Ancestor field. + defaultDocumentType reflect.Type + + binaryAsSlice bool + useJSONStructTags bool + useLocalTimeZone bool + zeroMaps bool + zeroStructs bool +} + +// BinaryAsSlice causes the Decoder to unmarshal BSON binary field values that are the "Generic" or +// "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.BinaryAsSlice] instead. +// +// UseJSONStructTags causes the Decoder to fall back to using the "json" struct tag if a "bson" +// struct tag is not specified. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseJSONStructTags] instead.
+func (dc *DecodeContext) UseJSONStructTags() {
+	dc.useJSONStructTags = true
+}
+
+// UseLocalTimeZone causes the Decoder to unmarshal time.Time values in the local timezone instead
+// of the UTC timezone.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseLocalTimeZone] instead.
+func (dc *DecodeContext) UseLocalTimeZone() {
+	dc.useLocalTimeZone = true
+}
+
+// ZeroMaps causes the Decoder to delete any existing values from Go maps in the destination value
+// passed to Decode before unmarshaling BSON documents into them.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroMaps] instead.
+func (dc *DecodeContext) ZeroMaps() {
+	dc.zeroMaps = true
+}
+
+// ZeroStructs causes the Decoder to delete any existing values from Go structs in the destination
+// value passed to Decode before unmarshaling BSON documents into them.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroStructs] instead.
+func (dc *DecodeContext) ZeroStructs() {
+	dc.zeroStructs = true
+}
+
+// DefaultDocumentM causes the Decoder to always unmarshal documents into the primitive.M type. This
+// behavior is restricted to data typed as "interface{}" or "map[string]interface{}".
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.DefaultDocumentM] instead.
+func (dc *DecodeContext) DefaultDocumentM() {
+	dc.defaultDocumentType = reflect.TypeOf(primitive.M{})
+}
+
+// DefaultDocumentD causes the Decoder to always unmarshal documents into the primitive.D type. This
+// behavior is restricted to data typed as "interface{}" or "map[string]interface{}".
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.DefaultDocumentD] instead.
+func (dc *DecodeContext) DefaultDocumentD() {
+	dc.defaultDocumentType = reflect.TypeOf(primitive.D{})
+}
+
+// ValueCodec is an interface for encoding and decoding reflect.Value
+// values.
+//
+// Deprecated: Use [ValueEncoder] and [ValueDecoder] instead.
+type ValueCodec interface {
+	ValueEncoder
+	ValueDecoder
+}
+
+// ValueEncoder is the interface implemented by types that can handle the encoding of a value.
+type ValueEncoder interface {
+	EncodeValue(EncodeContext, bsonrw.ValueWriter, reflect.Value) error
+}
+
+// ValueEncoderFunc is an adapter function that allows a function with the correct signature to be
+// used as a ValueEncoder.
+type ValueEncoderFunc func(EncodeContext, bsonrw.ValueWriter, reflect.Value) error
+
+// EncodeValue implements the ValueEncoder interface.
+func (fn ValueEncoderFunc) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	return fn(ec, vw, val)
+}
+
+// ValueDecoder is the interface implemented by types that can handle the decoding of a value.
+type ValueDecoder interface {
+	DecodeValue(DecodeContext, bsonrw.ValueReader, reflect.Value) error
+}
+
+// ValueDecoderFunc is an adapter function that allows a function with the correct signature to be
+// used as a ValueDecoder.
+type ValueDecoderFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) error
+
+// DecodeValue implements the ValueDecoder interface.
+func (fn ValueDecoderFunc) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	return fn(dc, vr, val)
+}
+
+// typeDecoder is the interface implemented by types that can handle the decoding of a value given its type.
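ValueEncoderFunc and ValueDecoderFunc are the usual way to plug a one-off codec into a registry. A minimal sketch of that pattern (the Celsius type and every other name here are hypothetical, not part of this diff; the registry calls are the deprecated v1 API that this vendored code targets):

package main

import (
	"fmt"
	"reflect"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsoncodec"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

// Celsius is an illustrative custom type stored as a BSON double.
type Celsius float64

var tCelsius = reflect.TypeOf(Celsius(0))

func encodeCelsius(_ bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	if !val.IsValid() || val.Type() != tCelsius {
		return bsoncodec.ValueEncoderError{Name: "encodeCelsius", Types: []reflect.Type{tCelsius}, Received: val}
	}
	return vw.WriteDouble(val.Float())
}

func decodeCelsius(_ bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
	if !val.CanSet() || val.Type() != tCelsius {
		return bsoncodec.ValueDecoderError{Name: "decodeCelsius", Types: []reflect.Type{tCelsius}, Received: val}
	}
	f, err := vr.ReadDouble()
	if err != nil {
		return err
	}
	val.SetFloat(f)
	return nil
}

func main() {
	rb := bson.NewRegistryBuilder() // defaults plus our codec
	rb.RegisterTypeEncoder(tCelsius, bsoncodec.ValueEncoderFunc(encodeCelsius))
	rb.RegisterTypeDecoder(tCelsius, bsoncodec.ValueDecoderFunc(decodeCelsius))
	reg := rb.Build()

	raw, err := bson.MarshalWithRegistry(reg, bson.M{"temp": Celsius(21.5)})
	if err != nil {
		panic(err)
	}
	var out struct{ Temp Celsius }
	if err := bson.UnmarshalWithRegistry(reg, raw, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Temp) // 21.5
}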
+type typeDecoder interface {
+	decodeType(DecodeContext, bsonrw.ValueReader, reflect.Type) (reflect.Value, error)
+}
+
+// typeDecoderFunc is an adapter function that allows a function with the correct signature to be used as a typeDecoder.
+type typeDecoderFunc func(DecodeContext, bsonrw.ValueReader, reflect.Type) (reflect.Value, error)
+
+func (fn typeDecoderFunc) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+	return fn(dc, vr, t)
+}
+
+// decodeAdapter allows two functions with the correct signatures to be used as both a ValueDecoder and typeDecoder.
+type decodeAdapter struct {
+	ValueDecoderFunc
+	typeDecoderFunc
+}
+
+var _ ValueDecoder = decodeAdapter{}
+var _ typeDecoder = decodeAdapter{}
+
+// decodeTypeOrValue calls decoder.decodeType if decoder is a typeDecoder. Otherwise, it allocates a new element of type
+// t and calls decoder.DecodeValue on it.
+func decodeTypeOrValue(decoder ValueDecoder, dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+	td, _ := decoder.(typeDecoder)
+	return decodeTypeOrValueWithInfo(decoder, td, dc, vr, t, true)
+}
+
+func decodeTypeOrValueWithInfo(vd ValueDecoder, td typeDecoder, dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type, convert bool) (reflect.Value, error) {
+	if td != nil {
+		val, err := td.decodeType(dc, vr, t)
+		if err == nil && convert && val.Type() != t {
+			// This conversion step is necessary for slices and maps. If a user declares variables like:
+			//
+			// type myBool bool
+			// var m map[string]myBool
+			//
+			// and tries to decode BSON bytes into the map, the decoding will fail if this conversion is not present
+			// because we'll try to assign a value of type bool to one of type myBool.
+			val = val.Convert(t)
+		}
+		return val, err
+	}
+
+	val := reflect.New(t).Elem()
+	err := vd.DecodeValue(dc, vr, val)
+	return val, err
+}
+
+// CodecZeroer is the interface implemented by Codecs that can also determine if
+// a value of the type that would be encoded is zero.
+//
+// Deprecated: Defining custom rules for the zero/empty value will not be supported in Go Driver
+// 2.0. Users who want to omit empty complex values should use a pointer field and set the value to
+// nil instead.
+type CodecZeroer interface {
+	IsTypeZero(interface{}) bool
+}
diff --git a/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go
new file mode 100644
index 000000000..dde3e7681
--- /dev/null
+++ b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go
@@ -0,0 +1,123 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"fmt"
+	"reflect"
+
+	"go.mongodb.org/mongo-driver/bson/bsonoptions"
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+// ByteSliceCodec is the Codec used for []byte values.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
+// ByteSliceCodec registered.
+type ByteSliceCodec struct {
+	// EncodeNilAsEmpty causes EncodeValue to marshal nil Go byte slices as empty BSON binary values
+	// instead of BSON null.
+ // + // Deprecated: Use bson.Encoder.NilByteSliceAsEmpty instead. + EncodeNilAsEmpty bool +} + +var ( + defaultByteSliceCodec = NewByteSliceCodec() + + // Assert that defaultByteSliceCodec satisfies the typeDecoder interface, which allows it to be + // used by collection type decoders (e.g. map, slice, etc) to set individual values in a + // collection. + _ typeDecoder = defaultByteSliceCodec +) + +// NewByteSliceCodec returns a ByteSliceCodec with options opts. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// ByteSliceCodec registered. +func NewByteSliceCodec(opts ...*bsonoptions.ByteSliceCodecOptions) *ByteSliceCodec { + byteSliceOpt := bsonoptions.MergeByteSliceCodecOptions(opts...) + codec := ByteSliceCodec{} + if byteSliceOpt.EncodeNilAsEmpty != nil { + codec.EncodeNilAsEmpty = *byteSliceOpt.EncodeNilAsEmpty + } + return &codec +} + +// EncodeValue is the ValueEncoder for []byte. +func (bsc *ByteSliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tByteSlice { + return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val} + } + if val.IsNil() && !bsc.EncodeNilAsEmpty && !ec.nilByteSliceAsEmpty { + return vw.WriteNull() + } + return vw.WriteBinary(val.Interface().([]byte)) +} + +func (bsc *ByteSliceCodec) decodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tByteSlice { + return emptyValue, ValueDecoderError{ + Name: "ByteSliceDecodeValue", + Types: []reflect.Type{tByteSlice}, + Received: reflect.Zero(t), + } + } + + var data []byte + var err error + switch vrType := vr.Type(); vrType { + case bsontype.String: + str, err := vr.ReadString() + if err != nil { + return emptyValue, err + } + data = []byte(str) + case bsontype.Symbol: + sym, err := vr.ReadSymbol() + if err != nil { + return emptyValue, err + } + data = []byte(sym) + case bsontype.Binary: + var subtype byte + data, subtype, err = vr.ReadBinary() + if err != nil { + return emptyValue, err + } + if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { + return emptyValue, decodeBinaryError{subtype: subtype, typeName: "[]byte"} + } + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a []byte", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(data), nil +} + +// DecodeValue is the ValueDecoder for []byte. +func (bsc *ByteSliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tByteSlice { + return ValueDecoderError{Name: "ByteSliceDecodeValue", Types: []reflect.Type{tByteSlice}, Received: val} + } + + elem, err := bsc.decodeType(dc, vr, tByteSlice) + if err != nil { + return err + } + + val.Set(elem) + return nil +} diff --git a/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go new file mode 100644 index 000000000..844b50299 --- /dev/null +++ b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go @@ -0,0 +1,166 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "reflect" + "sync" + "sync/atomic" +) + +// Runtime check that the kind encoder and decoder caches can store any valid +// reflect.Kind constant. +func init() { + if s := reflect.Kind(len(kindEncoderCache{}.entries)).String(); s != "kind27" { + panic("The capacity of kindEncoderCache is too small.\n" + + "This is due to a new type being added to reflect.Kind.") + } +} + +// statically assert array size +var _ = (kindEncoderCache{}).entries[reflect.UnsafePointer] +var _ = (kindDecoderCache{}).entries[reflect.UnsafePointer] + +type typeEncoderCache struct { + cache sync.Map // map[reflect.Type]ValueEncoder +} + +func (c *typeEncoderCache) Store(rt reflect.Type, enc ValueEncoder) { + c.cache.Store(rt, enc) +} + +func (c *typeEncoderCache) Load(rt reflect.Type) (ValueEncoder, bool) { + if v, _ := c.cache.Load(rt); v != nil { + return v.(ValueEncoder), true + } + return nil, false +} + +func (c *typeEncoderCache) LoadOrStore(rt reflect.Type, enc ValueEncoder) ValueEncoder { + if v, loaded := c.cache.LoadOrStore(rt, enc); loaded { + enc = v.(ValueEncoder) + } + return enc +} + +func (c *typeEncoderCache) Clone() *typeEncoderCache { + cc := new(typeEncoderCache) + c.cache.Range(func(k, v interface{}) bool { + if k != nil && v != nil { + cc.cache.Store(k, v) + } + return true + }) + return cc +} + +type typeDecoderCache struct { + cache sync.Map // map[reflect.Type]ValueDecoder +} + +func (c *typeDecoderCache) Store(rt reflect.Type, dec ValueDecoder) { + c.cache.Store(rt, dec) +} + +func (c *typeDecoderCache) Load(rt reflect.Type) (ValueDecoder, bool) { + if v, _ := c.cache.Load(rt); v != nil { + return v.(ValueDecoder), true + } + return nil, false +} + +func (c *typeDecoderCache) LoadOrStore(rt reflect.Type, dec ValueDecoder) ValueDecoder { + if v, loaded := c.cache.LoadOrStore(rt, dec); loaded { + dec = v.(ValueDecoder) + } + return dec +} + +func (c *typeDecoderCache) Clone() *typeDecoderCache { + cc := new(typeDecoderCache) + c.cache.Range(func(k, v interface{}) bool { + if k != nil && v != nil { + cc.cache.Store(k, v) + } + return true + }) + return cc +} + +// atomic.Value requires that all calls to Store() have the same concrete type +// so we wrap the ValueEncoder with a kindEncoderCacheEntry to ensure the type +// is always the same (since different concrete types may implement the +// ValueEncoder interface). 
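The comment above is the crux of the kind caches: a sync/atomic.Value panics when successive Store calls pass different concrete types, and each ValueEncoder/ValueDecoder implementation is its own concrete type. A standalone sketch of the failure mode and the wrapper fix (encA, encB, and entry are illustrative names, not part of this diff):

package main

import "sync/atomic"

type encA struct{}
type encB struct{}

func main() {
	var v atomic.Value
	v.Store(encA{})
	// v.Store(encB{}) // would panic: store of inconsistently typed value

	// Wrapping entries in one pointer type keeps the stored concrete type stable,
	// which is exactly what kindEncoderCacheEntry/kindDecoderCacheEntry do.
	type entry struct{ enc interface{} }
	var w atomic.Value
	w.Store(&entry{enc: encA{}})
	w.Store(&entry{enc: encB{}}) // fine: the stored type is always *entry
}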
+type kindEncoderCacheEntry struct {
+	enc ValueEncoder
+}
+
+type kindEncoderCache struct {
+	entries [reflect.UnsafePointer + 1]atomic.Value // *kindEncoderCacheEntry
+}
+
+func (c *kindEncoderCache) Store(rt reflect.Kind, enc ValueEncoder) {
+	if enc != nil && rt < reflect.Kind(len(c.entries)) {
+		c.entries[rt].Store(&kindEncoderCacheEntry{enc: enc})
+	}
+}
+
+func (c *kindEncoderCache) Load(rt reflect.Kind) (ValueEncoder, bool) {
+	if rt < reflect.Kind(len(c.entries)) {
+		if ent, ok := c.entries[rt].Load().(*kindEncoderCacheEntry); ok {
+			return ent.enc, ent.enc != nil
+		}
+	}
+	return nil, false
+}
+
+func (c *kindEncoderCache) Clone() *kindEncoderCache {
+	cc := new(kindEncoderCache)
+	for i, v := range c.entries {
+		if val := v.Load(); val != nil {
+			cc.entries[i].Store(val)
+		}
+	}
+	return cc
+}
+
+// atomic.Value requires that all calls to Store() have the same concrete type
+// so we wrap the ValueDecoder with a kindDecoderCacheEntry to ensure the type
+// is always the same (since different concrete types may implement the
+// ValueDecoder interface).
+type kindDecoderCacheEntry struct {
+	dec ValueDecoder
+}
+
+type kindDecoderCache struct {
+	entries [reflect.UnsafePointer + 1]atomic.Value // *kindDecoderCacheEntry
+}
+
+func (c *kindDecoderCache) Store(rt reflect.Kind, dec ValueDecoder) {
+	if rt < reflect.Kind(len(c.entries)) {
+		c.entries[rt].Store(&kindDecoderCacheEntry{dec: dec})
+	}
+}
+
+func (c *kindDecoderCache) Load(rt reflect.Kind) (ValueDecoder, bool) {
+	if rt < reflect.Kind(len(c.entries)) {
+		if ent, ok := c.entries[rt].Load().(*kindDecoderCacheEntry); ok {
+			return ent.dec, ent.dec != nil
+		}
+	}
+	return nil, false
+}
+
+func (c *kindDecoderCache) Clone() *kindDecoderCache {
+	cc := new(kindDecoderCache)
+	for i, v := range c.entries {
+		if val := v.Load(); val != nil {
+			cc.entries[i].Store(val)
+		}
+	}
+	return cc
+}
diff --git a/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go
new file mode 100644
index 000000000..cb8180f25
--- /dev/null
+++ b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go
@@ -0,0 +1,63 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"reflect"
+
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+)
+
+// condAddrEncoder is the encoder used when a pointer to the encoding value has an encoder.
+type condAddrEncoder struct {
+	canAddrEnc ValueEncoder
+	elseEnc    ValueEncoder
+}
+
+var _ ValueEncoder = (*condAddrEncoder)(nil)
+
+// newCondAddrEncoder returns a condAddrEncoder.
+func newCondAddrEncoder(canAddrEnc, elseEnc ValueEncoder) *condAddrEncoder {
+	encoder := condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc}
+	return &encoder
+}
+
+// EncodeValue is the ValueEncoderFunc for a value that may be addressable.
+func (cae *condAddrEncoder) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if val.CanAddr() {
+		return cae.canAddrEnc.EncodeValue(ec, vw, val)
+	}
+	if cae.elseEnc != nil {
+		return cae.elseEnc.EncodeValue(ec, vw, val)
+	}
+	return ErrNoEncoder{Type: val.Type()}
+}
+
+// condAddrDecoder is the decoder used when a pointer to the value has a decoder.
+type condAddrDecoder struct {
+	canAddrDec ValueDecoder
+	elseDec    ValueDecoder
+}
+
+var _ ValueDecoder = (*condAddrDecoder)(nil)
+
+// newCondAddrDecoder returns a condAddrDecoder.
+func newCondAddrDecoder(canAddrDec, elseDec ValueDecoder) *condAddrDecoder {
+	decoder := condAddrDecoder{canAddrDec: canAddrDec, elseDec: elseDec}
+	return &decoder
+}
+
+// DecodeValue is the ValueDecoderFunc for a value that may be addressable.
+func (cad *condAddrDecoder) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if val.CanAddr() {
+		return cad.canAddrDec.DecodeValue(dc, vr, val)
+	}
+	if cad.elseDec != nil {
+		return cad.elseDec.DecodeValue(dc, vr, val)
+	}
+	return ErrNoDecoder{Type: val.Type()}
+}
diff --git a/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go
new file mode 100644
index 000000000..e479c3585
--- /dev/null
+++ b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go
@@ -0,0 +1,1807 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math"
+	"net/url"
+	"reflect"
+	"strconv"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+var (
+	defaultValueDecoders DefaultValueDecoders
+	errCannotTruncate    = errors.New("float64 can only be truncated to an integer type when truncation is enabled")
+)
+
+type decodeBinaryError struct {
+	subtype  byte
+	typeName string
+}
+
+func (d decodeBinaryError) Error() string {
+	return fmt.Sprintf("only binary values with subtype 0x00 or 0x02 can be decoded into %s, but got subtype %v", d.typeName, d.subtype)
+}
+
+func newDefaultStructCodec() *StructCodec {
+	codec, err := NewStructCodec(DefaultStructTagParser)
+	if err != nil {
+		// This function is called from the codec registration path, so errors can't be propagated. If there's an error
+		// constructing the StructCodec, we panic to avoid losing it.
+		panic(fmt.Errorf("error creating default StructCodec: %v", err))
+	}
+	return codec
+}
+
+// DefaultValueDecoders is a namespace type for the default ValueDecoders used
+// when creating a registry.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
+type DefaultValueDecoders struct{}
+
+// RegisterDefaultDecoders will register the decoder methods attached to DefaultValueDecoders with
+// the provided RegistryBuilder.
+//
+// There is no support for decoding map[string]interface{} because there is no decoder for
+// interface{}, so users must either register this decoder themselves or use the
+// EmptyInterfaceDecoder available in the bson package.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value decoders registered.
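For reference, the deprecated registration path this doc comment describes is wired up roughly as follows. This is a sketch of legacy caller usage, not code from this diff:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsoncodec"
)

func main() {
	// Build a registry the pre-bson.NewRegistry way: an empty builder plus the
	// default encoder/decoder sets that this file provides.
	rb := bsoncodec.NewRegistryBuilder()
	bsoncodec.DefaultValueEncoders{}.RegisterDefaultEncoders(rb)
	bsoncodec.DefaultValueDecoders{}.RegisterDefaultDecoders(rb)
	reg := rb.Build()

	raw, err := bson.MarshalWithRegistry(reg, bson.M{"ok": int32(1)})
	if err != nil {
		panic(err)
	}
	fmt.Println(len(raw) > 0) // true
}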
+func (dvd DefaultValueDecoders) RegisterDefaultDecoders(rb *RegistryBuilder) { + if rb == nil { + panic(errors.New("argument to RegisterDefaultDecoders must not be nil")) + } + + intDecoder := decodeAdapter{dvd.IntDecodeValue, dvd.intDecodeType} + floatDecoder := decodeAdapter{dvd.FloatDecodeValue, dvd.floatDecodeType} + + rb. + RegisterTypeDecoder(tD, ValueDecoderFunc(dvd.DDecodeValue)). + RegisterTypeDecoder(tBinary, decodeAdapter{dvd.BinaryDecodeValue, dvd.binaryDecodeType}). + RegisterTypeDecoder(tUndefined, decodeAdapter{dvd.UndefinedDecodeValue, dvd.undefinedDecodeType}). + RegisterTypeDecoder(tDateTime, decodeAdapter{dvd.DateTimeDecodeValue, dvd.dateTimeDecodeType}). + RegisterTypeDecoder(tNull, decodeAdapter{dvd.NullDecodeValue, dvd.nullDecodeType}). + RegisterTypeDecoder(tRegex, decodeAdapter{dvd.RegexDecodeValue, dvd.regexDecodeType}). + RegisterTypeDecoder(tDBPointer, decodeAdapter{dvd.DBPointerDecodeValue, dvd.dBPointerDecodeType}). + RegisterTypeDecoder(tTimestamp, decodeAdapter{dvd.TimestampDecodeValue, dvd.timestampDecodeType}). + RegisterTypeDecoder(tMinKey, decodeAdapter{dvd.MinKeyDecodeValue, dvd.minKeyDecodeType}). + RegisterTypeDecoder(tMaxKey, decodeAdapter{dvd.MaxKeyDecodeValue, dvd.maxKeyDecodeType}). + RegisterTypeDecoder(tJavaScript, decodeAdapter{dvd.JavaScriptDecodeValue, dvd.javaScriptDecodeType}). + RegisterTypeDecoder(tSymbol, decodeAdapter{dvd.SymbolDecodeValue, dvd.symbolDecodeType}). + RegisterTypeDecoder(tByteSlice, defaultByteSliceCodec). + RegisterTypeDecoder(tTime, defaultTimeCodec). + RegisterTypeDecoder(tEmpty, defaultEmptyInterfaceCodec). + RegisterTypeDecoder(tCoreArray, defaultArrayCodec). + RegisterTypeDecoder(tOID, decodeAdapter{dvd.ObjectIDDecodeValue, dvd.objectIDDecodeType}). + RegisterTypeDecoder(tDecimal, decodeAdapter{dvd.Decimal128DecodeValue, dvd.decimal128DecodeType}). + RegisterTypeDecoder(tJSONNumber, decodeAdapter{dvd.JSONNumberDecodeValue, dvd.jsonNumberDecodeType}). + RegisterTypeDecoder(tURL, decodeAdapter{dvd.URLDecodeValue, dvd.urlDecodeType}). + RegisterTypeDecoder(tCoreDocument, ValueDecoderFunc(dvd.CoreDocumentDecodeValue)). + RegisterTypeDecoder(tCodeWithScope, decodeAdapter{dvd.CodeWithScopeDecodeValue, dvd.codeWithScopeDecodeType}). + RegisterDefaultDecoder(reflect.Bool, decodeAdapter{dvd.BooleanDecodeValue, dvd.booleanDecodeType}). + RegisterDefaultDecoder(reflect.Int, intDecoder). + RegisterDefaultDecoder(reflect.Int8, intDecoder). + RegisterDefaultDecoder(reflect.Int16, intDecoder). + RegisterDefaultDecoder(reflect.Int32, intDecoder). + RegisterDefaultDecoder(reflect.Int64, intDecoder). + RegisterDefaultDecoder(reflect.Uint, defaultUIntCodec). + RegisterDefaultDecoder(reflect.Uint8, defaultUIntCodec). + RegisterDefaultDecoder(reflect.Uint16, defaultUIntCodec). + RegisterDefaultDecoder(reflect.Uint32, defaultUIntCodec). + RegisterDefaultDecoder(reflect.Uint64, defaultUIntCodec). + RegisterDefaultDecoder(reflect.Float32, floatDecoder). + RegisterDefaultDecoder(reflect.Float64, floatDecoder). + RegisterDefaultDecoder(reflect.Array, ValueDecoderFunc(dvd.ArrayDecodeValue)). + RegisterDefaultDecoder(reflect.Map, defaultMapCodec). + RegisterDefaultDecoder(reflect.Slice, defaultSliceCodec). + RegisterDefaultDecoder(reflect.String, defaultStringCodec). + RegisterDefaultDecoder(reflect.Struct, newDefaultStructCodec()). + RegisterDefaultDecoder(reflect.Ptr, NewPointerCodec()). + RegisterTypeMapEntry(bsontype.Double, tFloat64). + RegisterTypeMapEntry(bsontype.String, tString). + RegisterTypeMapEntry(bsontype.Array, tA). 
+ RegisterTypeMapEntry(bsontype.Binary, tBinary). + RegisterTypeMapEntry(bsontype.Undefined, tUndefined). + RegisterTypeMapEntry(bsontype.ObjectID, tOID). + RegisterTypeMapEntry(bsontype.Boolean, tBool). + RegisterTypeMapEntry(bsontype.DateTime, tDateTime). + RegisterTypeMapEntry(bsontype.Regex, tRegex). + RegisterTypeMapEntry(bsontype.DBPointer, tDBPointer). + RegisterTypeMapEntry(bsontype.JavaScript, tJavaScript). + RegisterTypeMapEntry(bsontype.Symbol, tSymbol). + RegisterTypeMapEntry(bsontype.CodeWithScope, tCodeWithScope). + RegisterTypeMapEntry(bsontype.Int32, tInt32). + RegisterTypeMapEntry(bsontype.Int64, tInt64). + RegisterTypeMapEntry(bsontype.Timestamp, tTimestamp). + RegisterTypeMapEntry(bsontype.Decimal128, tDecimal). + RegisterTypeMapEntry(bsontype.MinKey, tMinKey). + RegisterTypeMapEntry(bsontype.MaxKey, tMaxKey). + RegisterTypeMapEntry(bsontype.Type(0), tD). + RegisterTypeMapEntry(bsontype.EmbeddedDocument, tD). + RegisterHookDecoder(tValueUnmarshaler, ValueDecoderFunc(dvd.ValueUnmarshalerDecodeValue)). + RegisterHookDecoder(tUnmarshaler, ValueDecoderFunc(dvd.UnmarshalerDecodeValue)) +} + +// DDecodeValue is the ValueDecoderFunc for primitive.D instances. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) DDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.IsValid() || !val.CanSet() || val.Type() != tD { + return ValueDecoderError{Name: "DDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} + } + + switch vrType := vr.Type(); vrType { + case bsontype.Type(0), bsontype.EmbeddedDocument: + dc.Ancestor = tD + case bsontype.Null: + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + default: + return fmt.Errorf("cannot decode %v into a primitive.D", vrType) + } + + dr, err := vr.ReadDocument() + if err != nil { + return err + } + + decoder, err := dc.LookupDecoder(tEmpty) + if err != nil { + return err + } + tEmptyTypeDecoder, _ := decoder.(typeDecoder) + + // Use the elements in the provided value if it's non nil. Otherwise, allocate a new D instance. + var elems primitive.D + if !val.IsNil() { + val.SetLen(0) + elems = val.Interface().(primitive.D) + } else { + elems = make(primitive.D, 0) + } + + for { + key, elemVr, err := dr.ReadElement() + if err == bsonrw.ErrEOD { + break + } else if err != nil { + return err + } + + // Pass false for convert because we don't need to call reflect.Value.Convert for tEmpty. 
+ elem, err := decodeTypeOrValueWithInfo(decoder, tEmptyTypeDecoder, dc, elemVr, tEmpty, false) + if err != nil { + return err + } + + elems = append(elems, primitive.E{Key: key, Value: elem.Interface()}) + } + + val.Set(reflect.ValueOf(elems)) + return nil +} + +func (dvd DefaultValueDecoders) booleanDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t.Kind() != reflect.Bool { + return emptyValue, ValueDecoderError{ + Name: "BooleanDecodeValue", + Kinds: []reflect.Kind{reflect.Bool}, + Received: reflect.Zero(t), + } + } + + var b bool + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Int32: + i32, err := vr.ReadInt32() + if err != nil { + return emptyValue, err + } + b = (i32 != 0) + case bsontype.Int64: + i64, err := vr.ReadInt64() + if err != nil { + return emptyValue, err + } + b = (i64 != 0) + case bsontype.Double: + f64, err := vr.ReadDouble() + if err != nil { + return emptyValue, err + } + b = (f64 != 0) + case bsontype.Boolean: + b, err = vr.ReadBoolean() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a boolean", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(b), nil +} + +// BooleanDecodeValue is the ValueDecoderFunc for bool types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) BooleanDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.IsValid() || !val.CanSet() || val.Kind() != reflect.Bool { + return ValueDecoderError{Name: "BooleanDecodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val} + } + + elem, err := dvd.booleanDecodeType(dctx, vr, val.Type()) + if err != nil { + return err + } + + val.SetBool(elem.Bool()) + return nil +} + +func (DefaultValueDecoders) intDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + var i64 int64 + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Int32: + i32, err := vr.ReadInt32() + if err != nil { + return emptyValue, err + } + i64 = int64(i32) + case bsontype.Int64: + i64, err = vr.ReadInt64() + if err != nil { + return emptyValue, err + } + case bsontype.Double: + f64, err := vr.ReadDouble() + if err != nil { + return emptyValue, err + } + if !dc.Truncate && math.Floor(f64) != f64 { + return emptyValue, errCannotTruncate + } + if f64 > float64(math.MaxInt64) { + return emptyValue, fmt.Errorf("%g overflows int64", f64) + } + i64 = int64(f64) + case bsontype.Boolean: + b, err := vr.ReadBoolean() + if err != nil { + return emptyValue, err + } + if b { + i64 = 1 + } + case bsontype.Null: + if err = vr.ReadNull(); err != nil { + return emptyValue, err + } + case bsontype.Undefined: + if err = vr.ReadUndefined(); err != nil { + return emptyValue, err + } + default: + return emptyValue, fmt.Errorf("cannot decode %v into an integer type", vrType) + } + + switch t.Kind() { + case reflect.Int8: + if i64 < math.MinInt8 || i64 > math.MaxInt8 { + return emptyValue, fmt.Errorf("%d overflows int8", i64) + } + + return reflect.ValueOf(int8(i64)), nil + case reflect.Int16: + if i64 < math.MinInt16 || i64 > math.MaxInt16 { + return emptyValue, fmt.Errorf("%d overflows int16", i64) + } + + return reflect.ValueOf(int16(i64)), nil + case reflect.Int32: + if i64 < math.MinInt32 || i64 > math.MaxInt32 { + return 
emptyValue, fmt.Errorf("%d overflows int32", i64) + } + + return reflect.ValueOf(int32(i64)), nil + case reflect.Int64: + return reflect.ValueOf(i64), nil + case reflect.Int: + if int64(int(i64)) != i64 { // Can we fit this inside of an int + return emptyValue, fmt.Errorf("%d overflows int", i64) + } + + return reflect.ValueOf(int(i64)), nil + default: + return emptyValue, ValueDecoderError{ + Name: "IntDecodeValue", + Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, + Received: reflect.Zero(t), + } + } +} + +// IntDecodeValue is the ValueDecoderFunc for int types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) IntDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() { + return ValueDecoderError{ + Name: "IntDecodeValue", + Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, + Received: val, + } + } + + elem, err := dvd.intDecodeType(dc, vr, val.Type()) + if err != nil { + return err + } + + val.SetInt(elem.Int()) + return nil +} + +// UintDecodeValue is the ValueDecoderFunc for uint types. +// +// Deprecated: UintDecodeValue is not registered by default. Use UintCodec.DecodeValue instead. +func (dvd DefaultValueDecoders) UintDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + var i64 int64 + var err error + switch vr.Type() { + case bsontype.Int32: + i32, err := vr.ReadInt32() + if err != nil { + return err + } + i64 = int64(i32) + case bsontype.Int64: + i64, err = vr.ReadInt64() + if err != nil { + return err + } + case bsontype.Double: + f64, err := vr.ReadDouble() + if err != nil { + return err + } + if !dc.Truncate && math.Floor(f64) != f64 { + return errors.New("UintDecodeValue can only truncate float64 to an integer type when truncation is enabled") + } + if f64 > float64(math.MaxInt64) { + return fmt.Errorf("%g overflows int64", f64) + } + i64 = int64(f64) + case bsontype.Boolean: + b, err := vr.ReadBoolean() + if err != nil { + return err + } + if b { + i64 = 1 + } + default: + return fmt.Errorf("cannot decode %v into an integer type", vr.Type()) + } + + if !val.CanSet() { + return ValueDecoderError{ + Name: "UintDecodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: val, + } + } + + switch val.Kind() { + case reflect.Uint8: + if i64 < 0 || i64 > math.MaxUint8 { + return fmt.Errorf("%d overflows uint8", i64) + } + case reflect.Uint16: + if i64 < 0 || i64 > math.MaxUint16 { + return fmt.Errorf("%d overflows uint16", i64) + } + case reflect.Uint32: + if i64 < 0 || i64 > math.MaxUint32 { + return fmt.Errorf("%d overflows uint32", i64) + } + case reflect.Uint64: + if i64 < 0 { + return fmt.Errorf("%d overflows uint64", i64) + } + case reflect.Uint: + if i64 < 0 || int64(uint(i64)) != i64 { // Can we fit this inside of an uint + return fmt.Errorf("%d overflows uint", i64) + } + default: + return ValueDecoderError{ + Name: "UintDecodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: val, + } + } + + val.SetUint(uint64(i64)) + return nil +} + +func (dvd DefaultValueDecoders) floatDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + var f float64 + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Int32: 
+ i32, err := vr.ReadInt32() + if err != nil { + return emptyValue, err + } + f = float64(i32) + case bsontype.Int64: + i64, err := vr.ReadInt64() + if err != nil { + return emptyValue, err + } + f = float64(i64) + case bsontype.Double: + f, err = vr.ReadDouble() + if err != nil { + return emptyValue, err + } + case bsontype.Boolean: + b, err := vr.ReadBoolean() + if err != nil { + return emptyValue, err + } + if b { + f = 1 + } + case bsontype.Null: + if err = vr.ReadNull(); err != nil { + return emptyValue, err + } + case bsontype.Undefined: + if err = vr.ReadUndefined(); err != nil { + return emptyValue, err + } + default: + return emptyValue, fmt.Errorf("cannot decode %v into a float32 or float64 type", vrType) + } + + switch t.Kind() { + case reflect.Float32: + if !dc.Truncate && float64(float32(f)) != f { + return emptyValue, errCannotTruncate + } + + return reflect.ValueOf(float32(f)), nil + case reflect.Float64: + return reflect.ValueOf(f), nil + default: + return emptyValue, ValueDecoderError{ + Name: "FloatDecodeValue", + Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, + Received: reflect.Zero(t), + } + } +} + +// FloatDecodeValue is the ValueDecoderFunc for float types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) FloatDecodeValue(ec DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() { + return ValueDecoderError{ + Name: "FloatDecodeValue", + Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, + Received: val, + } + } + + elem, err := dvd.floatDecodeType(ec, vr, val.Type()) + if err != nil { + return err + } + + val.SetFloat(elem.Float()) + return nil +} + +// StringDecodeValue is the ValueDecoderFunc for string types. +// +// Deprecated: StringDecodeValue is not registered by default. Use StringCodec.DecodeValue instead. +func (dvd DefaultValueDecoders) StringDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + var str string + var err error + switch vr.Type() { + // TODO(GODRIVER-577): Handle JavaScript and Symbol BSON types when allowed. + case bsontype.String: + str, err = vr.ReadString() + if err != nil { + return err + } + default: + return fmt.Errorf("cannot decode %v into a string type", vr.Type()) + } + if !val.CanSet() || val.Kind() != reflect.String { + return ValueDecoderError{Name: "StringDecodeValue", Kinds: []reflect.Kind{reflect.String}, Received: val} + } + + val.SetString(str) + return nil +} + +func (DefaultValueDecoders) javaScriptDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tJavaScript { + return emptyValue, ValueDecoderError{ + Name: "JavaScriptDecodeValue", + Types: []reflect.Type{tJavaScript}, + Received: reflect.Zero(t), + } + } + + var js string + var err error + switch vrType := vr.Type(); vrType { + case bsontype.JavaScript: + js, err = vr.ReadJavascript() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a primitive.JavaScript", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.JavaScript(js)), nil +} + +// JavaScriptDecodeValue is the ValueDecoderFunc for the primitive.JavaScript type. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
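The truncation rule enforced by intDecodeType and floatDecodeType above is observable through the public API. A small sketch (assuming bson.Decoder.AllowTruncatingDoubles, the documented replacement for DecodeContext.Truncate; not part of this diff):

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

func main() {
	doc, err := bson.Marshal(bson.M{"n": 3.14})
	if err != nil {
		panic(err)
	}

	// Without truncation enabled, a fractional double cannot become an int.
	var strict struct{ N int }
	fmt.Println(bson.Unmarshal(doc, &strict) != nil) // true: errCannotTruncate

	dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(doc))
	if err != nil {
		panic(err)
	}
	dec.AllowTruncatingDoubles()
	var loose struct{ N int }
	if err := dec.Decode(&loose); err != nil {
		panic(err)
	}
	fmt.Println(loose.N) // 3
}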
+func (dvd DefaultValueDecoders) JavaScriptDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tJavaScript { + return ValueDecoderError{Name: "JavaScriptDecodeValue", Types: []reflect.Type{tJavaScript}, Received: val} + } + + elem, err := dvd.javaScriptDecodeType(dctx, vr, tJavaScript) + if err != nil { + return err + } + + val.SetString(elem.String()) + return nil +} + +func (DefaultValueDecoders) symbolDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tSymbol { + return emptyValue, ValueDecoderError{ + Name: "SymbolDecodeValue", + Types: []reflect.Type{tSymbol}, + Received: reflect.Zero(t), + } + } + + var symbol string + var err error + switch vrType := vr.Type(); vrType { + case bsontype.String: + symbol, err = vr.ReadString() + case bsontype.Symbol: + symbol, err = vr.ReadSymbol() + case bsontype.Binary: + data, subtype, err := vr.ReadBinary() + if err != nil { + return emptyValue, err + } + + if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { + return emptyValue, decodeBinaryError{subtype: subtype, typeName: "primitive.Symbol"} + } + symbol = string(data) + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a primitive.Symbol", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.Symbol(symbol)), nil +} + +// SymbolDecodeValue is the ValueDecoderFunc for the primitive.Symbol type. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) SymbolDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tSymbol { + return ValueDecoderError{Name: "SymbolDecodeValue", Types: []reflect.Type{tSymbol}, Received: val} + } + + elem, err := dvd.symbolDecodeType(dctx, vr, tSymbol) + if err != nil { + return err + } + + val.SetString(elem.String()) + return nil +} + +func (DefaultValueDecoders) binaryDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tBinary { + return emptyValue, ValueDecoderError{ + Name: "BinaryDecodeValue", + Types: []reflect.Type{tBinary}, + Received: reflect.Zero(t), + } + } + + var data []byte + var subtype byte + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Binary: + data, subtype, err = vr.ReadBinary() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a Binary", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.Binary{Subtype: subtype, Data: data}), nil +} + +// BinaryDecodeValue is the ValueDecoderFunc for Binary. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
+func (dvd DefaultValueDecoders) BinaryDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tBinary { + return ValueDecoderError{Name: "BinaryDecodeValue", Types: []reflect.Type{tBinary}, Received: val} + } + + elem, err := dvd.binaryDecodeType(dc, vr, tBinary) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (DefaultValueDecoders) undefinedDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tUndefined { + return emptyValue, ValueDecoderError{ + Name: "UndefinedDecodeValue", + Types: []reflect.Type{tUndefined}, + Received: reflect.Zero(t), + } + } + + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Undefined: + err = vr.ReadUndefined() + case bsontype.Null: + err = vr.ReadNull() + default: + return emptyValue, fmt.Errorf("cannot decode %v into an Undefined", vr.Type()) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.Undefined{}), nil +} + +// UndefinedDecodeValue is the ValueDecoderFunc for Undefined. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) UndefinedDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tUndefined { + return ValueDecoderError{Name: "UndefinedDecodeValue", Types: []reflect.Type{tUndefined}, Received: val} + } + + elem, err := dvd.undefinedDecodeType(dc, vr, tUndefined) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +// Accept both 12-byte string and pretty-printed 24-byte hex string formats. +func (dvd DefaultValueDecoders) objectIDDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tOID { + return emptyValue, ValueDecoderError{ + Name: "ObjectIDDecodeValue", + Types: []reflect.Type{tOID}, + Received: reflect.Zero(t), + } + } + + var oid primitive.ObjectID + var err error + switch vrType := vr.Type(); vrType { + case bsontype.ObjectID: + oid, err = vr.ReadObjectID() + if err != nil { + return emptyValue, err + } + case bsontype.String: + str, err := vr.ReadString() + if err != nil { + return emptyValue, err + } + if oid, err = primitive.ObjectIDFromHex(str); err == nil { + break + } + if len(str) != 12 { + return emptyValue, fmt.Errorf("an ObjectID string must be exactly 12 bytes long (got %v)", len(str)) + } + byteArr := []byte(str) + copy(oid[:], byteArr) + case bsontype.Null: + if err = vr.ReadNull(); err != nil { + return emptyValue, err + } + case bsontype.Undefined: + if err = vr.ReadUndefined(); err != nil { + return emptyValue, err + } + default: + return emptyValue, fmt.Errorf("cannot decode %v into an ObjectID", vrType) + } + + return reflect.ValueOf(oid), nil +} + +// ObjectIDDecodeValue is the ValueDecoderFunc for primitive.ObjectID. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
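objectIDDecodeType above also accepts string input: first as a 24-character hex string via primitive.ObjectIDFromHex, then, failing that, as a raw 12-byte string copied directly into the ObjectID. A quick illustration (the hex value is made up):

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
	// "id" is stored as a plain BSON string, not as a native ObjectID.
	doc, err := bson.Marshal(bson.M{"id": "5f2a6c21e3d1f84a12345678"})
	if err != nil {
		panic(err)
	}

	var out struct {
		ID primitive.ObjectID `bson:"id"`
	}
	if err := bson.Unmarshal(doc, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.ID.Hex()) // 5f2a6c21e3d1f84a12345678
}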
+func (dvd DefaultValueDecoders) ObjectIDDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tOID { + return ValueDecoderError{Name: "ObjectIDDecodeValue", Types: []reflect.Type{tOID}, Received: val} + } + + elem, err := dvd.objectIDDecodeType(dc, vr, tOID) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (DefaultValueDecoders) dateTimeDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tDateTime { + return emptyValue, ValueDecoderError{ + Name: "DateTimeDecodeValue", + Types: []reflect.Type{tDateTime}, + Received: reflect.Zero(t), + } + } + + var dt int64 + var err error + switch vrType := vr.Type(); vrType { + case bsontype.DateTime: + dt, err = vr.ReadDateTime() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a DateTime", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.DateTime(dt)), nil +} + +// DateTimeDecodeValue is the ValueDecoderFunc for DateTime. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) DateTimeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tDateTime { + return ValueDecoderError{Name: "DateTimeDecodeValue", Types: []reflect.Type{tDateTime}, Received: val} + } + + elem, err := dvd.dateTimeDecodeType(dc, vr, tDateTime) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (DefaultValueDecoders) nullDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tNull { + return emptyValue, ValueDecoderError{ + Name: "NullDecodeValue", + Types: []reflect.Type{tNull}, + Received: reflect.Zero(t), + } + } + + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Undefined: + err = vr.ReadUndefined() + case bsontype.Null: + err = vr.ReadNull() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a Null", vr.Type()) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.Null{}), nil +} + +// NullDecodeValue is the ValueDecoderFunc for Null. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
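dateTimeDecodeType above yields a primitive.DateTime, which is just the BSON millisecond count; converting to a time.Time is a separate step. An illustrative snippet:

package main

import (
	"fmt"
	"time"

	"go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
	dt := primitive.NewDateTimeFromTime(time.Unix(1700000000, 0))
	fmt.Println(int64(dt))       // 1700000000000 (milliseconds since the Unix epoch)
	fmt.Println(dt.Time().UTC()) // 2023-11-14 22:13:20 +0000 UTC
}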
+func (dvd DefaultValueDecoders) NullDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tNull { + return ValueDecoderError{Name: "NullDecodeValue", Types: []reflect.Type{tNull}, Received: val} + } + + elem, err := dvd.nullDecodeType(dc, vr, tNull) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (DefaultValueDecoders) regexDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tRegex { + return emptyValue, ValueDecoderError{ + Name: "RegexDecodeValue", + Types: []reflect.Type{tRegex}, + Received: reflect.Zero(t), + } + } + + var pattern, options string + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Regex: + pattern, options, err = vr.ReadRegex() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a Regex", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.Regex{Pattern: pattern, Options: options}), nil +} + +// RegexDecodeValue is the ValueDecoderFunc for Regex. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) RegexDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tRegex { + return ValueDecoderError{Name: "RegexDecodeValue", Types: []reflect.Type{tRegex}, Received: val} + } + + elem, err := dvd.regexDecodeType(dc, vr, tRegex) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (DefaultValueDecoders) dBPointerDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tDBPointer { + return emptyValue, ValueDecoderError{ + Name: "DBPointerDecodeValue", + Types: []reflect.Type{tDBPointer}, + Received: reflect.Zero(t), + } + } + + var ns string + var pointer primitive.ObjectID + var err error + switch vrType := vr.Type(); vrType { + case bsontype.DBPointer: + ns, pointer, err = vr.ReadDBPointer() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a DBPointer", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.DBPointer{DB: ns, Pointer: pointer}), nil +} + +// DBPointerDecodeValue is the ValueDecoderFunc for DBPointer. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
+func (dvd DefaultValueDecoders) DBPointerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tDBPointer { + return ValueDecoderError{Name: "DBPointerDecodeValue", Types: []reflect.Type{tDBPointer}, Received: val} + } + + elem, err := dvd.dBPointerDecodeType(dc, vr, tDBPointer) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (DefaultValueDecoders) timestampDecodeType(_ DecodeContext, vr bsonrw.ValueReader, reflectType reflect.Type) (reflect.Value, error) { + if reflectType != tTimestamp { + return emptyValue, ValueDecoderError{ + Name: "TimestampDecodeValue", + Types: []reflect.Type{tTimestamp}, + Received: reflect.Zero(reflectType), + } + } + + var t, incr uint32 + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Timestamp: + t, incr, err = vr.ReadTimestamp() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a Timestamp", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.Timestamp{T: t, I: incr}), nil +} + +// TimestampDecodeValue is the ValueDecoderFunc for Timestamp. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) TimestampDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tTimestamp { + return ValueDecoderError{Name: "TimestampDecodeValue", Types: []reflect.Type{tTimestamp}, Received: val} + } + + elem, err := dvd.timestampDecodeType(dc, vr, tTimestamp) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (DefaultValueDecoders) minKeyDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tMinKey { + return emptyValue, ValueDecoderError{ + Name: "MinKeyDecodeValue", + Types: []reflect.Type{tMinKey}, + Received: reflect.Zero(t), + } + } + + var err error + switch vrType := vr.Type(); vrType { + case bsontype.MinKey: + err = vr.ReadMinKey() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a MinKey", vr.Type()) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.MinKey{}), nil +} + +// MinKeyDecodeValue is the ValueDecoderFunc for MinKey. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
+func (dvd DefaultValueDecoders) MinKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tMinKey { + return ValueDecoderError{Name: "MinKeyDecodeValue", Types: []reflect.Type{tMinKey}, Received: val} + } + + elem, err := dvd.minKeyDecodeType(dc, vr, tMinKey) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (DefaultValueDecoders) maxKeyDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tMaxKey { + return emptyValue, ValueDecoderError{ + Name: "MaxKeyDecodeValue", + Types: []reflect.Type{tMaxKey}, + Received: reflect.Zero(t), + } + } + + var err error + switch vrType := vr.Type(); vrType { + case bsontype.MaxKey: + err = vr.ReadMaxKey() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a MaxKey", vr.Type()) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(primitive.MaxKey{}), nil +} + +// MaxKeyDecodeValue is the ValueDecoderFunc for MaxKey. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) MaxKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tMaxKey { + return ValueDecoderError{Name: "MaxKeyDecodeValue", Types: []reflect.Type{tMaxKey}, Received: val} + } + + elem, err := dvd.maxKeyDecodeType(dc, vr, tMaxKey) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (dvd DefaultValueDecoders) decimal128DecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tDecimal { + return emptyValue, ValueDecoderError{ + Name: "Decimal128DecodeValue", + Types: []reflect.Type{tDecimal}, + Received: reflect.Zero(t), + } + } + + var d128 primitive.Decimal128 + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Decimal128: + d128, err = vr.ReadDecimal128() + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a primitive.Decimal128", vr.Type()) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(d128), nil +} + +// Decimal128DecodeValue is the ValueDecoderFunc for primitive.Decimal128. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
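decimal128DecodeType above only targets primitive.Decimal128; as the Truncate docs earlier note, decimal128 values are never truncated into Go ints or floats. A small sketch:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
	d, err := primitive.ParseDecimal128("12.50")
	if err != nil {
		panic(err)
	}
	doc, err := bson.Marshal(bson.M{"price": d})
	if err != nil {
		panic(err)
	}

	var out struct {
		Price primitive.Decimal128 `bson:"price"`
	}
	if err := bson.Unmarshal(doc, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Price) // 12.50, exact decimal preserved
}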
+func (dvd DefaultValueDecoders) Decimal128DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tDecimal { + return ValueDecoderError{Name: "Decimal128DecodeValue", Types: []reflect.Type{tDecimal}, Received: val} + } + + elem, err := dvd.decimal128DecodeType(dctx, vr, tDecimal) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (dvd DefaultValueDecoders) jsonNumberDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tJSONNumber { + return emptyValue, ValueDecoderError{ + Name: "JSONNumberDecodeValue", + Types: []reflect.Type{tJSONNumber}, + Received: reflect.Zero(t), + } + } + + var jsonNum json.Number + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Double: + f64, err := vr.ReadDouble() + if err != nil { + return emptyValue, err + } + jsonNum = json.Number(strconv.FormatFloat(f64, 'f', -1, 64)) + case bsontype.Int32: + i32, err := vr.ReadInt32() + if err != nil { + return emptyValue, err + } + jsonNum = json.Number(strconv.FormatInt(int64(i32), 10)) + case bsontype.Int64: + i64, err := vr.ReadInt64() + if err != nil { + return emptyValue, err + } + jsonNum = json.Number(strconv.FormatInt(i64, 10)) + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a json.Number", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(jsonNum), nil +} + +// JSONNumberDecodeValue is the ValueDecoderFunc for json.Number. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) JSONNumberDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tJSONNumber { + return ValueDecoderError{Name: "JSONNumberDecodeValue", Types: []reflect.Type{tJSONNumber}, Received: val} + } + + elem, err := dvd.jsonNumberDecodeType(dc, vr, tJSONNumber) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (dvd DefaultValueDecoders) urlDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != tURL { + return emptyValue, ValueDecoderError{ + Name: "URLDecodeValue", + Types: []reflect.Type{tURL}, + Received: reflect.Zero(t), + } + } + + urlPtr := &url.URL{} + var err error + switch vrType := vr.Type(); vrType { + case bsontype.String: + var str string // Declare str here to avoid shadowing err during the ReadString call. + str, err = vr.ReadString() + if err != nil { + return emptyValue, err + } + + urlPtr, err = url.Parse(str) + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a *url.URL", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(urlPtr).Elem(), nil +} + +// URLDecodeValue is the ValueDecoderFunc for url.URL. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. 
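urlDecodeType above parses a BSON string with url.Parse, so a url.URL field round-trips through a plain string value. For example (a sketch, not part of the diff):

package main

import (
	"fmt"
	"net/url"

	"go.mongodb.org/mongo-driver/bson"
)

func main() {
	doc, err := bson.Marshal(bson.M{"home": "https://example.com/path?q=1"})
	if err != nil {
		panic(err)
	}

	var out struct {
		Home url.URL `bson:"home"`
	}
	if err := bson.Unmarshal(doc, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Home.Host, out.Home.Query().Get("q")) // example.com 1
}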
+func (dvd DefaultValueDecoders) URLDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tURL { + return ValueDecoderError{Name: "URLDecodeValue", Types: []reflect.Type{tURL}, Received: val} + } + + elem, err := dvd.urlDecodeType(dc, vr, tURL) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +// TimeDecodeValue is the ValueDecoderFunc for time.Time. +// +// Deprecated: TimeDecodeValue is not registered by default. Use TimeCodec.DecodeValue instead. +func (dvd DefaultValueDecoders) TimeDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if vr.Type() != bsontype.DateTime { + return fmt.Errorf("cannot decode %v into a time.Time", vr.Type()) + } + + dt, err := vr.ReadDateTime() + if err != nil { + return err + } + + if !val.CanSet() || val.Type() != tTime { + return ValueDecoderError{Name: "TimeDecodeValue", Types: []reflect.Type{tTime}, Received: val} + } + + val.Set(reflect.ValueOf(time.Unix(dt/1000, dt%1000*1000000).UTC())) + return nil +} + +// ByteSliceDecodeValue is the ValueDecoderFunc for []byte. +// +// Deprecated: ByteSliceDecodeValue is not registered by default. Use ByteSliceCodec.DecodeValue instead. +func (dvd DefaultValueDecoders) ByteSliceDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if vr.Type() != bsontype.Binary && vr.Type() != bsontype.Null { + return fmt.Errorf("cannot decode %v into a []byte", vr.Type()) + } + + if !val.CanSet() || val.Type() != tByteSlice { + return ValueDecoderError{Name: "ByteSliceDecodeValue", Types: []reflect.Type{tByteSlice}, Received: val} + } + + if vr.Type() == bsontype.Null { + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + } + + data, subtype, err := vr.ReadBinary() + if err != nil { + return err + } + if subtype != 0x00 { + return fmt.Errorf("ByteSliceDecodeValue can only be used to decode subtype 0x00 for %s, got %v", bsontype.Binary, subtype) + } + + val.Set(reflect.ValueOf(data)) + return nil +} + +// MapDecodeValue is the ValueDecoderFunc for map[string]* types. +// +// Deprecated: MapDecodeValue is not registered by default. Use MapCodec.DecodeValue instead. +func (dvd DefaultValueDecoders) MapDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Kind() != reflect.Map || val.Type().Key().Kind() != reflect.String { + return ValueDecoderError{Name: "MapDecodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} + } + + switch vr.Type() { + case bsontype.Type(0), bsontype.EmbeddedDocument: + case bsontype.Null: + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + default: + return fmt.Errorf("cannot decode %v into a %s", vr.Type(), val.Type()) + } + + dr, err := vr.ReadDocument() + if err != nil { + return err + } + + if val.IsNil() { + val.Set(reflect.MakeMap(val.Type())) + } + + eType := val.Type().Elem() + decoder, err := dc.LookupDecoder(eType) + if err != nil { + return err + } + + if eType == tEmpty { + dc.Ancestor = val.Type() + } + + keyType := val.Type().Key() + for { + key, vr, err := dr.ReadElement() + if err == bsonrw.ErrEOD { + break + } + if err != nil { + return err + } + + elem := reflect.New(eType).Elem() + + err = decoder.DecodeValue(dc, vr, elem) + if err != nil { + return err + } + + val.SetMapIndex(reflect.ValueOf(key).Convert(keyType), elem) + } + return nil +} + +// ArrayDecodeValue is the ValueDecoderFunc for array types. 
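MapDecodeValue's registered replacement (MapCodec) behaves the same way for the common case: an embedded document decodes into a Go map, and any pre-existing keys survive unless ZeroMaps is enabled. A sketch:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

func main() {
	doc, err := bson.Marshal(bson.M{"a": int32(1), "b": int32(2)})
	if err != nil {
		panic(err)
	}

	// Existing entries are kept because the destination map is non-nil and
	// ZeroMaps is not set on the decoder.
	m := map[string]int32{"stale": 99}
	if err := bson.Unmarshal(doc, &m); err != nil {
		panic(err)
	}
	fmt.Println(m) // map[a:1 b:2 stale:99]
}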
+// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) ArrayDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.IsValid() || val.Kind() != reflect.Array { + return ValueDecoderError{Name: "ArrayDecodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val} + } + + switch vrType := vr.Type(); vrType { + case bsontype.Array: + case bsontype.Type(0), bsontype.EmbeddedDocument: + if val.Type().Elem() != tE { + return fmt.Errorf("cannot decode document into %s", val.Type()) + } + case bsontype.Binary: + if val.Type().Elem() != tByte { + return fmt.Errorf("ArrayDecodeValue can only be used to decode binary into a byte array, got %v", vrType) + } + data, subtype, err := vr.ReadBinary() + if err != nil { + return err + } + if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { + return fmt.Errorf("ArrayDecodeValue can only be used to decode subtype 0x00 or 0x02 for %s, got %v", bsontype.Binary, subtype) + } + + if len(data) > val.Len() { + return fmt.Errorf("more elements returned in array than can fit inside %s", val.Type()) + } + + for idx, elem := range data { + val.Index(idx).Set(reflect.ValueOf(elem)) + } + return nil + case bsontype.Null: + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + case bsontype.Undefined: + val.Set(reflect.Zero(val.Type())) + return vr.ReadUndefined() + default: + return fmt.Errorf("cannot decode %v into an array", vrType) + } + + var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error) + switch val.Type().Elem() { + case tE: + elemsFunc = dvd.decodeD + default: + elemsFunc = dvd.decodeDefault + } + + elems, err := elemsFunc(dc, vr, val) + if err != nil { + return err + } + + if len(elems) > val.Len() { + return fmt.Errorf("more elements returned in array than can fit inside %s, got %v elements", val.Type(), len(elems)) + } + + for idx, elem := range elems { + val.Index(idx).Set(elem) + } + + return nil +} + +// SliceDecodeValue is the ValueDecoderFunc for slice types. +// +// Deprecated: SliceDecodeValue is not registered by default. Use SliceCodec.DecodeValue instead. +func (dvd DefaultValueDecoders) SliceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Kind() != reflect.Slice { + return ValueDecoderError{Name: "SliceDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} + } + + switch vr.Type() { + case bsontype.Array: + case bsontype.Null: + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + case bsontype.Type(0), bsontype.EmbeddedDocument: + if val.Type().Elem() != tE { + return fmt.Errorf("cannot decode document into %s", val.Type()) + } + default: + return fmt.Errorf("cannot decode %v into a slice", vr.Type()) + } + + var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error) + switch val.Type().Elem() { + case tE: + dc.Ancestor = val.Type() + elemsFunc = dvd.decodeD + default: + elemsFunc = dvd.decodeDefault + } + + elems, err := elemsFunc(dc, vr, val) + if err != nil { + return err + } + + if val.IsNil() { + val.Set(reflect.MakeSlice(val.Type(), 0, len(elems))) + } + + val.SetLen(0) + val.Set(reflect.Append(val, elems...)) + + return nil +} + +// ValueUnmarshalerDecodeValue is the ValueDecoderFunc for ValueUnmarshaler implementations. 
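+//
+// A ValueUnmarshaler is handed the raw BSON type and bytes for the value; a
+// type satisfying UnmarshalBSONValue(t bsontype.Type, data []byte) error
+// decides for itself how to interpret them.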
+// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.IsValid() || (!val.Type().Implements(tValueUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tValueUnmarshaler)) { + return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} + } + + if val.Kind() == reflect.Ptr && val.IsNil() { + if !val.CanSet() { + return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} + } + val.Set(reflect.New(val.Type().Elem())) + } + + if !val.Type().Implements(tValueUnmarshaler) { + if !val.CanAddr() { + return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} + } + val = val.Addr() // If the type doesn't implement the interface, a pointer to it must. + } + + t, src, err := bsonrw.Copier{}.CopyValueToBytes(vr) + if err != nil { + return err + } + + m, ok := val.Interface().(ValueUnmarshaler) + if !ok { + // NB: this error should be unreachable due to the above checks + return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} + } + return m.UnmarshalBSONValue(t, src) +} + +// UnmarshalerDecodeValue is the ValueDecoderFunc for Unmarshaler implementations. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.IsValid() || (!val.Type().Implements(tUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tUnmarshaler)) { + return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} + } + + if val.Kind() == reflect.Ptr && val.IsNil() { + if !val.CanSet() { + return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} + } + val.Set(reflect.New(val.Type().Elem())) + } + + _, src, err := bsonrw.Copier{}.CopyValueToBytes(vr) + if err != nil { + return err + } + + // If the target Go value is a pointer and the BSON field value is empty, set the value to the + // zero value of the pointer (nil) and don't call UnmarshalBSON. UnmarshalBSON has no way to + // change the pointer value from within the function (only the value at the pointer address), + // so it can't set the pointer to "nil" itself. Since the most common Go value for an empty BSON + // field value is "nil", we set "nil" here and don't call UnmarshalBSON. This behavior matches + // the behavior of the Go "encoding/json" unmarshaler when the target Go value is a pointer and + // the JSON field value is "null". + if val.Kind() == reflect.Ptr && len(src) == 0 { + val.Set(reflect.Zero(val.Type())) + return nil + } + + if !val.Type().Implements(tUnmarshaler) { + if !val.CanAddr() { + return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} + } + val = val.Addr() // If the type doesn't implement the interface, a pointer to it must. 
+ } + + m, ok := val.Interface().(Unmarshaler) + if !ok { + // NB: this error should be unreachable due to the above checks + return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} + } + return m.UnmarshalBSON(src) +} + +// EmptyInterfaceDecodeValue is the ValueDecoderFunc for interface{}. +// +// Deprecated: EmptyInterfaceDecodeValue is not registered by default. Use EmptyInterfaceCodec.DecodeValue instead. +func (dvd DefaultValueDecoders) EmptyInterfaceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tEmpty { + return ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: val} + } + + rtype, err := dc.LookupTypeMapEntry(vr.Type()) + if err != nil { + switch vr.Type() { + case bsontype.EmbeddedDocument: + if dc.Ancestor != nil { + rtype = dc.Ancestor + break + } + rtype = tD + case bsontype.Null: + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + default: + return err + } + } + + decoder, err := dc.LookupDecoder(rtype) + if err != nil { + return err + } + + elem := reflect.New(rtype).Elem() + err = decoder.DecodeValue(dc, vr, elem) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +// CoreDocumentDecodeValue is the ValueDecoderFunc for bsoncore.Document. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (DefaultValueDecoders) CoreDocumentDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tCoreDocument { + return ValueDecoderError{Name: "CoreDocumentDecodeValue", Types: []reflect.Type{tCoreDocument}, Received: val} + } + + if val.IsNil() { + val.Set(reflect.MakeSlice(val.Type(), 0, 0)) + } + + val.SetLen(0) + + cdoc, err := bsonrw.Copier{}.AppendDocumentBytes(val.Interface().(bsoncore.Document), vr) + val.Set(reflect.ValueOf(cdoc)) + return err +} + +func (dvd DefaultValueDecoders) decodeDefault(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) ([]reflect.Value, error) { + elems := make([]reflect.Value, 0) + + ar, err := vr.ReadArray() + if err != nil { + return nil, err + } + + eType := val.Type().Elem() + + decoder, err := dc.LookupDecoder(eType) + if err != nil { + return nil, err + } + eTypeDecoder, _ := decoder.(typeDecoder) + + idx := 0 + for { + vr, err := ar.ReadValue() + if err == bsonrw.ErrEOA { + break + } + if err != nil { + return nil, err + } + + elem, err := decodeTypeOrValueWithInfo(decoder, eTypeDecoder, dc, vr, eType, true) + if err != nil { + return nil, newDecodeError(strconv.Itoa(idx), err) + } + elems = append(elems, elem) + idx++ + } + + return elems, nil +} + +func (dvd DefaultValueDecoders) readCodeWithScope(dc DecodeContext, vr bsonrw.ValueReader) (primitive.CodeWithScope, error) { + var cws primitive.CodeWithScope + + code, dr, err := vr.ReadCodeWithScope() + if err != nil { + return cws, err + } + + scope := reflect.New(tD).Elem() + elems, err := dvd.decodeElemsFromDocumentReader(dc, dr) + if err != nil { + return cws, err + } + + scope.Set(reflect.MakeSlice(tD, 0, len(elems))) + scope.Set(reflect.Append(scope, elems...)) + + cws = primitive.CodeWithScope{ + Code: primitive.JavaScript(code), + Scope: scope.Interface().(primitive.D), + } + return cws, nil +} + +func (dvd DefaultValueDecoders) codeWithScopeDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t != 
tCodeWithScope { + return emptyValue, ValueDecoderError{ + Name: "CodeWithScopeDecodeValue", + Types: []reflect.Type{tCodeWithScope}, + Received: reflect.Zero(t), + } + } + + var cws primitive.CodeWithScope + var err error + switch vrType := vr.Type(); vrType { + case bsontype.CodeWithScope: + cws, err = dvd.readCodeWithScope(dc, vr) + case bsontype.Null: + err = vr.ReadNull() + case bsontype.Undefined: + err = vr.ReadUndefined() + default: + return emptyValue, fmt.Errorf("cannot decode %v into a primitive.CodeWithScope", vrType) + } + if err != nil { + return emptyValue, err + } + + return reflect.ValueOf(cws), nil +} + +// CodeWithScopeDecodeValue is the ValueDecoderFunc for CodeWithScope. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value decoders registered. +func (dvd DefaultValueDecoders) CodeWithScopeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Type() != tCodeWithScope { + return ValueDecoderError{Name: "CodeWithScopeDecodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val} + } + + elem, err := dvd.codeWithScopeDecodeType(dc, vr, tCodeWithScope) + if err != nil { + return err + } + + val.Set(elem) + return nil +} + +func (dvd DefaultValueDecoders) decodeD(dc DecodeContext, vr bsonrw.ValueReader, _ reflect.Value) ([]reflect.Value, error) { + switch vr.Type() { + case bsontype.Type(0), bsontype.EmbeddedDocument: + default: + return nil, fmt.Errorf("cannot decode %v into a D", vr.Type()) + } + + dr, err := vr.ReadDocument() + if err != nil { + return nil, err + } + + return dvd.decodeElemsFromDocumentReader(dc, dr) +} + +func (DefaultValueDecoders) decodeElemsFromDocumentReader(dc DecodeContext, dr bsonrw.DocumentReader) ([]reflect.Value, error) { + decoder, err := dc.LookupDecoder(tEmpty) + if err != nil { + return nil, err + } + + elems := make([]reflect.Value, 0) + for { + key, vr, err := dr.ReadElement() + if err == bsonrw.ErrEOD { + break + } + if err != nil { + return nil, err + } + + val := reflect.New(tEmpty).Elem() + err = decoder.DecodeValue(dc, vr, val) + if err != nil { + return nil, newDecodeError(key, err) + } + + elems = append(elems, reflect.ValueOf(primitive.E{Key: key, Value: val.Interface()})) + } + + return elems, nil +} diff --git a/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go new file mode 100644 index 000000000..4ab14a668 --- /dev/null +++ b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go @@ -0,0 +1,856 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "net/url" + "reflect" + "sync" + "time" + + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" +) + +var defaultValueEncoders DefaultValueEncoders + +var bvwPool = bsonrw.NewBSONValueWriterPool() + +var errInvalidValue = errors.New("cannot encode invalid element") + +var sliceWriterPool = sync.Pool{ + New: func() interface{} { + sw := make(bsonrw.SliceWriter, 0) + return &sw + }, +} + +func encodeElement(ec EncodeContext, dw bsonrw.DocumentWriter, e primitive.E) error { + vw, err := dw.WriteDocumentElement(e.Key) + if err != nil { + return err + } + + if e.Value == nil { + return vw.WriteNull() + } + encoder, err := ec.LookupEncoder(reflect.TypeOf(e.Value)) + if err != nil { + return err + } + + err = encoder.EncodeValue(ec, vw, reflect.ValueOf(e.Value)) + if err != nil { + return err + } + return nil +} + +// DefaultValueEncoders is a namespace type for the default ValueEncoders used +// when creating a registry. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +type DefaultValueEncoders struct{} + +// RegisterDefaultEncoders will register the encoder methods attached to DefaultValueEncoders with +// the provided RegistryBuilder. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) RegisterDefaultEncoders(rb *RegistryBuilder) { + if rb == nil { + panic(errors.New("argument to RegisterDefaultEncoders must not be nil")) + } + rb. + RegisterTypeEncoder(tByteSlice, defaultByteSliceCodec). + RegisterTypeEncoder(tTime, defaultTimeCodec). + RegisterTypeEncoder(tEmpty, defaultEmptyInterfaceCodec). + RegisterTypeEncoder(tCoreArray, defaultArrayCodec). + RegisterTypeEncoder(tOID, ValueEncoderFunc(dve.ObjectIDEncodeValue)). + RegisterTypeEncoder(tDecimal, ValueEncoderFunc(dve.Decimal128EncodeValue)). + RegisterTypeEncoder(tJSONNumber, ValueEncoderFunc(dve.JSONNumberEncodeValue)). + RegisterTypeEncoder(tURL, ValueEncoderFunc(dve.URLEncodeValue)). + RegisterTypeEncoder(tJavaScript, ValueEncoderFunc(dve.JavaScriptEncodeValue)). + RegisterTypeEncoder(tSymbol, ValueEncoderFunc(dve.SymbolEncodeValue)). + RegisterTypeEncoder(tBinary, ValueEncoderFunc(dve.BinaryEncodeValue)). + RegisterTypeEncoder(tUndefined, ValueEncoderFunc(dve.UndefinedEncodeValue)). + RegisterTypeEncoder(tDateTime, ValueEncoderFunc(dve.DateTimeEncodeValue)). + RegisterTypeEncoder(tNull, ValueEncoderFunc(dve.NullEncodeValue)). + RegisterTypeEncoder(tRegex, ValueEncoderFunc(dve.RegexEncodeValue)). + RegisterTypeEncoder(tDBPointer, ValueEncoderFunc(dve.DBPointerEncodeValue)). + RegisterTypeEncoder(tTimestamp, ValueEncoderFunc(dve.TimestampEncodeValue)). + RegisterTypeEncoder(tMinKey, ValueEncoderFunc(dve.MinKeyEncodeValue)). + RegisterTypeEncoder(tMaxKey, ValueEncoderFunc(dve.MaxKeyEncodeValue)). + RegisterTypeEncoder(tCoreDocument, ValueEncoderFunc(dve.CoreDocumentEncodeValue)). + RegisterTypeEncoder(tCodeWithScope, ValueEncoderFunc(dve.CodeWithScopeEncodeValue)). + RegisterDefaultEncoder(reflect.Bool, ValueEncoderFunc(dve.BooleanEncodeValue)). + RegisterDefaultEncoder(reflect.Int, ValueEncoderFunc(dve.IntEncodeValue)). 
+ RegisterDefaultEncoder(reflect.Int8, ValueEncoderFunc(dve.IntEncodeValue)). + RegisterDefaultEncoder(reflect.Int16, ValueEncoderFunc(dve.IntEncodeValue)). + RegisterDefaultEncoder(reflect.Int32, ValueEncoderFunc(dve.IntEncodeValue)). + RegisterDefaultEncoder(reflect.Int64, ValueEncoderFunc(dve.IntEncodeValue)). + RegisterDefaultEncoder(reflect.Uint, defaultUIntCodec). + RegisterDefaultEncoder(reflect.Uint8, defaultUIntCodec). + RegisterDefaultEncoder(reflect.Uint16, defaultUIntCodec). + RegisterDefaultEncoder(reflect.Uint32, defaultUIntCodec). + RegisterDefaultEncoder(reflect.Uint64, defaultUIntCodec). + RegisterDefaultEncoder(reflect.Float32, ValueEncoderFunc(dve.FloatEncodeValue)). + RegisterDefaultEncoder(reflect.Float64, ValueEncoderFunc(dve.FloatEncodeValue)). + RegisterDefaultEncoder(reflect.Array, ValueEncoderFunc(dve.ArrayEncodeValue)). + RegisterDefaultEncoder(reflect.Map, defaultMapCodec). + RegisterDefaultEncoder(reflect.Slice, defaultSliceCodec). + RegisterDefaultEncoder(reflect.String, defaultStringCodec). + RegisterDefaultEncoder(reflect.Struct, newDefaultStructCodec()). + RegisterDefaultEncoder(reflect.Ptr, NewPointerCodec()). + RegisterHookEncoder(tValueMarshaler, ValueEncoderFunc(dve.ValueMarshalerEncodeValue)). + RegisterHookEncoder(tMarshaler, ValueEncoderFunc(dve.MarshalerEncodeValue)). + RegisterHookEncoder(tProxy, ValueEncoderFunc(dve.ProxyEncodeValue)) +} + +// BooleanEncodeValue is the ValueEncoderFunc for bool types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) BooleanEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Kind() != reflect.Bool { + return ValueEncoderError{Name: "BooleanEncodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val} + } + return vw.WriteBoolean(val.Bool()) +} + +func fitsIn32Bits(i int64) bool { + return math.MinInt32 <= i && i <= math.MaxInt32 +} + +// IntEncodeValue is the ValueEncoderFunc for int types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) IntEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + switch val.Kind() { + case reflect.Int8, reflect.Int16, reflect.Int32: + return vw.WriteInt32(int32(val.Int())) + case reflect.Int: + i64 := val.Int() + if fitsIn32Bits(i64) { + return vw.WriteInt32(int32(i64)) + } + return vw.WriteInt64(i64) + case reflect.Int64: + i64 := val.Int() + if ec.MinSize && fitsIn32Bits(i64) { + return vw.WriteInt32(int32(i64)) + } + return vw.WriteInt64(i64) + } + + return ValueEncoderError{ + Name: "IntEncodeValue", + Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, + Received: val, + } +} + +// UintEncodeValue is the ValueEncoderFunc for uint types. +// +// Deprecated: UintEncodeValue is not registered by default. Use UintCodec.EncodeValue instead. 
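+//
+// BSON has no unsigned integer type. This encoder writes uint8 and uint16
+// values as BSON int32, writes other uint kinds as int32 when MinSize is set
+// and the value fits, and otherwise writes them as int64; values above
+// math.MaxInt64 cause an overflow error.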
+func (dve DefaultValueEncoders) UintEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + switch val.Kind() { + case reflect.Uint8, reflect.Uint16: + return vw.WriteInt32(int32(val.Uint())) + case reflect.Uint, reflect.Uint32, reflect.Uint64: + u64 := val.Uint() + if ec.MinSize && u64 <= math.MaxInt32 { + return vw.WriteInt32(int32(u64)) + } + if u64 > math.MaxInt64 { + return fmt.Errorf("%d overflows int64", u64) + } + return vw.WriteInt64(int64(u64)) + } + + return ValueEncoderError{ + Name: "UintEncodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: val, + } +} + +// FloatEncodeValue is the ValueEncoderFunc for float types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) FloatEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + switch val.Kind() { + case reflect.Float32, reflect.Float64: + return vw.WriteDouble(val.Float()) + } + + return ValueEncoderError{Name: "FloatEncodeValue", Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, Received: val} +} + +// StringEncodeValue is the ValueEncoderFunc for string types. +// +// Deprecated: StringEncodeValue is not registered by default. Use StringCodec.EncodeValue instead. +func (dve DefaultValueEncoders) StringEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if val.Kind() != reflect.String { + return ValueEncoderError{ + Name: "StringEncodeValue", + Kinds: []reflect.Kind{reflect.String}, + Received: val, + } + } + + return vw.WriteString(val.String()) +} + +// ObjectIDEncodeValue is the ValueEncoderFunc for primitive.ObjectID. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) ObjectIDEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tOID { + return ValueEncoderError{Name: "ObjectIDEncodeValue", Types: []reflect.Type{tOID}, Received: val} + } + return vw.WriteObjectID(val.Interface().(primitive.ObjectID)) +} + +// Decimal128EncodeValue is the ValueEncoderFunc for primitive.Decimal128. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) Decimal128EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tDecimal { + return ValueEncoderError{Name: "Decimal128EncodeValue", Types: []reflect.Type{tDecimal}, Received: val} + } + return vw.WriteDecimal128(val.Interface().(primitive.Decimal128)) +} + +// JSONNumberEncodeValue is the ValueEncoderFunc for json.Number. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. 
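+//
+// For example, json.Number("5") is written through IntEncodeValue as a BSON
+// int32 or int64, while json.Number("5.5") falls through to FloatEncodeValue
+// and is written as a BSON double.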
+func (dve DefaultValueEncoders) JSONNumberEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tJSONNumber {
+		return ValueEncoderError{Name: "JSONNumberEncodeValue", Types: []reflect.Type{tJSONNumber}, Received: val}
+	}
+	jsnum := val.Interface().(json.Number)
+
+	// Attempt int first, then float64
+	if i64, err := jsnum.Int64(); err == nil {
+		return dve.IntEncodeValue(ec, vw, reflect.ValueOf(i64))
+	}
+
+	f64, err := jsnum.Float64()
+	if err != nil {
+		return err
+	}
+
+	return dve.FloatEncodeValue(ec, vw, reflect.ValueOf(f64))
+}
+
+// URLEncodeValue is the ValueEncoderFunc for url.URL.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
+// value encoders registered.
+func (dve DefaultValueEncoders) URLEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tURL {
+		return ValueEncoderError{Name: "URLEncodeValue", Types: []reflect.Type{tURL}, Received: val}
+	}
+	u := val.Interface().(url.URL)
+	return vw.WriteString(u.String())
+}
+
+// TimeEncodeValue is the ValueEncoderFunc for time.Time.
+//
+// Deprecated: TimeEncodeValue is not registered by default. Use TimeCodec.EncodeValue instead.
+func (dve DefaultValueEncoders) TimeEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tTime {
+		return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val}
+	}
+	tt := val.Interface().(time.Time)
+	dt := primitive.NewDateTimeFromTime(tt)
+	return vw.WriteDateTime(int64(dt))
+}
+
+// ByteSliceEncodeValue is the ValueEncoderFunc for []byte.
+//
+// Deprecated: ByteSliceEncodeValue is not registered by default. Use ByteSliceCodec.EncodeValue instead.
+func (dve DefaultValueEncoders) ByteSliceEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tByteSlice {
+		return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val}
+	}
+	if val.IsNil() {
+		return vw.WriteNull()
+	}
+	return vw.WriteBinary(val.Interface().([]byte))
+}
+
+// MapEncodeValue is the ValueEncoderFunc for map[string]* types.
+//
+// Deprecated: MapEncodeValue is not registered by default. Use MapCodec.EncodeValue instead.
+func (dve DefaultValueEncoders) MapEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Kind() != reflect.Map || val.Type().Key().Kind() != reflect.String {
+		return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val}
+	}
+
+	if val.IsNil() {
+		// If we have a nil map but we can't WriteNull, that means we're probably trying to encode
+		// to a TopLevel document. We can't currently tell if this is what actually happened, but if
+		// there's a deeper underlying problem, the error will also be returned from WriteDocument,
+		// so just continue. The operations on a map reflection value are valid, so we can call
+		// MapKeys within mapEncodeValue without a problem.
+		err := vw.WriteNull()
+		if err == nil {
+			return nil
+		}
+	}
+
+	dw, err := vw.WriteDocument()
+	if err != nil {
+		return err
+	}
+
+	return dve.mapEncodeValue(ec, dw, val, nil)
+}
+
+// mapEncodeValue handles encoding of the values of a map. The collisionFn returns
+// true if the provided key exists; it is mainly used for inline maps in the
+// struct codec.
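+// For example, when the struct codec encodes a struct with an inlined map,
+// collisionFn reports whether a map key such as "name" is already taken by a
+// struct field, in which case mapEncodeValue returns an error rather than
+// writing a duplicate document key.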
+func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, val reflect.Value, collisionFn func(string) bool) error { + + elemType := val.Type().Elem() + encoder, err := ec.LookupEncoder(elemType) + if err != nil && elemType.Kind() != reflect.Interface { + return err + } + + keys := val.MapKeys() + for _, key := range keys { + if collisionFn != nil && collisionFn(key.String()) { + return fmt.Errorf("Key %s of inlined map conflicts with a struct field name", key) + } + + currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.MapIndex(key)) + if lookupErr != nil && lookupErr != errInvalidValue { + return lookupErr + } + + vw, err := dw.WriteDocumentElement(key.String()) + if err != nil { + return err + } + + if lookupErr == errInvalidValue { + err = vw.WriteNull() + if err != nil { + return err + } + continue + } + + err = currEncoder.EncodeValue(ec, vw, currVal) + if err != nil { + return err + } + } + + return dw.WriteDocumentEnd() +} + +// ArrayEncodeValue is the ValueEncoderFunc for array types. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) ArrayEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Kind() != reflect.Array { + return ValueEncoderError{Name: "ArrayEncodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val} + } + + // If we have a []primitive.E we want to treat it as a document instead of as an array. + if val.Type().Elem() == tE { + dw, err := vw.WriteDocument() + if err != nil { + return err + } + + for idx := 0; idx < val.Len(); idx++ { + e := val.Index(idx).Interface().(primitive.E) + err = encodeElement(ec, dw, e) + if err != nil { + return err + } + } + + return dw.WriteDocumentEnd() + } + + // If we have a []byte we want to treat it as a binary instead of as an array. + if val.Type().Elem() == tByte { + var byteSlice []byte + for idx := 0; idx < val.Len(); idx++ { + byteSlice = append(byteSlice, val.Index(idx).Interface().(byte)) + } + return vw.WriteBinary(byteSlice) + } + + aw, err := vw.WriteArray() + if err != nil { + return err + } + + elemType := val.Type().Elem() + encoder, err := ec.LookupEncoder(elemType) + if err != nil && elemType.Kind() != reflect.Interface { + return err + } + + for idx := 0; idx < val.Len(); idx++ { + currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx)) + if lookupErr != nil && lookupErr != errInvalidValue { + return lookupErr + } + + vw, err := aw.WriteArrayElement() + if err != nil { + return err + } + + if lookupErr == errInvalidValue { + err = vw.WriteNull() + if err != nil { + return err + } + continue + } + + err = currEncoder.EncodeValue(ec, vw, currVal) + if err != nil { + return err + } + } + return aw.WriteArrayEnd() +} + +// SliceEncodeValue is the ValueEncoderFunc for slice types. +// +// Deprecated: SliceEncodeValue is not registered by default. Use SliceCodec.EncodeValue instead. +func (dve DefaultValueEncoders) SliceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Kind() != reflect.Slice { + return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} + } + + if val.IsNil() { + return vw.WriteNull() + } + + // If we have a []primitive.E we want to treat it as a document instead of as an array. 
+ if val.Type().ConvertibleTo(tD) { + d := val.Convert(tD).Interface().(primitive.D) + + dw, err := vw.WriteDocument() + if err != nil { + return err + } + + for _, e := range d { + err = encodeElement(ec, dw, e) + if err != nil { + return err + } + } + + return dw.WriteDocumentEnd() + } + + aw, err := vw.WriteArray() + if err != nil { + return err + } + + elemType := val.Type().Elem() + encoder, err := ec.LookupEncoder(elemType) + if err != nil && elemType.Kind() != reflect.Interface { + return err + } + + for idx := 0; idx < val.Len(); idx++ { + currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx)) + if lookupErr != nil && lookupErr != errInvalidValue { + return lookupErr + } + + vw, err := aw.WriteArrayElement() + if err != nil { + return err + } + + if lookupErr == errInvalidValue { + err = vw.WriteNull() + if err != nil { + return err + } + continue + } + + err = currEncoder.EncodeValue(ec, vw, currVal) + if err != nil { + return err + } + } + return aw.WriteArrayEnd() +} + +func (dve DefaultValueEncoders) lookupElementEncoder(ec EncodeContext, origEncoder ValueEncoder, currVal reflect.Value) (ValueEncoder, reflect.Value, error) { + if origEncoder != nil || (currVal.Kind() != reflect.Interface) { + return origEncoder, currVal, nil + } + currVal = currVal.Elem() + if !currVal.IsValid() { + return nil, currVal, errInvalidValue + } + currEncoder, err := ec.LookupEncoder(currVal.Type()) + + return currEncoder, currVal, err +} + +// EmptyInterfaceEncodeValue is the ValueEncoderFunc for interface{}. +// +// Deprecated: EmptyInterfaceEncodeValue is not registered by default. Use EmptyInterfaceCodec.EncodeValue instead. +func (dve DefaultValueEncoders) EmptyInterfaceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tEmpty { + return ValueEncoderError{Name: "EmptyInterfaceEncodeValue", Types: []reflect.Type{tEmpty}, Received: val} + } + + if val.IsNil() { + return vw.WriteNull() + } + encoder, err := ec.LookupEncoder(val.Elem().Type()) + if err != nil { + return err + } + + return encoder.EncodeValue(ec, vw, val.Elem()) +} + +// ValueMarshalerEncodeValue is the ValueEncoderFunc for ValueMarshaler implementations. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) ValueMarshalerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + // Either val or a pointer to val must implement ValueMarshaler + switch { + case !val.IsValid(): + return ValueEncoderError{Name: "ValueMarshalerEncodeValue", Types: []reflect.Type{tValueMarshaler}, Received: val} + case val.Type().Implements(tValueMarshaler): + // If ValueMarshaler is implemented on a concrete type, make sure that val isn't a nil pointer + if isImplementationNil(val, tValueMarshaler) { + return vw.WriteNull() + } + case reflect.PtrTo(val.Type()).Implements(tValueMarshaler) && val.CanAddr(): + val = val.Addr() + default: + return ValueEncoderError{Name: "ValueMarshalerEncodeValue", Types: []reflect.Type{tValueMarshaler}, Received: val} + } + + m, ok := val.Interface().(ValueMarshaler) + if !ok { + return vw.WriteNull() + } + t, data, err := m.MarshalBSONValue() + if err != nil { + return err + } + return bsonrw.Copier{}.CopyValueFromBytes(vw, t, data) +} + +// MarshalerEncodeValue is the ValueEncoderFunc for Marshaler implementations. 
+// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) MarshalerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + // Either val or a pointer to val must implement Marshaler + switch { + case !val.IsValid(): + return ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: val} + case val.Type().Implements(tMarshaler): + // If Marshaler is implemented on a concrete type, make sure that val isn't a nil pointer + if isImplementationNil(val, tMarshaler) { + return vw.WriteNull() + } + case reflect.PtrTo(val.Type()).Implements(tMarshaler) && val.CanAddr(): + val = val.Addr() + default: + return ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: val} + } + + m, ok := val.Interface().(Marshaler) + if !ok { + return vw.WriteNull() + } + data, err := m.MarshalBSON() + if err != nil { + return err + } + return bsonrw.Copier{}.CopyValueFromBytes(vw, bsontype.EmbeddedDocument, data) +} + +// ProxyEncodeValue is the ValueEncoderFunc for Proxy implementations. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (dve DefaultValueEncoders) ProxyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + // Either val or a pointer to val must implement Proxy + switch { + case !val.IsValid(): + return ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: val} + case val.Type().Implements(tProxy): + // If Proxy is implemented on a concrete type, make sure that val isn't a nil pointer + if isImplementationNil(val, tProxy) { + return vw.WriteNull() + } + case reflect.PtrTo(val.Type()).Implements(tProxy) && val.CanAddr(): + val = val.Addr() + default: + return ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: val} + } + + m, ok := val.Interface().(Proxy) + if !ok { + return vw.WriteNull() + } + v, err := m.ProxyBSON() + if err != nil { + return err + } + if v == nil { + encoder, err := ec.LookupEncoder(nil) + if err != nil { + return err + } + return encoder.EncodeValue(ec, vw, reflect.ValueOf(nil)) + } + vv := reflect.ValueOf(v) + switch vv.Kind() { + case reflect.Ptr, reflect.Interface: + vv = vv.Elem() + } + encoder, err := ec.LookupEncoder(vv.Type()) + if err != nil { + return err + } + return encoder.EncodeValue(ec, vw, vv) +} + +// JavaScriptEncodeValue is the ValueEncoderFunc for the primitive.JavaScript type. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) JavaScriptEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tJavaScript { + return ValueEncoderError{Name: "JavaScriptEncodeValue", Types: []reflect.Type{tJavaScript}, Received: val} + } + + return vw.WriteJavascript(val.String()) +} + +// SymbolEncodeValue is the ValueEncoderFunc for the primitive.Symbol type. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. 
+func (DefaultValueEncoders) SymbolEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tSymbol { + return ValueEncoderError{Name: "SymbolEncodeValue", Types: []reflect.Type{tSymbol}, Received: val} + } + + return vw.WriteSymbol(val.String()) +} + +// BinaryEncodeValue is the ValueEncoderFunc for Binary. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) BinaryEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tBinary { + return ValueEncoderError{Name: "BinaryEncodeValue", Types: []reflect.Type{tBinary}, Received: val} + } + b := val.Interface().(primitive.Binary) + + return vw.WriteBinaryWithSubtype(b.Data, b.Subtype) +} + +// UndefinedEncodeValue is the ValueEncoderFunc for Undefined. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) UndefinedEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tUndefined { + return ValueEncoderError{Name: "UndefinedEncodeValue", Types: []reflect.Type{tUndefined}, Received: val} + } + + return vw.WriteUndefined() +} + +// DateTimeEncodeValue is the ValueEncoderFunc for DateTime. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) DateTimeEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tDateTime { + return ValueEncoderError{Name: "DateTimeEncodeValue", Types: []reflect.Type{tDateTime}, Received: val} + } + + return vw.WriteDateTime(val.Int()) +} + +// NullEncodeValue is the ValueEncoderFunc for Null. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) NullEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tNull { + return ValueEncoderError{Name: "NullEncodeValue", Types: []reflect.Type{tNull}, Received: val} + } + + return vw.WriteNull() +} + +// RegexEncodeValue is the ValueEncoderFunc for Regex. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) RegexEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tRegex { + return ValueEncoderError{Name: "RegexEncodeValue", Types: []reflect.Type{tRegex}, Received: val} + } + + regex := val.Interface().(primitive.Regex) + + return vw.WriteRegex(regex.Pattern, regex.Options) +} + +// DBPointerEncodeValue is the ValueEncoderFunc for DBPointer. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. 
+func (DefaultValueEncoders) DBPointerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tDBPointer { + return ValueEncoderError{Name: "DBPointerEncodeValue", Types: []reflect.Type{tDBPointer}, Received: val} + } + + dbp := val.Interface().(primitive.DBPointer) + + return vw.WriteDBPointer(dbp.DB, dbp.Pointer) +} + +// TimestampEncodeValue is the ValueEncoderFunc for Timestamp. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) TimestampEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tTimestamp { + return ValueEncoderError{Name: "TimestampEncodeValue", Types: []reflect.Type{tTimestamp}, Received: val} + } + + ts := val.Interface().(primitive.Timestamp) + + return vw.WriteTimestamp(ts.T, ts.I) +} + +// MinKeyEncodeValue is the ValueEncoderFunc for MinKey. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) MinKeyEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tMinKey { + return ValueEncoderError{Name: "MinKeyEncodeValue", Types: []reflect.Type{tMinKey}, Received: val} + } + + return vw.WriteMinKey() +} + +// MaxKeyEncodeValue is the ValueEncoderFunc for MaxKey. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) MaxKeyEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tMaxKey { + return ValueEncoderError{Name: "MaxKeyEncodeValue", Types: []reflect.Type{tMaxKey}, Received: val} + } + + return vw.WriteMaxKey() +} + +// CoreDocumentEncodeValue is the ValueEncoderFunc for bsoncore.Document. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. +func (DefaultValueEncoders) CoreDocumentEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tCoreDocument { + return ValueEncoderError{Name: "CoreDocumentEncodeValue", Types: []reflect.Type{tCoreDocument}, Received: val} + } + + cdoc := val.Interface().(bsoncore.Document) + + return bsonrw.Copier{}.CopyDocumentFromBytes(vw, cdoc) +} + +// CodeWithScopeEncodeValue is the ValueEncoderFunc for CodeWithScope. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default +// value encoders registered. 
+func (dve DefaultValueEncoders) CodeWithScopeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Type() != tCodeWithScope { + return ValueEncoderError{Name: "CodeWithScopeEncodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val} + } + + cws := val.Interface().(primitive.CodeWithScope) + + dw, err := vw.WriteCodeWithScope(string(cws.Code)) + if err != nil { + return err + } + + sw := sliceWriterPool.Get().(*bsonrw.SliceWriter) + defer sliceWriterPool.Put(sw) + *sw = (*sw)[:0] + + scopeVW := bvwPool.Get(sw) + defer bvwPool.Put(scopeVW) + + encoder, err := ec.LookupEncoder(reflect.TypeOf(cws.Scope)) + if err != nil { + return err + } + + err = encoder.EncodeValue(ec, scopeVW, reflect.ValueOf(cws.Scope)) + if err != nil { + return err + } + + err = bsonrw.Copier{}.CopyBytesToDocumentWriter(dw, *sw) + if err != nil { + return err + } + return dw.WriteDocumentEnd() +} + +// isImplementationNil returns if val is a nil pointer and inter is implemented on a concrete type +func isImplementationNil(val reflect.Value, inter reflect.Type) bool { + vt := val.Type() + for vt.Kind() == reflect.Ptr { + vt = vt.Elem() + } + return vt.Implements(inter) && val.Kind() == reflect.Ptr && val.IsNil() +} diff --git a/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go new file mode 100644 index 000000000..4613e5a1e --- /dev/null +++ b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go @@ -0,0 +1,95 @@ +// Copyright (C) MongoDB, Inc. 2022-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package bsoncodec provides a system for encoding values to BSON representations and decoding +// values from BSON representations. This package considers both binary BSON and ExtendedJSON as +// BSON representations. The types in this package enable a flexible system for handling this +// encoding and decoding. +// +// The codec system is composed of two parts: +// +// 1) ValueEncoders and ValueDecoders that handle encoding and decoding Go values to and from BSON +// representations. +// +// 2) A Registry that holds these ValueEncoders and ValueDecoders and provides methods for +// retrieving them. +// +// # ValueEncoders and ValueDecoders +// +// The ValueEncoder interface is implemented by types that can encode a provided Go type to BSON. +// The value to encode is provided as a reflect.Value and a bsonrw.ValueWriter is used within the +// EncodeValue method to actually create the BSON representation. For convenience, ValueEncoderFunc +// is provided to allow use of a function with the correct signature as a ValueEncoder. An +// EncodeContext instance is provided to allow implementations to lookup further ValueEncoders and +// to provide configuration information. +// +// The ValueDecoder interface is the inverse of the ValueEncoder. Implementations should ensure that +// the value they receive is settable. Similar to ValueEncoderFunc, ValueDecoderFunc is provided to +// allow the use of a function with the correct signature as a ValueDecoder. A DecodeContext +// instance is provided and serves similar functionality to the EncodeContext. +// +// # Registry +// +// A Registry is a store for ValueEncoders, ValueDecoders, and a type map. 
See the Registry type
+// documentation for examples of registering various custom encoders and decoders. A Registry can
+// have four main types of registrations:
+//
+// 1. Type encoders/decoders - These can be registered using the RegisterTypeEncoder and
+// RegisterTypeDecoder methods. The registered codec will be invoked when encoding/decoding a value
+// whose type matches the registered type exactly.
+// If the registered type is an interface, the codec will be invoked when encoding or decoding
+// values whose type is the interface, but not for values with concrete types that implement the
+// interface.
+//
+// 2. Hook encoders/decoders - These can be registered using the RegisterHookEncoder and
+// RegisterHookDecoder methods. These methods only accept interface types and the registered codecs
+// will be invoked when encoding or decoding values whose types implement the interface. An example
+// of a hook defined by the driver is bson.Marshaler. The driver will call the MarshalBSON method
+// for any value whose type implements bson.Marshaler, regardless of the value's concrete type.
+//
+// 3. Type map entries - This can be used to associate a BSON type with a Go type. These type
+// associations are used when decoding into a bson.D/bson.M or a struct field of type interface{}.
+// For example, by default, BSON int32 and int64 values decode as Go int32 and int64 instances,
+// respectively, when decoding into a bson.D. The following code would change the behavior so these
+// values decode as Go int instances instead:
+//
+//	intType := reflect.TypeOf(int(0))
+//	registry.RegisterTypeMapEntry(bsontype.Int32, intType).RegisterTypeMapEntry(bsontype.Int64, intType)
+//
+// 4. Kind encoders/decoders - These can be registered using the RegisterDefaultEncoder and
+// RegisterDefaultDecoder methods. The registered codec will be invoked when encoding or decoding
+// values whose reflect.Kind matches the registered reflect.Kind as long as the value's type doesn't
+// match a registered type or hook encoder/decoder first. These methods should be used to change the
+// behavior for all values for a specific kind.
+//
+// # Registry Lookup Procedure
+//
+// When looking up an encoder in a Registry, the precedence rules are as follows:
+//
+// 1. A type encoder registered for the exact type of the value.
+//
+// 2. A hook encoder registered for an interface that is implemented by the value or by a pointer to
+// the value. If the value matches multiple hooks (e.g. the type implements bsoncodec.Marshaler and
+// bsoncodec.ValueMarshaler), the first one registered will be selected. Note that registries
+// constructed using bson.NewRegistry have driver-defined hooks registered for the
+// bsoncodec.Marshaler, bsoncodec.ValueMarshaler, and bsoncodec.Proxy interfaces, so those will take
+// precedence over any new hooks.
+//
+// 3. A kind encoder registered for the value's kind.
+//
+// If all of these lookups fail to find an encoder, an error of type ErrNoEncoder is returned. The
+// same precedence rules apply for decoders, with the exception that an error of type ErrNoDecoder
+// will be returned if no decoder is found.
+//
+// # DefaultValueEncoders and DefaultValueDecoders
+//
+// The DefaultValueEncoders and DefaultValueDecoders types provide a full set of ValueEncoders and
+// ValueDecoders for handling a wide range of Go types, including all of the types within the
+// primitive package. To make registering these codecs easier, a helper method on each type is
+// provided. For the DefaultValueEncoders type the method is called RegisterDefaultEncoders, and for
+// the DefaultValueDecoders type the method is called RegisterDefaultDecoders, which also handles
+// registering type map entries for each BSON type.
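+//
+// As a brief, illustrative sketch of how these pieces fit together (MyType and
+// encodeMyType are hypothetical names, not part of this package), a custom
+// encoder could be registered on a RegistryBuilder rb like so:
+//
+//	myType := reflect.TypeOf(MyType{})
+//	rb.RegisterTypeEncoder(myType, ValueEncoderFunc(
+//		func(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+//			return encodeMyType(vw, val.Interface().(MyType))
+//		}))
+//
+// After rb.Build(), looking up MyType in the resulting Registry selects this
+// encoder under rule 1 of the lookup procedure above.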
+package bsoncodec
diff --git a/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go
new file mode 100644
index 000000000..94f7dcf1e
--- /dev/null
+++ b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go
@@ -0,0 +1,159 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"reflect"
+
+	"go.mongodb.org/mongo-driver/bson/bsonoptions"
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+// EmptyInterfaceCodec is the Codec used for interface{} values.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
+// EmptyInterfaceCodec registered.
+type EmptyInterfaceCodec struct {
+	// DecodeBinaryAsSlice causes DecodeValue to unmarshal BSON binary field values that are the
+	// "Generic" or "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary.
+	//
+	// Deprecated: Use bson.Decoder.BinaryAsSlice instead.
+	DecodeBinaryAsSlice bool
+}
+
+var (
+	defaultEmptyInterfaceCodec = NewEmptyInterfaceCodec()
+
+	// Assert that defaultEmptyInterfaceCodec satisfies the typeDecoder interface, which allows it
+	// to be used by collection type decoders (e.g. map, slice, etc) to set individual values in a
+	// collection.
+	_ typeDecoder = defaultEmptyInterfaceCodec
+)
+
+// NewEmptyInterfaceCodec returns an EmptyInterfaceCodec with options opts.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
+// EmptyInterfaceCodec registered.
+func NewEmptyInterfaceCodec(opts ...*bsonoptions.EmptyInterfaceCodecOptions) *EmptyInterfaceCodec {
+	interfaceOpt := bsonoptions.MergeEmptyInterfaceCodecOptions(opts...)
+
+	codec := EmptyInterfaceCodec{}
+	if interfaceOpt.DecodeBinaryAsSlice != nil {
+		codec.DecodeBinaryAsSlice = *interfaceOpt.DecodeBinaryAsSlice
+	}
+	return &codec
+}
+
+// EncodeValue is the ValueEncoderFunc for interface{}.
+func (eic EmptyInterfaceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tEmpty {
+		return ValueEncoderError{Name: "EmptyInterfaceEncodeValue", Types: []reflect.Type{tEmpty}, Received: val}
+	}
+
+	if val.IsNil() {
+		return vw.WriteNull()
+	}
+	encoder, err := ec.LookupEncoder(val.Elem().Type())
+	if err != nil {
+		return err
+	}
+
+	return encoder.EncodeValue(ec, vw, val.Elem())
+}
+
+func (eic EmptyInterfaceCodec) getEmptyInterfaceDecodeType(dc DecodeContext, valueType bsontype.Type) (reflect.Type, error) {
+	isDocument := valueType == bsontype.Type(0) || valueType == bsontype.EmbeddedDocument
+	if isDocument {
+		if dc.defaultDocumentType != nil {
+			// If the bsontype is an embedded document and the DocumentType is set on the DecodeContext, then return
+			// that type.
+			return dc.defaultDocumentType, nil
+		}
+		if dc.Ancestor != nil {
+			// Using ancestor information rather than looking up the type map entry forces consistent decoding.
+			// If we're decoding into a bson.D, subdocuments should also be decoded as bson.D, even if a type map entry
+			// has been registered.
+			return dc.Ancestor, nil
+		}
+	}
+
+	rtype, err := dc.LookupTypeMapEntry(valueType)
+	if err == nil {
+		return rtype, nil
+	}
+
+	if isDocument {
+		// For documents, fall back to looking up a type map entry for bsontype.Type(0) or bsontype.EmbeddedDocument,
+		// depending on the original valueType.
+		var lookupType bsontype.Type
+		switch valueType {
+		case bsontype.Type(0):
+			lookupType = bsontype.EmbeddedDocument
+		case bsontype.EmbeddedDocument:
+			lookupType = bsontype.Type(0)
+		}
+
+		rtype, err = dc.LookupTypeMapEntry(lookupType)
+		if err == nil {
+			return rtype, nil
+		}
+	}
+
+	return nil, err
+}
+
+func (eic EmptyInterfaceCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+	if t != tEmpty {
+		return emptyValue, ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: reflect.Zero(t)}
+	}
+
+	rtype, err := eic.getEmptyInterfaceDecodeType(dc, vr.Type())
+	if err != nil {
+		switch vr.Type() {
+		case bsontype.Null:
+			return reflect.Zero(t), vr.ReadNull()
+		default:
+			return emptyValue, err
+		}
+	}
+
+	decoder, err := dc.LookupDecoder(rtype)
+	if err != nil {
+		return emptyValue, err
+	}
+
+	elem, err := decodeTypeOrValue(decoder, dc, vr, rtype)
+	if err != nil {
+		return emptyValue, err
+	}
+
+	if (eic.DecodeBinaryAsSlice || dc.binaryAsSlice) && rtype == tBinary {
+		binElem := elem.Interface().(primitive.Binary)
+		if binElem.Subtype == bsontype.BinaryGeneric || binElem.Subtype == bsontype.BinaryBinaryOld {
+			elem = reflect.ValueOf(binElem.Data)
+		}
+	}
+
+	return elem, nil
+}
+
+// DecodeValue is the ValueDecoderFunc for interface{}.
+func (eic EmptyInterfaceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tEmpty {
+		return ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: val}
+	}
+
+	elem, err := eic.decodeType(dc, vr, val.Type())
+	if err != nil {
+		return err
+	}
+
+	val.Set(elem)
+	return nil
+}
diff --git a/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go
new file mode 100644
index 000000000..325c1738a
--- /dev/null
+++ b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go
@@ -0,0 +1,327 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"encoding"
+	"fmt"
+	"reflect"
+	"strconv"
+
+	"go.mongodb.org/mongo-driver/bson/bsonoptions"
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+var defaultMapCodec = NewMapCodec()
+
+// MapCodec is the Codec used for map values.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
+// MapCodec registered.
+type MapCodec struct {
+	// DecodeZerosMap causes DecodeValue to delete any existing values from Go maps in the destination
+	// value passed to Decode before unmarshaling BSON documents into them.
+	//
+	// Deprecated: Use bson.Decoder.ZeroMaps instead.
+	DecodeZerosMap bool
+
+	// EncodeNilAsEmpty causes EncodeValue to marshal nil Go maps as empty BSON documents instead of
+	// BSON null.
+	//
+	// Deprecated: Use bson.Encoder.NilMapAsEmpty instead.
+	EncodeNilAsEmpty bool
+
+	// EncodeKeysWithStringer causes the Encoder to convert Go map keys to BSON document field name
+	// strings using fmt.Sprintf() instead of the default string conversion logic.
+	//
+	// Deprecated: Use bson.Encoder.StringifyMapKeysWithFmt instead.
+	EncodeKeysWithStringer bool
+}
+
+// KeyMarshaler is the interface implemented by an object that can marshal itself into a string key.
+// This applies to types used as map keys and is similar to encoding.TextMarshaler.
+type KeyMarshaler interface {
+	MarshalKey() (key string, err error)
+}
+
+// KeyUnmarshaler is the interface implemented by an object that can unmarshal a string representation
+// of itself. This applies to types used as map keys and is similar to encoding.TextUnmarshaler.
+//
+// UnmarshalKey must be able to decode the form generated by MarshalKey.
+// UnmarshalKey must copy the text if it wishes to retain the text
+// after returning.
type KeyUnmarshaler interface {
+	UnmarshalKey(key string) error
+}
+
+// NewMapCodec returns a MapCodec with options opts.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
+// MapCodec registered.
+func NewMapCodec(opts ...*bsonoptions.MapCodecOptions) *MapCodec {
+	mapOpt := bsonoptions.MergeMapCodecOptions(opts...)
+
+	codec := MapCodec{}
+	if mapOpt.DecodeZerosMap != nil {
+		codec.DecodeZerosMap = *mapOpt.DecodeZerosMap
+	}
+	if mapOpt.EncodeNilAsEmpty != nil {
+		codec.EncodeNilAsEmpty = *mapOpt.EncodeNilAsEmpty
+	}
+	if mapOpt.EncodeKeysWithStringer != nil {
+		codec.EncodeKeysWithStringer = *mapOpt.EncodeKeysWithStringer
+	}
+	return &codec
+}
+
+// EncodeValue is the ValueEncoder for map[*]* types.
+func (mc *MapCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Kind() != reflect.Map {
+		return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val}
+	}
+
+	if val.IsNil() && !mc.EncodeNilAsEmpty && !ec.nilMapAsEmpty {
+		// If we have a nil map but we can't WriteNull, that means we're probably trying to encode
+		// to a TopLevel document. We can't currently tell if this is what actually happened, but if
+		// there's a deeper underlying problem, the error will also be returned from WriteDocument,
+		// so just continue. The operations on a map reflection value are valid, so we can call
+		// MapKeys within mapEncodeValue without a problem.
+		err := vw.WriteNull()
+		if err == nil {
+			return nil
+		}
+	}
+
+	dw, err := vw.WriteDocument()
+	if err != nil {
+		return err
+	}
+
+	return mc.mapEncodeValue(ec, dw, val, nil)
+}
+
+// mapEncodeValue handles encoding of the values of a map. The collisionFn returns
+// true if the provided key exists; it is mainly used for inline maps in the
+// struct codec.
+func (mc *MapCodec) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, val reflect.Value, collisionFn func(string) bool) error {
+
+	elemType := val.Type().Elem()
+	encoder, err := ec.LookupEncoder(elemType)
+	if err != nil && elemType.Kind() != reflect.Interface {
+		return err
+	}
+
+	keys := val.MapKeys()
+	for _, key := range keys {
+		keyStr, err := mc.encodeKey(key, ec.stringifyMapKeysWithFmt)
+		if err != nil {
+			return err
+		}
+
+		if collisionFn != nil && collisionFn(keyStr) {
+			return fmt.Errorf("Key %s of inlined map conflicts with a struct field name", key)
+		}
+
+		currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.MapIndex(key))
+		if lookupErr != nil && lookupErr != errInvalidValue {
+			return lookupErr
+		}
+
+		vw, err := dw.WriteDocumentElement(keyStr)
+		if err != nil {
+			return err
+		}
+
+		if lookupErr == errInvalidValue {
+			err = vw.WriteNull()
+			if err != nil {
+				return err
+			}
+			continue
+		}
+
+		err = currEncoder.EncodeValue(ec, vw, currVal)
+		if err != nil {
+			return err
+		}
+	}
+
+	return dw.WriteDocumentEnd()
+}
+
+// DecodeValue is the ValueDecoder for map[string/decimal]* types.
+func (mc *MapCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if val.Kind() != reflect.Map || (!val.CanSet() && val.IsNil()) {
+		return ValueDecoderError{Name: "MapDecodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val}
+	}
+
+	switch vrType := vr.Type(); vrType {
+	case bsontype.Type(0), bsontype.EmbeddedDocument:
+	case bsontype.Null:
+		val.Set(reflect.Zero(val.Type()))
+		return vr.ReadNull()
+	case bsontype.Undefined:
+		val.Set(reflect.Zero(val.Type()))
+		return vr.ReadUndefined()
+	default:
+		return fmt.Errorf("cannot decode %v into a %s", vrType, val.Type())
+	}
+
+	dr, err := vr.ReadDocument()
+	if err != nil {
+		return err
+	}
+
+	if val.IsNil() {
+		val.Set(reflect.MakeMap(val.Type()))
+	}
+
+	if val.Len() > 0 && (mc.DecodeZerosMap || dc.zeroMaps) {
+		clearMap(val)
+	}
+
+	eType := val.Type().Elem()
+	decoder, err := dc.LookupDecoder(eType)
+	if err != nil {
+		return err
+	}
+	eTypeDecoder, _ := decoder.(typeDecoder)
+
+	if eType == tEmpty {
+		dc.Ancestor = val.Type()
+	}
+
+	keyType := val.Type().Key()
+
+	for {
+		key, vr, err := dr.ReadElement()
+		if err == bsonrw.ErrEOD {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		k, err := mc.decodeKey(key, keyType)
+		if err != nil {
+			return err
+		}
+
+		elem, err := decodeTypeOrValueWithInfo(decoder, eTypeDecoder, dc, vr, eType, true)
+		if err != nil {
+			return newDecodeError(key, err)
+		}
+
+		val.SetMapIndex(k, elem)
+	}
+	return nil
+}
+
+func clearMap(m reflect.Value) {
+	var none reflect.Value
+	for _, k := range m.MapKeys() {
+		m.SetMapIndex(k, none)
+	}
+}
+
+func (mc *MapCodec) encodeKey(val reflect.Value, encodeKeysWithStringer bool) (string, error) {
+	if mc.EncodeKeysWithStringer || encodeKeysWithStringer {
+		return fmt.Sprint(val), nil
+	}
+
+	// keys of any string type are used directly
+	if val.Kind() == reflect.String {
+		return val.String(), nil
+	}
+	// KeyMarshalers are marshaled
+	if km, ok := val.Interface().(KeyMarshaler); ok {
+		if val.Kind() == reflect.Ptr && val.IsNil() {
+			return "", nil
+		}
+		buf, err := km.MarshalKey()
+		if err == nil {
+			return buf, nil
+		}
+		return "", err
+	}
+	// keys that implement encoding.TextMarshaler are marshaled.
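+	// (A time.Time key, for example, takes this path and is rendered by its MarshalText
+	// method as an RFC 3339 string.)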
+	if km, ok := val.Interface().(encoding.TextMarshaler); ok {
+		if val.Kind() == reflect.Ptr && val.IsNil() {
+			return "", nil
+		}
+
+		buf, err := km.MarshalText()
+		if err != nil {
+			return "", err
+		}
+
+		return string(buf), nil
+	}
+
+	switch val.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return strconv.FormatInt(val.Int(), 10), nil
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return strconv.FormatUint(val.Uint(), 10), nil
+	}
+	return "", fmt.Errorf("unsupported key type: %v", val.Type())
+}
+
+var keyUnmarshalerType = reflect.TypeOf((*KeyUnmarshaler)(nil)).Elem()
+var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+
+func (mc *MapCodec) decodeKey(key string, keyType reflect.Type) (reflect.Value, error) {
+	keyVal := reflect.ValueOf(key)
+	var err error
+	switch {
+	// First, if EncodeKeysWithStringer is not enabled, try to decode with KeyUnmarshaler
+	case !mc.EncodeKeysWithStringer && reflect.PtrTo(keyType).Implements(keyUnmarshalerType):
+		keyVal = reflect.New(keyType)
+		v := keyVal.Interface().(KeyUnmarshaler)
+		err = v.UnmarshalKey(key)
+		keyVal = keyVal.Elem()
+	// Try to decode encoding.TextUnmarshalers.
+	case reflect.PtrTo(keyType).Implements(textUnmarshalerType):
+		keyVal = reflect.New(keyType)
+		v := keyVal.Interface().(encoding.TextUnmarshaler)
+		err = v.UnmarshalText([]byte(key))
+		keyVal = keyVal.Elem()
+	// Otherwise, go to type specific behavior
+	default:
+		switch keyType.Kind() {
+		case reflect.String:
+			keyVal = reflect.ValueOf(key).Convert(keyType)
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			n, parseErr := strconv.ParseInt(key, 10, 64)
+			if parseErr != nil || reflect.Zero(keyType).OverflowInt(n) {
+				err = fmt.Errorf("failed to unmarshal number key %v", key)
+			}
+			keyVal = reflect.ValueOf(n).Convert(keyType)
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+			n, parseErr := strconv.ParseUint(key, 10, 64)
+			if parseErr != nil || reflect.Zero(keyType).OverflowUint(n) {
+				err = fmt.Errorf("failed to unmarshal number key %v", key)
+				break
+			}
+			keyVal = reflect.ValueOf(n).Convert(keyType)
+		case reflect.Float32, reflect.Float64:
+			if mc.EncodeKeysWithStringer {
+				parsed, err := strconv.ParseFloat(key, 64)
+				if err != nil {
+					return keyVal, fmt.Errorf("Map key is defined to be a decimal type (%v) but got error %v", keyType.Kind(), err)
+				}
+				keyVal = reflect.ValueOf(parsed)
+				break
+			}
+			fallthrough
+		default:
+			return keyVal, fmt.Errorf("unsupported key type: %v", keyType)
+		}
+	}
+	return keyVal, err
+}
diff --git a/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go
new file mode 100644
index 000000000..fbd9f0a9e
--- /dev/null
+++ b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go
@@ -0,0 +1,65 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import "fmt"
+
+type mode int
+
+const (
+	_ mode = iota
+	mTopLevel
+	mDocument
+	mArray
+	mValue
+	mElement
+	mCodeWithScope
+	mSpacer
+)
+
+func (m mode) String() string {
+	var str string
+
+	switch m {
+	case mTopLevel:
+		str = "TopLevel"
+	case mDocument:
+		str = "DocumentMode"
+	case mArray:
+		str = "ArrayMode"
+	case mValue:
+		str = "ValueMode"
+	case mElement:
+		str = "ElementMode"
+	case mCodeWithScope:
+		str = "CodeWithScopeMode"
+	case mSpacer:
+		str = "CodeWithScopeSpacerFrame"
+	default:
+		str = "UnknownMode"
+	}
+
+	return str
+}
+
+// TransitionError is an error returned when an invalid state transition occurs while
+// progressing a ValueReader or ValueWriter state machine.
+type TransitionError struct {
+	parent      mode
+	current     mode
+	destination mode
+}
+
+func (te TransitionError) Error() string {
+	if te.destination == mode(0) {
+		return fmt.Sprintf("invalid state transition: cannot read/write value while in %s", te.current)
+	}
+	if te.parent == mode(0) {
+		return fmt.Sprintf("invalid state transition: %s -> %s", te.current, te.destination)
+	}
+	return fmt.Sprintf("invalid state transition: %s -> %s; parent %s", te.current, te.destination, te.parent)
+}
diff --git a/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go
new file mode 100644
index 000000000..e5923230b
--- /dev/null
+++ b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go
@@ -0,0 +1,100 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"reflect"
+
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+var _ ValueEncoder = &PointerCodec{}
+var _ ValueDecoder = &PointerCodec{}
+
+// PointerCodec is the Codec used for pointers.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
+// PointerCodec registered.
+type PointerCodec struct {
+	ecache typeEncoderCache
+	dcache typeDecoderCache
+}
+
+// NewPointerCodec returns a PointerCodec that has been initialized.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
+// PointerCodec registered.
+func NewPointerCodec() *PointerCodec {
+	return &PointerCodec{}
+}
+
+// EncodeValue handles encoding a pointer by either encoding it to BSON Null if the pointer is nil
+// or looking up an encoder for the type of value the pointer points to.
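+// For example, a nil *int32 encodes as BSON null, while a non-nil *int32 is encoded by the int32
+// encoder applied to the pointed-to value.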
+func (pc *PointerCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if val.Kind() != reflect.Ptr {
+		if !val.IsValid() {
+			return vw.WriteNull()
+		}
+		return ValueEncoderError{Name: "PointerCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val}
+	}
+
+	if val.IsNil() {
+		return vw.WriteNull()
+	}
+
+	typ := val.Type()
+	if v, ok := pc.ecache.Load(typ); ok {
+		if v == nil {
+			return ErrNoEncoder{Type: typ}
+		}
+		return v.EncodeValue(ec, vw, val.Elem())
+	}
+	// TODO(charlie): handle concurrent requests for the same type
+	enc, err := ec.LookupEncoder(typ.Elem())
+	enc = pc.ecache.LoadOrStore(typ, enc)
+	if err != nil {
+		return err
+	}
+	return enc.EncodeValue(ec, vw, val.Elem())
+}
+
+// DecodeValue handles decoding a pointer by looking up a decoder for the type it points to and
+// using that to decode. If the BSON value is Null, this method will set the pointer to nil.
+func (pc *PointerCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Kind() != reflect.Ptr {
+		return ValueDecoderError{Name: "PointerCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val}
+	}
+
+	typ := val.Type()
+	if vr.Type() == bsontype.Null {
+		val.Set(reflect.Zero(typ))
+		return vr.ReadNull()
+	}
+	if vr.Type() == bsontype.Undefined {
+		val.Set(reflect.Zero(typ))
+		return vr.ReadUndefined()
+	}
+
+	if val.IsNil() {
+		val.Set(reflect.New(typ.Elem()))
+	}
+
+	if v, ok := pc.dcache.Load(typ); ok {
+		if v == nil {
+			return ErrNoDecoder{Type: typ}
+		}
+		return v.DecodeValue(dc, vr, val.Elem())
+	}
+	// TODO(charlie): handle concurrent requests for the same type
+	dec, err := dc.LookupDecoder(typ.Elem())
+	dec = pc.dcache.LoadOrStore(typ, dec)
+	if err != nil {
+		return err
+	}
+	return dec.DecodeValue(dc, vr, val.Elem())
+}
diff --git a/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go
new file mode 100644
index 000000000..4cf2b01ab
--- /dev/null
+++ b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go
@@ -0,0 +1,14 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+// Proxy is an interface implemented by types that cannot themselves be directly encoded. Types
+// that implement this interface will have ProxyBSON called during the encoding process and that
+// value will be encoded in place for the implementer.
+type Proxy interface {
+	ProxyBSON() (interface{}, error)
+}
diff --git a/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go
new file mode 100644
index 000000000..f309ee2b3
--- /dev/null
+++ b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go
@@ -0,0 +1,527 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"sync"
+
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+// ErrNilType is returned when nil is passed to either LookupEncoder or LookupDecoder.
+//
+// Deprecated: ErrNilType will not be supported in Go Driver 2.0.
+var ErrNilType = errors.New("cannot perform a decoder lookup on <nil>")
+
+// ErrNotPointer is returned when a non-pointer type is provided to LookupDecoder.
+//
+// Deprecated: ErrNotPointer will not be supported in Go Driver 2.0.
+var ErrNotPointer = errors.New("non-pointer provided to LookupDecoder")
+
+// ErrNoEncoder is returned when there wasn't an encoder available for a type.
+//
+// Deprecated: ErrNoEncoder will not be supported in Go Driver 2.0.
+type ErrNoEncoder struct {
+	Type reflect.Type
+}
+
+func (ene ErrNoEncoder) Error() string {
+	if ene.Type == nil {
+		return "no encoder found for <nil>"
+	}
+	return "no encoder found for " + ene.Type.String()
+}
+
+// ErrNoDecoder is returned when there wasn't a decoder available for a type.
+//
+// Deprecated: ErrNoDecoder will not be supported in Go Driver 2.0.
+type ErrNoDecoder struct {
+	Type reflect.Type
+}
+
+func (end ErrNoDecoder) Error() string {
+	return "no decoder found for " + end.Type.String()
+}
+
+// ErrNoTypeMapEntry is returned when there wasn't a type available for the provided BSON type.
+//
+// Deprecated: ErrNoTypeMapEntry will not be supported in Go Driver 2.0.
+type ErrNoTypeMapEntry struct {
+	Type bsontype.Type
+}
+
+func (entme ErrNoTypeMapEntry) Error() string {
+	return "no type map entry found for " + entme.Type.String()
+}
+
+// ErrNotInterface is returned when the provided type is not an interface.
+//
+// Deprecated: ErrNotInterface will not be supported in Go Driver 2.0.
+var ErrNotInterface = errors.New("The provided type is not an interface")
+
+// A RegistryBuilder is used to build a Registry. This type is not goroutine
+// safe.
+//
+// Deprecated: Use Registry instead.
+type RegistryBuilder struct {
+	registry *Registry
+}
+
+// NewRegistryBuilder creates a new empty RegistryBuilder.
+//
+// Deprecated: Use NewRegistry instead.
+func NewRegistryBuilder() *RegistryBuilder {
+	return &RegistryBuilder{
+		registry: NewRegistry(),
+	}
+}
+
+// RegisterCodec will register the provided ValueCodec for the provided type.
+//
+// Deprecated: Use Registry.RegisterTypeEncoder and Registry.RegisterTypeDecoder instead.
+func (rb *RegistryBuilder) RegisterCodec(t reflect.Type, codec ValueCodec) *RegistryBuilder {
+	rb.RegisterTypeEncoder(t, codec)
+	rb.RegisterTypeDecoder(t, codec)
+	return rb
+}
+
+// RegisterTypeEncoder will register the provided ValueEncoder for the provided type.
+//
+// The type will be used directly, so an encoder can be registered for a type and a different encoder can be registered
+// for a pointer to that type.
+//
+// If the given type is an interface, the encoder will be called when marshaling a type that is that interface. It
+// will not be called when marshaling a non-interface type that implements the interface.
+//
+// Deprecated: Use Registry.RegisterTypeEncoder instead.
+func (rb *RegistryBuilder) RegisterTypeEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder {
+	rb.registry.RegisterTypeEncoder(t, enc)
+	return rb
+}
+
+// RegisterHookEncoder will register an encoder for the provided interface type t. This encoder will be called when
+// marshaling a type if the type implements t or a pointer to the type implements t. If the provided type is not
+// an interface (i.e. t.Kind() != reflect.Interface), this method will panic.
+//
+// Deprecated: Use Registry.RegisterInterfaceEncoder instead.
+func (rb *RegistryBuilder) RegisterHookEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder {
+	rb.registry.RegisterInterfaceEncoder(t, enc)
+	return rb
+}
+
+// RegisterTypeDecoder will register the provided ValueDecoder for the provided type.
+//
+// The type will be used directly, so a decoder can be registered for a type and a different decoder can be registered
+// for a pointer to that type.
+//
+// If the given type is an interface, the decoder will be called when unmarshaling into a type that is that interface.
+// It will not be called when unmarshaling into a non-interface type that implements the interface.
+//
+// Deprecated: Use Registry.RegisterTypeDecoder instead.
+func (rb *RegistryBuilder) RegisterTypeDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder {
+	rb.registry.RegisterTypeDecoder(t, dec)
+	return rb
+}
+
+// RegisterHookDecoder will register a decoder for the provided interface type t. This decoder will be called when
+// unmarshaling into a type if the type implements t or a pointer to the type implements t. If the provided type is not
+// an interface (i.e. t.Kind() != reflect.Interface), this method will panic.
+//
+// Deprecated: Use Registry.RegisterInterfaceDecoder instead.
+func (rb *RegistryBuilder) RegisterHookDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder {
+	rb.registry.RegisterInterfaceDecoder(t, dec)
+	return rb
+}
+
+// RegisterEncoder registers the provided type and encoder pair.
+//
+// Deprecated: Use Registry.RegisterTypeEncoder or Registry.RegisterInterfaceEncoder instead.
+func (rb *RegistryBuilder) RegisterEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder {
+	if t == tEmpty {
+		rb.registry.RegisterTypeEncoder(t, enc)
+		return rb
+	}
+	switch t.Kind() {
+	case reflect.Interface:
+		rb.registry.RegisterInterfaceEncoder(t, enc)
+	default:
+		rb.registry.RegisterTypeEncoder(t, enc)
+	}
+	return rb
+}
+
+// RegisterDecoder registers the provided type and decoder pair.
+//
+// Deprecated: Use Registry.RegisterTypeDecoder or Registry.RegisterInterfaceDecoder instead.
+func (rb *RegistryBuilder) RegisterDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder {
+	if t == nil {
+		rb.registry.RegisterTypeDecoder(t, dec)
+		return rb
+	}
+	if t == tEmpty {
+		rb.registry.RegisterTypeDecoder(t, dec)
+		return rb
+	}
+	switch t.Kind() {
+	case reflect.Interface:
+		rb.registry.RegisterInterfaceDecoder(t, dec)
+	default:
+		rb.registry.RegisterTypeDecoder(t, dec)
+	}
+	return rb
+}
+
+// RegisterDefaultEncoder will register the provided ValueEncoder to the provided
+// kind.
+//
+// Deprecated: Use Registry.RegisterKindEncoder instead.
+func (rb *RegistryBuilder) RegisterDefaultEncoder(kind reflect.Kind, enc ValueEncoder) *RegistryBuilder {
+	rb.registry.RegisterKindEncoder(kind, enc)
+	return rb
+}
+
+// RegisterDefaultDecoder will register the provided ValueDecoder to the
+// provided kind.
+//
+// Deprecated: Use Registry.RegisterKindDecoder instead.
+func (rb *RegistryBuilder) RegisterDefaultDecoder(kind reflect.Kind, dec ValueDecoder) *RegistryBuilder {
+	rb.registry.RegisterKindDecoder(kind, dec)
+	return rb
+}
+
+// RegisterTypeMapEntry will register the provided type to the BSON type. The primary usage for this
+// mapping is decoding situations where an empty interface is used and a default type needs to be
+// created and decoded into.
+//
+// By default, BSON documents will decode into interface{} values as bson.D. To change the default type for BSON
+// documents, a type map entry for bsontype.EmbeddedDocument should be registered. For example, to force BSON documents
+// to decode to bson.Raw, use the following code:
+//
+//	rb.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{}))
+//
+// Deprecated: Use Registry.RegisterTypeMapEntry instead.
+func (rb *RegistryBuilder) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) *RegistryBuilder {
+	rb.registry.RegisterTypeMapEntry(bt, rt)
+	return rb
+}
+
+// Build creates a Registry from the current state of this RegistryBuilder.
+//
+// Deprecated: Use NewRegistry instead.
+func (rb *RegistryBuilder) Build() *Registry {
+	r := &Registry{
+		interfaceEncoders: append([]interfaceValueEncoder(nil), rb.registry.interfaceEncoders...),
+		interfaceDecoders: append([]interfaceValueDecoder(nil), rb.registry.interfaceDecoders...),
+		typeEncoders:      rb.registry.typeEncoders.Clone(),
+		typeDecoders:      rb.registry.typeDecoders.Clone(),
+		kindEncoders:      rb.registry.kindEncoders.Clone(),
+		kindDecoders:      rb.registry.kindDecoders.Clone(),
+	}
+	rb.registry.typeMap.Range(func(k, v interface{}) bool {
+		if k != nil && v != nil {
+			r.typeMap.Store(k, v)
+		}
+		return true
+	})
+	return r
+}
+
+// A Registry is used to store and retrieve codecs for types and interfaces. This type is the main
+// type passed around, and Encoders and Decoders are constructed from it.
+type Registry struct {
+	interfaceEncoders []interfaceValueEncoder
+	interfaceDecoders []interfaceValueDecoder
+	typeEncoders      *typeEncoderCache
+	typeDecoders      *typeDecoderCache
+	kindEncoders      *kindEncoderCache
+	kindDecoders      *kindDecoderCache
+	typeMap           sync.Map // map[bsontype.Type]reflect.Type
+}
+
+// NewRegistry creates a new empty Registry.
+func NewRegistry() *Registry {
+	return &Registry{
+		typeEncoders: new(typeEncoderCache),
+		typeDecoders: new(typeDecoderCache),
+		kindEncoders: new(kindEncoderCache),
+		kindDecoders: new(kindDecoderCache),
+	}
+}
+
+// RegisterTypeEncoder registers the provided ValueEncoder for the provided type.
+//
+// The type will be used as provided, so an encoder can be registered for a type and a different
+// encoder can be registered for a pointer to that type.
+//
+// If the given type is an interface, the encoder will be called when marshaling a type that is
+// that interface. It will not be called when marshaling a non-interface type that implements the
+// interface. To get the latter behavior, call RegisterHookEncoder instead.
+//
+// RegisterTypeEncoder should not be called concurrently with any other Registry method.
+func (r *Registry) RegisterTypeEncoder(valueType reflect.Type, enc ValueEncoder) {
+	r.typeEncoders.Store(valueType, enc)
+}
+
+// RegisterTypeDecoder registers the provided ValueDecoder for the provided type.
+//
+// The type will be used as provided, so a decoder can be registered for a type and a different
+// decoder can be registered for a pointer to that type.
+//
+// If the given type is an interface, the decoder will be called when unmarshaling into a type that
+// is that interface. It will not be called when unmarshaling into a non-interface type that
+// implements the interface. To get the latter behavior, call RegisterHookDecoder instead.
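+//
+// For example (a sketch; myDurationDecoder stands in for any ValueDecoder implementation):
+//
+//	reg := NewRegistry()
+//	reg.RegisterTypeDecoder(reflect.TypeOf(time.Duration(0)), myDurationDecoder)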
+//
+// RegisterTypeDecoder should not be called concurrently with any other Registry method.
+func (r *Registry) RegisterTypeDecoder(valueType reflect.Type, dec ValueDecoder) {
+	r.typeDecoders.Store(valueType, dec)
+}
+
+// RegisterKindEncoder registers the provided ValueEncoder for the provided kind.
+//
+// Use RegisterKindEncoder to register an encoder for any type with the same underlying kind. For
+// example, consider the type MyInt defined as
+//
+//	type MyInt int32
+//
+// To define an encoder for MyInt and int32, use RegisterKindEncoder like
+//
+//	reg.RegisterKindEncoder(reflect.Int32, myEncoder)
+//
+// RegisterKindEncoder should not be called concurrently with any other Registry method.
+func (r *Registry) RegisterKindEncoder(kind reflect.Kind, enc ValueEncoder) {
+	r.kindEncoders.Store(kind, enc)
+}
+
+// RegisterKindDecoder registers the provided ValueDecoder for the provided kind.
+//
+// Use RegisterKindDecoder to register a decoder for any type with the same underlying kind. For
+// example, consider the type MyInt defined as
+//
+//	type MyInt int32
+//
+// To define a decoder for MyInt and int32, use RegisterKindDecoder like
+//
+//	reg.RegisterKindDecoder(reflect.Int32, myDecoder)
+//
+// RegisterKindDecoder should not be called concurrently with any other Registry method.
+func (r *Registry) RegisterKindDecoder(kind reflect.Kind, dec ValueDecoder) {
+	r.kindDecoders.Store(kind, dec)
+}
+
+// RegisterInterfaceEncoder registers an encoder for the provided interface type iface. This encoder will
+// be called when marshaling a type if the type implements iface or a pointer to the type
+// implements iface. If the provided type is not an interface
+// (i.e. iface.Kind() != reflect.Interface), this method will panic.
+//
+// RegisterInterfaceEncoder should not be called concurrently with any other Registry method.
+func (r *Registry) RegisterInterfaceEncoder(iface reflect.Type, enc ValueEncoder) {
+	if iface.Kind() != reflect.Interface {
+		panicStr := fmt.Errorf("RegisterInterfaceEncoder expects a type with kind reflect.Interface, "+
+			"got type %s with kind %s", iface, iface.Kind())
+		panic(panicStr)
+	}
+
+	for idx, encoder := range r.interfaceEncoders {
+		if encoder.i == iface {
+			r.interfaceEncoders[idx].ve = enc
+			return
+		}
+	}
+
+	r.interfaceEncoders = append(r.interfaceEncoders, interfaceValueEncoder{i: iface, ve: enc})
+}
+
+// RegisterInterfaceDecoder registers a decoder for the provided interface type iface. This decoder will
+// be called when unmarshaling into a type if the type implements iface or a pointer to the type
+// implements iface. If the provided type is not an interface (i.e. iface.Kind() != reflect.Interface),
+// this method will panic.
+//
+// RegisterInterfaceDecoder should not be called concurrently with any other Registry method.
+func (r *Registry) RegisterInterfaceDecoder(iface reflect.Type, dec ValueDecoder) {
+	if iface.Kind() != reflect.Interface {
+		panicStr := fmt.Errorf("RegisterInterfaceDecoder expects a type with kind reflect.Interface, "+
+			"got type %s with kind %s", iface, iface.Kind())
+		panic(panicStr)
+	}
+
+	for idx, decoder := range r.interfaceDecoders {
+		if decoder.i == iface {
+			r.interfaceDecoders[idx].vd = dec
+			return
+		}
+	}
+
+	r.interfaceDecoders = append(r.interfaceDecoders, interfaceValueDecoder{i: iface, vd: dec})
+}
+
+// RegisterTypeMapEntry will register the provided type to the BSON type.
The primary usage for this +// mapping is decoding situations where an empty interface is used and a default type needs to be +// created and decoded into. +// +// By default, BSON documents will decode into interface{} values as bson.D. To change the default type for BSON +// documents, a type map entry for bsontype.EmbeddedDocument should be registered. For example, to force BSON documents +// to decode to bson.Raw, use the following code: +// +// reg.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{})) +func (r *Registry) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) { + r.typeMap.Store(bt, rt) +} + +// LookupEncoder returns the first matching encoder in the Registry. It uses the following lookup +// order: +// +// 1. An encoder registered for the exact type. If the given type is an interface, an encoder +// registered using RegisterTypeEncoder for that interface will be selected. +// +// 2. An encoder registered using RegisterInterfaceEncoder for an interface implemented by the type +// or by a pointer to the type. +// +// 3. An encoder registered using RegisterKindEncoder for the kind of value. +// +// If no encoder is found, an error of type ErrNoEncoder is returned. LookupEncoder is safe for +// concurrent use by multiple goroutines after all codecs and encoders are registered. +func (r *Registry) LookupEncoder(valueType reflect.Type) (ValueEncoder, error) { + enc, found := r.lookupTypeEncoder(valueType) + if found { + if enc == nil { + return nil, ErrNoEncoder{Type: valueType} + } + return enc, nil + } + + enc, found = r.lookupInterfaceEncoder(valueType, true) + if found { + return r.typeEncoders.LoadOrStore(valueType, enc), nil + } + if valueType == nil { + r.storeTypeEncoder(valueType, nil) + return nil, ErrNoEncoder{Type: valueType} + } + + if v, ok := r.kindEncoders.Load(valueType.Kind()); ok { + return r.storeTypeEncoder(valueType, v), nil + } + r.storeTypeEncoder(valueType, nil) + return nil, ErrNoEncoder{Type: valueType} +} + +func (r *Registry) storeTypeEncoder(rt reflect.Type, enc ValueEncoder) ValueEncoder { + return r.typeEncoders.LoadOrStore(rt, enc) +} + +func (r *Registry) lookupTypeEncoder(rt reflect.Type) (ValueEncoder, bool) { + return r.typeEncoders.Load(rt) +} + +func (r *Registry) lookupInterfaceEncoder(valueType reflect.Type, allowAddr bool) (ValueEncoder, bool) { + if valueType == nil { + return nil, false + } + for _, ienc := range r.interfaceEncoders { + if valueType.Implements(ienc.i) { + return ienc.ve, true + } + if allowAddr && valueType.Kind() != reflect.Ptr && reflect.PtrTo(valueType).Implements(ienc.i) { + // if *t implements an interface, this will catch if t implements an interface further + // ahead in interfaceEncoders + defaultEnc, found := r.lookupInterfaceEncoder(valueType, false) + if !found { + defaultEnc, _ = r.kindEncoders.Load(valueType.Kind()) + } + return newCondAddrEncoder(ienc.ve, defaultEnc), true + } + } + return nil, false +} + +// LookupDecoder returns the first matching decoder in the Registry. It uses the following lookup +// order: +// +// 1. A decoder registered for the exact type. If the given type is an interface, a decoder +// registered using RegisterTypeDecoder for that interface will be selected. +// +// 2. A decoder registered using RegisterInterfaceDecoder for an interface implemented by the type or by +// a pointer to the type. +// +// 3. A decoder registered using RegisterKindDecoder for the kind of value. +// +// If no decoder is found, an error of type ErrNoDecoder is returned. 
LookupDecoder is safe for +// concurrent use by multiple goroutines after all codecs and decoders are registered. +func (r *Registry) LookupDecoder(valueType reflect.Type) (ValueDecoder, error) { + if valueType == nil { + return nil, ErrNilType + } + dec, found := r.lookupTypeDecoder(valueType) + if found { + if dec == nil { + return nil, ErrNoDecoder{Type: valueType} + } + return dec, nil + } + + dec, found = r.lookupInterfaceDecoder(valueType, true) + if found { + return r.storeTypeDecoder(valueType, dec), nil + } + + if v, ok := r.kindDecoders.Load(valueType.Kind()); ok { + return r.storeTypeDecoder(valueType, v), nil + } + r.storeTypeDecoder(valueType, nil) + return nil, ErrNoDecoder{Type: valueType} +} + +func (r *Registry) lookupTypeDecoder(valueType reflect.Type) (ValueDecoder, bool) { + return r.typeDecoders.Load(valueType) +} + +func (r *Registry) storeTypeDecoder(typ reflect.Type, dec ValueDecoder) ValueDecoder { + return r.typeDecoders.LoadOrStore(typ, dec) +} + +func (r *Registry) lookupInterfaceDecoder(valueType reflect.Type, allowAddr bool) (ValueDecoder, bool) { + for _, idec := range r.interfaceDecoders { + if valueType.Implements(idec.i) { + return idec.vd, true + } + if allowAddr && valueType.Kind() != reflect.Ptr && reflect.PtrTo(valueType).Implements(idec.i) { + // if *t implements an interface, this will catch if t implements an interface further + // ahead in interfaceDecoders + defaultDec, found := r.lookupInterfaceDecoder(valueType, false) + if !found { + defaultDec, _ = r.kindDecoders.Load(valueType.Kind()) + } + return newCondAddrDecoder(idec.vd, defaultDec), true + } + } + return nil, false +} + +// LookupTypeMapEntry inspects the registry's type map for a Go type for the corresponding BSON +// type. If no type is found, ErrNoTypeMapEntry is returned. +// +// LookupTypeMapEntry should not be called concurrently with any other Registry method. +func (r *Registry) LookupTypeMapEntry(bt bsontype.Type) (reflect.Type, error) { + v, ok := r.typeMap.Load(bt) + if v == nil || !ok { + return nil, ErrNoTypeMapEntry{Type: bt} + } + return v.(reflect.Type), nil +} + +type interfaceValueEncoder struct { + i reflect.Type + ve ValueEncoder +} + +type interfaceValueDecoder struct { + i reflect.Type + vd ValueDecoder +} diff --git a/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go new file mode 100644 index 000000000..a43daf005 --- /dev/null +++ b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go @@ -0,0 +1,199 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "fmt" + "reflect" + + "go.mongodb.org/mongo-driver/bson/bsonoptions" + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" +) + +var defaultSliceCodec = NewSliceCodec() + +// SliceCodec is the Codec used for slice values. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// SliceCodec registered. +type SliceCodec struct { + // EncodeNilAsEmpty causes EncodeValue to marshal nil Go slices as empty BSON arrays instead of + // BSON null. 
+	//
+	// Deprecated: Use bson.Encoder.NilSliceAsEmpty instead.
+	EncodeNilAsEmpty bool
+}
+
+// NewSliceCodec returns a SliceCodec with options opts.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
+// SliceCodec registered.
+func NewSliceCodec(opts ...*bsonoptions.SliceCodecOptions) *SliceCodec {
+	sliceOpt := bsonoptions.MergeSliceCodecOptions(opts...)
+
+	codec := SliceCodec{}
+	if sliceOpt.EncodeNilAsEmpty != nil {
+		codec.EncodeNilAsEmpty = *sliceOpt.EncodeNilAsEmpty
+	}
+	return &codec
+}
+
+// EncodeValue is the ValueEncoder for slice types.
+func (sc SliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Kind() != reflect.Slice {
+		return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val}
+	}
+
+	if val.IsNil() && !sc.EncodeNilAsEmpty && !ec.nilSliceAsEmpty {
+		return vw.WriteNull()
+	}
+
+	// If we have a []byte we want to treat it as a binary instead of as an array.
+	if val.Type().Elem() == tByte {
+		byteSlice := make([]byte, val.Len())
+		reflect.Copy(reflect.ValueOf(byteSlice), val)
+		return vw.WriteBinary(byteSlice)
+	}
+
+	// If we have a []primitive.E we want to treat it as a document instead of as an array.
+	if val.Type() == tD || val.Type().ConvertibleTo(tD) {
+		d := val.Convert(tD).Interface().(primitive.D)
+
+		dw, err := vw.WriteDocument()
+		if err != nil {
+			return err
+		}
+
+		for _, e := range d {
+			err = encodeElement(ec, dw, e)
+			if err != nil {
+				return err
+			}
+		}
+
+		return dw.WriteDocumentEnd()
+	}
+
+	aw, err := vw.WriteArray()
+	if err != nil {
+		return err
+	}
+
+	elemType := val.Type().Elem()
+	encoder, err := ec.LookupEncoder(elemType)
+	if err != nil && elemType.Kind() != reflect.Interface {
+		return err
+	}
+
+	for idx := 0; idx < val.Len(); idx++ {
+		currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.Index(idx))
+		if lookupErr != nil && lookupErr != errInvalidValue {
+			return lookupErr
+		}
+
+		vw, err := aw.WriteArrayElement()
+		if err != nil {
+			return err
+		}
+
+		if lookupErr == errInvalidValue {
+			err = vw.WriteNull()
+			if err != nil {
+				return err
+			}
+			continue
+		}
+
+		err = currEncoder.EncodeValue(ec, vw, currVal)
+		if err != nil {
+			return err
+		}
+	}
+	return aw.WriteArrayEnd()
+}
+
+// DecodeValue is the ValueDecoder for slice types.
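+// As the switch below shows, BSON binary values (subtype 0x00 or 0x02) and BSON strings may also
+// be decoded into a []byte; for example (a sketch, assuming the top-level bson package):
+//
+//	type Blob struct {
+//		Data []byte `bson:"data"`
+//	}
+//
+//	// {"data": <binary, subtype 0x00>} and {"data": "abc"} both decode into Data.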
+func (sc *SliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Kind() != reflect.Slice { + return ValueDecoderError{Name: "SliceDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} + } + + switch vrType := vr.Type(); vrType { + case bsontype.Array: + case bsontype.Null: + val.Set(reflect.Zero(val.Type())) + return vr.ReadNull() + case bsontype.Undefined: + val.Set(reflect.Zero(val.Type())) + return vr.ReadUndefined() + case bsontype.Type(0), bsontype.EmbeddedDocument: + if val.Type().Elem() != tE { + return fmt.Errorf("cannot decode document into %s", val.Type()) + } + case bsontype.Binary: + if val.Type().Elem() != tByte { + return fmt.Errorf("SliceDecodeValue can only decode a binary into a byte array, got %v", vrType) + } + data, subtype, err := vr.ReadBinary() + if err != nil { + return err + } + if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { + return fmt.Errorf("SliceDecodeValue can only be used to decode subtype 0x00 or 0x02 for %s, got %v", bsontype.Binary, subtype) + } + + if val.IsNil() { + val.Set(reflect.MakeSlice(val.Type(), 0, len(data))) + } + val.SetLen(0) + val.Set(reflect.AppendSlice(val, reflect.ValueOf(data))) + return nil + case bsontype.String: + if sliceType := val.Type().Elem(); sliceType != tByte { + return fmt.Errorf("SliceDecodeValue can only decode a string into a byte array, got %v", sliceType) + } + str, err := vr.ReadString() + if err != nil { + return err + } + byteStr := []byte(str) + + if val.IsNil() { + val.Set(reflect.MakeSlice(val.Type(), 0, len(byteStr))) + } + val.SetLen(0) + val.Set(reflect.AppendSlice(val, reflect.ValueOf(byteStr))) + return nil + default: + return fmt.Errorf("cannot decode %v into a slice", vrType) + } + + var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error) + switch val.Type().Elem() { + case tE: + dc.Ancestor = val.Type() + elemsFunc = defaultValueDecoders.decodeD + default: + elemsFunc = defaultValueDecoders.decodeDefault + } + + elems, err := elemsFunc(dc, vr, val) + if err != nil { + return err + } + + if val.IsNil() { + val.Set(reflect.MakeSlice(val.Type(), 0, len(elems))) + } + + val.SetLen(0) + val.Set(reflect.Append(val, elems...)) + + return nil +} diff --git a/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go new file mode 100644 index 000000000..ff931b725 --- /dev/null +++ b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go @@ -0,0 +1,132 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "fmt" + "reflect" + + "go.mongodb.org/mongo-driver/bson/bsonoptions" + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" +) + +// StringCodec is the Codec used for string values. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// StringCodec registered. +type StringCodec struct { + // DecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation. + // If false, a string made from the raw object ID bytes will be used. Defaults to true. 
+ // + // Deprecated: Decoding object IDs as raw bytes will not be supported in Go Driver 2.0. + DecodeObjectIDAsHex bool +} + +var ( + defaultStringCodec = NewStringCodec() + + // Assert that defaultStringCodec satisfies the typeDecoder interface, which allows it to be + // used by collection type decoders (e.g. map, slice, etc) to set individual values in a + // collection. + _ typeDecoder = defaultStringCodec +) + +// NewStringCodec returns a StringCodec with options opts. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// StringCodec registered. +func NewStringCodec(opts ...*bsonoptions.StringCodecOptions) *StringCodec { + stringOpt := bsonoptions.MergeStringCodecOptions(opts...) + return &StringCodec{*stringOpt.DecodeObjectIDAsHex} +} + +// EncodeValue is the ValueEncoder for string types. +func (sc *StringCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if val.Kind() != reflect.String { + return ValueEncoderError{ + Name: "StringEncodeValue", + Kinds: []reflect.Kind{reflect.String}, + Received: val, + } + } + + return vw.WriteString(val.String()) +} + +func (sc *StringCodec) decodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + if t.Kind() != reflect.String { + return emptyValue, ValueDecoderError{ + Name: "StringDecodeValue", + Kinds: []reflect.Kind{reflect.String}, + Received: reflect.Zero(t), + } + } + + var str string + var err error + switch vr.Type() { + case bsontype.String: + str, err = vr.ReadString() + if err != nil { + return emptyValue, err + } + case bsontype.ObjectID: + oid, err := vr.ReadObjectID() + if err != nil { + return emptyValue, err + } + if sc.DecodeObjectIDAsHex { + str = oid.Hex() + } else { + // TODO(GODRIVER-2796): Return an error here instead of decoding to a garbled string. + byteArray := [12]byte(oid) + str = string(byteArray[:]) + } + case bsontype.Symbol: + str, err = vr.ReadSymbol() + if err != nil { + return emptyValue, err + } + case bsontype.Binary: + data, subtype, err := vr.ReadBinary() + if err != nil { + return emptyValue, err + } + if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { + return emptyValue, decodeBinaryError{subtype: subtype, typeName: "string"} + } + str = string(data) + case bsontype.Null: + if err = vr.ReadNull(); err != nil { + return emptyValue, err + } + case bsontype.Undefined: + if err = vr.ReadUndefined(); err != nil { + return emptyValue, err + } + default: + return emptyValue, fmt.Errorf("cannot decode %v into a string type", vr.Type()) + } + + return reflect.ValueOf(str), nil +} + +// DecodeValue is the ValueDecoder for string types. +func (sc *StringCodec) DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Kind() != reflect.String { + return ValueDecoderError{Name: "StringDecodeValue", Kinds: []reflect.Kind{reflect.String}, Received: val} + } + + elem, err := sc.decodeType(dctx, vr, val.Type()) + if err != nil { + return err + } + + val.SetString(elem.String()) + return nil +} diff --git a/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go new file mode 100644 index 000000000..4cde0a4d6 --- /dev/null +++ b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go @@ -0,0 +1,718 @@ +// Copyright (C) MongoDB, Inc. 2017-present. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson/bsonoptions"
+	"go.mongodb.org/mongo-driver/bson/bsonrw"
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+// DecodeError represents an error that occurs when unmarshalling BSON bytes into a native Go type.
+type DecodeError struct {
+	keys    []string
+	wrapped error
+}
+
+// Unwrap returns the underlying error
+func (de *DecodeError) Unwrap() error {
+	return de.wrapped
+}
+
+// Error implements the error interface.
+func (de *DecodeError) Error() string {
+	// The keys are stored in reverse order because the de.keys slice is built up while propagating the error up the
+	// stack of BSON keys, so we call de.Keys(), which reverses them.
+	keyPath := strings.Join(de.Keys(), ".")
+	return fmt.Sprintf("error decoding key %s: %v", keyPath, de.wrapped)
+}
+
+// Keys returns the BSON key path that caused an error as a slice of strings. The keys in the slice are in top-down
+// order. For example, if the document being unmarshalled was {a: {b: {c: 1}}} and the value for c was supposed to be
+// a string, the keys slice will be ["a", "b", "c"].
+func (de *DecodeError) Keys() []string {
+	reversedKeys := make([]string, 0, len(de.keys))
+	for idx := len(de.keys) - 1; idx >= 0; idx-- {
+		reversedKeys = append(reversedKeys, de.keys[idx])
+	}
+
+	return reversedKeys
+}
+
+// Zeroer allows custom struct types to report whether they are in their zero
+// state. All struct types that don't implement Zeroer, or whose IsZero
+// returns false, are considered to be not zero.
+type Zeroer interface {
+	IsZero() bool
+}
+
+// StructCodec is the Codec used for struct values.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
+// StructCodec registered.
+type StructCodec struct {
+	cache  sync.Map // map[reflect.Type]*structDescription
+	parser StructTagParser
+
+	// DecodeZeroStruct causes DecodeValue to delete any existing values from Go structs in the
+	// destination value passed to Decode before unmarshaling BSON documents into them.
+	//
+	// Deprecated: Use bson.Decoder.ZeroStructs instead.
+	DecodeZeroStruct bool
+
+	// DecodeDeepZeroInline causes DecodeValue to delete any existing values from Go structs in the
+	// destination value passed to Decode before unmarshaling BSON documents into them.
+	//
+	// Deprecated: DecodeDeepZeroInline will not be supported in Go Driver 2.0.
+	DecodeDeepZeroInline bool
+
+	// EncodeOmitDefaultStruct causes the Encoder to consider the zero value for a struct (e.g.
+	// MyStruct{}) as empty and omit it from the marshaled BSON when the "omitempty" struct tag
+	// option is set.
+	//
+	// Deprecated: Use bson.Encoder.OmitZeroStruct instead.
+	EncodeOmitDefaultStruct bool
+
+	// AllowUnexportedFields allows encoding and decoding values from un-exported struct fields.
+	//
+	// Deprecated: AllowUnexportedFields does not work on recent versions of Go and will not be
+	// supported in Go Driver 2.0.
+	AllowUnexportedFields bool
+
+	// OverwriteDuplicatedInlinedFields, if false, causes EncodeValue to return an error if there is
+	// a duplicate field in the marshaled BSON when the "inline" struct tag option is set. The
+	// default value is true.
+ // + // Deprecated: Use bson.Encoder.ErrorOnInlineDuplicates instead. + OverwriteDuplicatedInlinedFields bool +} + +var _ ValueEncoder = &StructCodec{} +var _ ValueDecoder = &StructCodec{} + +// NewStructCodec returns a StructCodec that uses p for struct tag parsing. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// StructCodec registered. +func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions) (*StructCodec, error) { + if p == nil { + return nil, errors.New("a StructTagParser must be provided to NewStructCodec") + } + + structOpt := bsonoptions.MergeStructCodecOptions(opts...) + + codec := &StructCodec{ + parser: p, + } + + if structOpt.DecodeZeroStruct != nil { + codec.DecodeZeroStruct = *structOpt.DecodeZeroStruct + } + if structOpt.DecodeDeepZeroInline != nil { + codec.DecodeDeepZeroInline = *structOpt.DecodeDeepZeroInline + } + if structOpt.EncodeOmitDefaultStruct != nil { + codec.EncodeOmitDefaultStruct = *structOpt.EncodeOmitDefaultStruct + } + if structOpt.OverwriteDuplicatedInlinedFields != nil { + codec.OverwriteDuplicatedInlinedFields = *structOpt.OverwriteDuplicatedInlinedFields + } + if structOpt.AllowUnexportedFields != nil { + codec.AllowUnexportedFields = *structOpt.AllowUnexportedFields + } + + return codec, nil +} + +// EncodeValue handles encoding generic struct types. +func (sc *StructCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + if !val.IsValid() || val.Kind() != reflect.Struct { + return ValueEncoderError{Name: "StructCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val} + } + + sd, err := sc.describeStruct(ec.Registry, val.Type(), ec.useJSONStructTags, ec.errorOnInlineDuplicates) + if err != nil { + return err + } + + dw, err := vw.WriteDocument() + if err != nil { + return err + } + var rv reflect.Value + for _, desc := range sd.fl { + if desc.inline == nil { + rv = val.Field(desc.idx) + } else { + rv, err = fieldByIndexErr(val, desc.inline) + if err != nil { + continue + } + } + + desc.encoder, rv, err = defaultValueEncoders.lookupElementEncoder(ec, desc.encoder, rv) + + if err != nil && err != errInvalidValue { + return err + } + + if err == errInvalidValue { + if desc.omitEmpty { + continue + } + vw2, err := dw.WriteDocumentElement(desc.name) + if err != nil { + return err + } + err = vw2.WriteNull() + if err != nil { + return err + } + continue + } + + if desc.encoder == nil { + return ErrNoEncoder{Type: rv.Type()} + } + + encoder := desc.encoder + + var zero bool + if cz, ok := encoder.(CodecZeroer); ok { + zero = cz.IsTypeZero(rv.Interface()) + } else if rv.Kind() == reflect.Interface { + // isZero will not treat an interface rv as an interface, so we need to check for the + // zero interface separately. 
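+			// (An interface holding a non-nil zero value, e.g. interface{}(0), is therefore
+			// not considered empty by omitempty.)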
+ zero = rv.IsNil() + } else { + zero = isZero(rv, sc.EncodeOmitDefaultStruct || ec.omitZeroStruct) + } + if desc.omitEmpty && zero { + continue + } + + vw2, err := dw.WriteDocumentElement(desc.name) + if err != nil { + return err + } + + ectx := EncodeContext{ + Registry: ec.Registry, + MinSize: desc.minSize || ec.MinSize, + errorOnInlineDuplicates: ec.errorOnInlineDuplicates, + stringifyMapKeysWithFmt: ec.stringifyMapKeysWithFmt, + nilMapAsEmpty: ec.nilMapAsEmpty, + nilSliceAsEmpty: ec.nilSliceAsEmpty, + nilByteSliceAsEmpty: ec.nilByteSliceAsEmpty, + omitZeroStruct: ec.omitZeroStruct, + useJSONStructTags: ec.useJSONStructTags, + } + err = encoder.EncodeValue(ectx, vw2, rv) + if err != nil { + return err + } + } + + if sd.inlineMap >= 0 { + rv := val.Field(sd.inlineMap) + collisionFn := func(key string) bool { + _, exists := sd.fm[key] + return exists + } + + return defaultMapCodec.mapEncodeValue(ec, dw, rv, collisionFn) + } + + return dw.WriteDocumentEnd() +} + +func newDecodeError(key string, original error) error { + de, ok := original.(*DecodeError) + if !ok { + return &DecodeError{ + keys: []string{key}, + wrapped: original, + } + } + + de.keys = append(de.keys, key) + return de +} + +// DecodeValue implements the Codec interface. +// By default, map types in val will not be cleared. If a map has existing key/value pairs, it will be extended with the new ones from vr. +// For slices, the decoder will set the length of the slice to zero and append all elements. The underlying array will not be cleared. +func (sc *StructCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() || val.Kind() != reflect.Struct { + return ValueDecoderError{Name: "StructCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val} + } + + switch vrType := vr.Type(); vrType { + case bsontype.Type(0), bsontype.EmbeddedDocument: + case bsontype.Null: + if err := vr.ReadNull(); err != nil { + return err + } + + val.Set(reflect.Zero(val.Type())) + return nil + case bsontype.Undefined: + if err := vr.ReadUndefined(); err != nil { + return err + } + + val.Set(reflect.Zero(val.Type())) + return nil + default: + return fmt.Errorf("cannot decode %v into a %s", vrType, val.Type()) + } + + sd, err := sc.describeStruct(dc.Registry, val.Type(), dc.useJSONStructTags, false) + if err != nil { + return err + } + + if sc.DecodeZeroStruct || dc.zeroStructs { + val.Set(reflect.Zero(val.Type())) + } + if sc.DecodeDeepZeroInline && sd.inline { + val.Set(deepZero(val.Type())) + } + + var decoder ValueDecoder + var inlineMap reflect.Value + if sd.inlineMap >= 0 { + inlineMap = val.Field(sd.inlineMap) + decoder, err = dc.LookupDecoder(inlineMap.Type().Elem()) + if err != nil { + return err + } + } + + dr, err := vr.ReadDocument() + if err != nil { + return err + } + + for { + name, vr, err := dr.ReadElement() + if err == bsonrw.ErrEOD { + break + } + if err != nil { + return err + } + + fd, exists := sd.fm[name] + if !exists { + // if the original name isn't found in the struct description, try again with the name in lowercase + // this could match if a BSON tag isn't specified because by default, describeStruct lowercases all field + // names + fd, exists = sd.fm[strings.ToLower(name)] + } + + if !exists { + if sd.inlineMap < 0 { + // The encoding/json package requires a flag to return on error for non-existent fields. + // This functionality seems appropriate for the struct codec. 
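+				// Skip the value of the unknown BSON field rather than failing the decode.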
+				err = vr.Skip()
+				if err != nil {
+					return err
+				}
+				continue
+			}
+
+			if inlineMap.IsNil() {
+				inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+			}
+
+			elem := reflect.New(inlineMap.Type().Elem()).Elem()
+			dc.Ancestor = inlineMap.Type()
+			err = decoder.DecodeValue(dc, vr, elem)
+			if err != nil {
+				return err
+			}
+			inlineMap.SetMapIndex(reflect.ValueOf(name), elem)
+			continue
+		}
+
+		var field reflect.Value
+		if fd.inline == nil {
+			field = val.Field(fd.idx)
+		} else {
+			field, err = getInlineField(val, fd.inline)
+			if err != nil {
+				return err
+			}
+		}
+
+		if !field.CanSet() { // Being settable is a super set of being addressable.
+			innerErr := fmt.Errorf("field %v is not settable", field)
+			return newDecodeError(fd.name, innerErr)
+		}
+		if field.Kind() == reflect.Ptr && field.IsNil() {
+			field.Set(reflect.New(field.Type().Elem()))
+		}
+		field = field.Addr()
+
+		dctx := DecodeContext{
+			Registry:            dc.Registry,
+			Truncate:            fd.truncate || dc.Truncate,
+			defaultDocumentType: dc.defaultDocumentType,
+			binaryAsSlice:       dc.binaryAsSlice,
+			useJSONStructTags:   dc.useJSONStructTags,
+			useLocalTimeZone:    dc.useLocalTimeZone,
+			zeroMaps:            dc.zeroMaps,
+			zeroStructs:         dc.zeroStructs,
+		}
+
+		if fd.decoder == nil {
+			return newDecodeError(fd.name, ErrNoDecoder{Type: field.Elem().Type()})
+		}
+
+		err = fd.decoder.DecodeValue(dctx, vr, field.Elem())
+		if err != nil {
+			return newDecodeError(fd.name, err)
+		}
+	}
+
+	return nil
+}
+
+func isZero(v reflect.Value, omitZeroStruct bool) bool {
+	kind := v.Kind()
+	if (kind != reflect.Ptr || !v.IsNil()) && v.Type().Implements(tZeroer) {
+		return v.Interface().(Zeroer).IsZero()
+	}
+	if kind == reflect.Struct {
+		if !omitZeroStruct {
+			return false
+		}
+		vt := v.Type()
+		if vt == tTime {
+			return v.Interface().(time.Time).IsZero()
+		}
+		numField := vt.NumField()
+		for i := 0; i < numField; i++ {
+			ff := vt.Field(i)
+			if ff.PkgPath != "" && !ff.Anonymous {
+				continue // Private field
+			}
+			if !isZero(v.Field(i), omitZeroStruct) {
+				return false
+			}
+		}
+		return true
+	}
+	return !v.IsValid() || v.IsZero()
+}
+
+type structDescription struct {
+	fm        map[string]fieldDescription
+	fl        []fieldDescription
+	inlineMap int
+	inline    bool
+}
+
+type fieldDescription struct {
+	name      string // BSON key name
+	fieldName string // struct field name
+	idx       int
+	omitEmpty bool
+	minSize   bool
+	truncate  bool
+	inline    []int
+	encoder   ValueEncoder
+	decoder   ValueDecoder
+}
+
+type byIndex []fieldDescription

+func (bi byIndex) Len() int { return len(bi) }
+
+func (bi byIndex) Swap(i, j int) { bi[i], bi[j] = bi[j], bi[i] }
+
+func (bi byIndex) Less(i, j int) bool {
+	// If a field is inlined, its index in the top level struct is stored at inline[0]
+	iIdx, jIdx := bi[i].idx, bi[j].idx
+	if len(bi[i].inline) > 0 {
+		iIdx = bi[i].inline[0]
+	}
+	if len(bi[j].inline) > 0 {
+		jIdx = bi[j].inline[0]
+	}
+	if iIdx != jIdx {
+		return iIdx < jIdx
+	}
+	for k, biik := range bi[i].inline {
+		if k >= len(bi[j].inline) {
+			return false
+		}
+		if biik != bi[j].inline[k] {
+			return biik < bi[j].inline[k]
+		}
+	}
+	return len(bi[i].inline) < len(bi[j].inline)
+}
+
+func (sc *StructCodec) describeStruct(
+	r *Registry,
+	t reflect.Type,
+	useJSONStructTags bool,
+	errorOnDuplicates bool,
+) (*structDescription, error) {
+	// We need to analyze the struct, including getting the tags, collecting
+	// information about inlining, and creating a map from each field name to its field.
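+	// Fast path: reuse the cached description if this struct type has already been analyzed.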
+	if v, ok := sc.cache.Load(t); ok {
+		return v.(*structDescription), nil
+	}
+	// TODO(charlie): Only describe the struct once when called
+	// concurrently with the same type.
+	ds, err := sc.describeStructSlow(r, t, useJSONStructTags, errorOnDuplicates)
+	if err != nil {
+		return nil, err
+	}
+	if v, loaded := sc.cache.LoadOrStore(t, ds); loaded {
+		ds = v.(*structDescription)
+	}
+	return ds, nil
+}
+
+func (sc *StructCodec) describeStructSlow(
+	r *Registry,
+	t reflect.Type,
+	useJSONStructTags bool,
+	errorOnDuplicates bool,
+) (*structDescription, error) {
+	numFields := t.NumField()
+	sd := &structDescription{
+		fm:        make(map[string]fieldDescription, numFields),
+		fl:        make([]fieldDescription, 0, numFields),
+		inlineMap: -1,
+	}
+
+	var fields []fieldDescription
+	for i := 0; i < numFields; i++ {
+		sf := t.Field(i)
+		if sf.PkgPath != "" && (!sc.AllowUnexportedFields || !sf.Anonymous) {
+			// field is private or unexported fields aren't allowed, ignore
+			continue
+		}
+
+		sfType := sf.Type
+		encoder, err := r.LookupEncoder(sfType)
+		if err != nil {
+			encoder = nil
+		}
+		decoder, err := r.LookupDecoder(sfType)
+		if err != nil {
+			decoder = nil
+		}
+
+		description := fieldDescription{
+			fieldName: sf.Name,
+			idx:       i,
+			encoder:   encoder,
+			decoder:   decoder,
+		}
+
+		var stags StructTags
+		// If the caller requested that we use JSON struct tags, use the JSONFallbackStructTagParser
+		// instead of the parser defined on the codec.
+		if useJSONStructTags {
+			stags, err = JSONFallbackStructTagParser.ParseStructTags(sf)
+		} else {
+			stags, err = sc.parser.ParseStructTags(sf)
+		}
+		if err != nil {
+			return nil, err
+		}
+		if stags.Skip {
+			continue
+		}
+		description.name = stags.Name
+		description.omitEmpty = stags.OmitEmpty
+		description.minSize = stags.MinSize
+		description.truncate = stags.Truncate
+
+		if stags.Inline {
+			sd.inline = true
+			switch sfType.Kind() {
+			case reflect.Map:
+				if sd.inlineMap >= 0 {
+					return nil, errors.New("(struct " + t.String() + ") multiple inline maps")
+				}
+				if sfType.Key() != tString {
+					return nil, errors.New("(struct " + t.String() + ") inline map must have string keys")
+				}
+				sd.inlineMap = description.idx
+			case reflect.Ptr:
+				sfType = sfType.Elem()
+				if sfType.Kind() != reflect.Struct {
+					return nil, fmt.Errorf("(struct %s) inline fields must be a struct, a struct pointer, or a map", t.String())
+				}
+				fallthrough
+			case reflect.Struct:
+				inlinesf, err := sc.describeStruct(r, sfType, useJSONStructTags, errorOnDuplicates)
+				if err != nil {
+					return nil, err
+				}
+				for _, fd := range inlinesf.fl {
+					if fd.inline == nil {
+						fd.inline = []int{i, fd.idx}
+					} else {
+						fd.inline = append([]int{i}, fd.inline...)
+					}
+					fields = append(fields, fd)
+				}
+			default:
+				return nil, fmt.Errorf("(struct %s) inline fields must be a struct, a struct pointer, or a map", t.String())
+			}
+			continue
+		}
+		fields = append(fields, description)
+	}
+
+	// Sort fieldDescriptions by name and use dominance rules to determine which should be added for each name
+	sort.Slice(fields, func(i, j int) bool {
+		x := fields
+		// sort fields by name, breaking ties with depth, then
+		// breaking ties with index sequence.
+		if x[i].name != x[j].name {
+			return x[i].name < x[j].name
+		}
+		if len(x[i].inline) != len(x[j].inline) {
+			return len(x[i].inline) < len(x[j].inline)
+		}
+		return byIndex(x).Less(i, j)
+	})
+
+	for advance, i := 0, 0; i < len(fields); i += advance {
+		// One iteration per name.
+		// Find the sequence of fields with the name of this first field.
+		fi := fields[i]
+		name := fi.name
+		for advance = 1; i+advance < len(fields); advance++ {
+			fj := fields[i+advance]
+			if fj.name != name {
+				break
+			}
+		}
+		if advance == 1 { // Only one field with this name
+			sd.fl = append(sd.fl, fi)
+			sd.fm[name] = fi
+			continue
+		}
+		dominant, ok := dominantField(fields[i : i+advance])
+		if !ok || !sc.OverwriteDuplicatedInlinedFields || errorOnDuplicates {
+			return nil, fmt.Errorf("struct %s has duplicated key %s", t.String(), name)
+		}
+		sd.fl = append(sd.fl, dominant)
+		sd.fm[name] = dominant
+	}
+
+	sort.Sort(byIndex(sd.fl))
+
+	return sd, nil
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's inlining rules. If there are multiple top-level
+// fields, the boolean will be false: this condition is an error in Go
+// and we skip all the fields.
+func dominantField(fields []fieldDescription) (fieldDescription, bool) {
+	// The fields are sorted in increasing index-length order, then by presence of tag.
+	// That means that the first field is the dominant one. We need only check
+	// for error cases: two fields at top level.
+	if len(fields) > 1 &&
+		len(fields[0].inline) == len(fields[1].inline) {
+		return fieldDescription{}, false
+	}
+	return fields[0], true
+}
+
+func fieldByIndexErr(v reflect.Value, index []int) (result reflect.Value, err error) {
+	defer func() {
+		if recovered := recover(); recovered != nil {
+			switch r := recovered.(type) {
+			case string:
+				err = fmt.Errorf("%s", r)
+			case error:
+				err = r
+			}
+		}
+	}()
+
+	result = v.FieldByIndex(index)
+	return
+}
+
+func getInlineField(val reflect.Value, index []int) (reflect.Value, error) {
+	field, err := fieldByIndexErr(val, index)
+	if err == nil {
+		return field, nil
+	}
+
+	// if the parent of this element doesn't exist, fix the parent first
+	inlineParent := index[:len(index)-1]
+	var fParent reflect.Value
+	if fParent, err = fieldByIndexErr(val, inlineParent); err != nil {
+		fParent, err = getInlineField(val, inlineParent)
+		if err != nil {
+			return fParent, err
+		}
+	}
+	fParent.Set(reflect.New(fParent.Type().Elem()))
+
+	return fieldByIndexErr(val, index)
+}
+
+// deepZero returns a recursively-constructed zero object for the given struct type.
+func deepZero(st reflect.Type) (result reflect.Value) {
+	if st.Kind() == reflect.Struct {
+		numField := st.NumField()
+		for i := 0; i < numField; i++ {
+			if result == emptyValue {
+				result = reflect.Indirect(reflect.New(st))
+			}
+			f := result.Field(i)
+			if f.CanInterface() {
+				if f.Type().Kind() == reflect.Struct {
+					result.Field(i).Set(recursivePointerTo(deepZero(f.Type().Elem())))
+				}
+			}
+		}
+	}
+	return result
+}
+
+// recursivePointerTo calls reflect.New(v.Type()) and recursively does the same
+// for any struct-pointer fields inside.
+func recursivePointerTo(v reflect.Value) reflect.Value {
+	v = reflect.Indirect(v)
+	result := reflect.New(v.Type())
+	if v.Kind() == reflect.Struct {
+		for i := 0; i < v.NumField(); i++ {
+			if f := v.Field(i); f.Kind() == reflect.Ptr {
+				if f.Elem().Kind() == reflect.Struct {
+					result.Elem().Field(i).Set(recursivePointerTo(f))
+				}
+			}
+		}
+	}
+
+	return result
+}
diff --git a/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go
new file mode 100644
index 000000000..18d85bfb0
--- /dev/null
+++ b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go
@@ -0,0 +1,148 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"reflect"
+	"strings"
+)
+
+// StructTagParser returns the struct tags for a given struct field.
+//
+// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0.
+type StructTagParser interface {
+	ParseStructTags(reflect.StructField) (StructTags, error)
+}
+
+// StructTagParserFunc is an adapter that allows a generic function to be used
+// as a StructTagParser.
+//
+// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0.
+type StructTagParserFunc func(reflect.StructField) (StructTags, error)
+
+// ParseStructTags implements the StructTagParser interface.
+func (stpf StructTagParserFunc) ParseStructTags(sf reflect.StructField) (StructTags, error) {
+	return stpf(sf)
+}
+
+// StructTags represents the struct tag fields that the StructCodec uses during
+// the encoding and decoding process.
+//
+// In the case of a struct, the lowercased field name is used as the key for each exported
+// field but this behavior may be changed using a struct tag. The tag may also contain flags to
+// adjust the marshalling behavior for the field.
+//
+// The properties are defined below:
+//
+//	OmitEmpty  Only include the field if it's not set to the zero value for the type or to
+//	           empty slices or maps.
+//
+//	MinSize    Marshal an integer of a type larger than 32 bits as an int32, if that's
+//	           feasible while preserving the numeric value.
+//
+//	Truncate   When unmarshaling a BSON double, it is permitted to lose precision to fit within
+//	           a float32.
+//
+//	Inline     Inline the field, which must be a struct or a map, causing all of its fields
+//	           or keys to be processed as if they were part of the outer struct. For maps,
+//	           keys must not conflict with the bson keys of other struct fields.
+//
+//	Skip       This struct field should be skipped. This is usually denoted by parsing a "-"
+//	           for the name.
+//
+// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0.
+type StructTags struct {
+	Name      string
+	OmitEmpty bool
+	MinSize   bool
+	Truncate  bool
+	Inline    bool
+	Skip      bool
+}
+
+// DefaultStructTagParser is the StructTagParser used by the StructCodec by default.
+// It will handle the bson struct tag. See the documentation for StructTags to see
+// what each of the returned fields means.
+//
+// If there is no name in the struct tag fields, the struct field name is lowercased.
+// The tag formats accepted are:
+//
+//	"[<key>][,<flag1>[,<flag2>]]"
+//
+//	`(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// An example:
+//
+//	type T struct {
+//	    A bool
+//	    B int    "myb"
+//	    C string "myc,omitempty"
+//	    D string `bson:",omitempty" json:"jsonkey"`
+//	    E int64  ",minsize"
+//	    F int64  "myf,omitempty,minsize"
+//	}
+//
+// A struct tag either consisting entirely of '-' or with a bson key with a
+// value consisting entirely of '-' will return a StructTags with Skip true and
+// the remaining fields will be their default values.
+//
+// Deprecated: DefaultStructTagParser will be removed in Go Driver 2.0.
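As context for the tag grammar documented above, here is a minimal, hypothetical sketch (not part of the vendored file) of how these tags behave through the driver's public bson API; the `Hotel` type and its values are purely illustrative:

```go
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

// Hotel illustrates the tag grammar: explicit keys, flags, and skipping.
type Hotel struct {
	ID       string `bson:"_id"`                // explicit key "_id"
	Name     string                             // no tag: key is the lowercased field name, "name"
	Floors   int64  `bson:",minsize"`           // default key "floors"; stored as int32 when the value fits
	Discount int64  `bson:"discount,omitempty"` // omitted when it is the zero value
	Internal string `bson:"-"`                  // skipped entirely
}

func main() {
	raw, err := bson.Marshal(Hotel{ID: "h1", Name: "Inn", Floors: 3})
	if err != nil {
		panic(err)
	}
	var m bson.M
	_ = bson.Unmarshal(raw, &m)
	fmt.Println(m) // map[_id:h1 floors:3 name:Inn] (discount omitted; Internal never marshaled)
}
```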
+var DefaultStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) { + key := strings.ToLower(sf.Name) + tag, ok := sf.Tag.Lookup("bson") + if !ok && !strings.Contains(string(sf.Tag), ":") && len(sf.Tag) > 0 { + tag = string(sf.Tag) + } + return parseTags(key, tag) +} + +func parseTags(key string, tag string) (StructTags, error) { + var st StructTags + if tag == "-" { + st.Skip = true + return st, nil + } + + for idx, str := range strings.Split(tag, ",") { + if idx == 0 && str != "" { + key = str + } + switch str { + case "omitempty": + st.OmitEmpty = true + case "minsize": + st.MinSize = true + case "truncate": + st.Truncate = true + case "inline": + st.Inline = true + } + } + + st.Name = key + + return st, nil +} + +// JSONFallbackStructTagParser has the same behavior as DefaultStructTagParser +// but will also fallback to parsing the json tag instead on a field where the +// bson tag isn't available. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.UseJSONStructTags] and +// [go.mongodb.org/mongo-driver/bson.Decoder.UseJSONStructTags] instead. +var JSONFallbackStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) { + key := strings.ToLower(sf.Name) + tag, ok := sf.Tag.Lookup("bson") + if !ok { + tag, ok = sf.Tag.Lookup("json") + } + if !ok && !strings.Contains(string(sf.Tag), ":") && len(sf.Tag) > 0 { + tag = string(sf.Tag) + } + + return parseTags(key, tag) +} diff --git a/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go new file mode 100644 index 000000000..7b005a995 --- /dev/null +++ b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go @@ -0,0 +1,137 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "fmt" + "reflect" + "time" + + "go.mongodb.org/mongo-driver/bson/bsonoptions" + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" +) + +const ( + timeFormatString = "2006-01-02T15:04:05.999Z07:00" +) + +// TimeCodec is the Codec used for time.Time values. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// TimeCodec registered. +type TimeCodec struct { + // UseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false. + // + // Deprecated: Use bson.Decoder.UseLocalTimeZone instead. + UseLocalTimeZone bool +} + +var ( + defaultTimeCodec = NewTimeCodec() + + // Assert that defaultTimeCodec satisfies the typeDecoder interface, which allows it to be used + // by collection type decoders (e.g. map, slice, etc) to set individual values in a collection. + _ typeDecoder = defaultTimeCodec +) + +// NewTimeCodec returns a TimeCodec with options opts. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// TimeCodec registered. +func NewTimeCodec(opts ...*bsonoptions.TimeCodecOptions) *TimeCodec { + timeOpt := bsonoptions.MergeTimeCodecOptions(opts...) 
+
+	codec := TimeCodec{}
+	if timeOpt.UseLocalTimeZone != nil {
+		codec.UseLocalTimeZone = *timeOpt.UseLocalTimeZone
+	}
+	return &codec
+}
+
+func (tc *TimeCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
+	if t != tTime {
+		return emptyValue, ValueDecoderError{
+			Name:     "TimeDecodeValue",
+			Types:    []reflect.Type{tTime},
+			Received: reflect.Zero(t),
+		}
+	}
+
+	var timeVal time.Time
+	switch vrType := vr.Type(); vrType {
+	case bsontype.DateTime:
+		dt, err := vr.ReadDateTime()
+		if err != nil {
+			return emptyValue, err
+		}
+		timeVal = time.Unix(dt/1000, dt%1000*1000000)
+	case bsontype.String:
+		// assume strings use the timeFormatString layout
+		timeStr, err := vr.ReadString()
+		if err != nil {
+			return emptyValue, err
+		}
+		timeVal, err = time.Parse(timeFormatString, timeStr)
+		if err != nil {
+			return emptyValue, err
+		}
+	case bsontype.Int64:
+		i64, err := vr.ReadInt64()
+		if err != nil {
+			return emptyValue, err
+		}
+		timeVal = time.Unix(i64/1000, i64%1000*1000000)
+	case bsontype.Timestamp:
+		t, _, err := vr.ReadTimestamp()
+		if err != nil {
+			return emptyValue, err
+		}
+		timeVal = time.Unix(int64(t), 0)
+	case bsontype.Null:
+		if err := vr.ReadNull(); err != nil {
+			return emptyValue, err
+		}
+	case bsontype.Undefined:
+		if err := vr.ReadUndefined(); err != nil {
+			return emptyValue, err
+		}
+	default:
+		return emptyValue, fmt.Errorf("cannot decode %v into a time.Time", vrType)
+	}
+
+	if !tc.UseLocalTimeZone && !dc.useLocalTimeZone {
+		timeVal = timeVal.UTC()
+	}
+	return reflect.ValueOf(timeVal), nil
+}
+
+// DecodeValue is the ValueDecoderFunc for time.Time.
+func (tc *TimeCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tTime {
+		return ValueDecoderError{Name: "TimeDecodeValue", Types: []reflect.Type{tTime}, Received: val}
+	}
+
+	elem, err := tc.decodeType(dc, vr, tTime)
+	if err != nil {
+		return err
+	}
+
+	val.Set(elem)
+	return nil
+}
+
+// EncodeValue is the ValueEncoderFunc for time.Time.
+func (tc *TimeCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tTime {
+		return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val}
+	}
+	tt := val.Interface().(time.Time)
+	dt := primitive.NewDateTimeFromTime(tt)
+	return vw.WriteDateTime(int64(dt))
+}
diff --git a/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go
new file mode 100644
index 000000000..6ade17b7d
--- /dev/null
+++ b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go
@@ -0,0 +1,58 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "encoding/json" + "net/url" + "reflect" + "time" + + "go.mongodb.org/mongo-driver/bson/primitive" + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" +) + +var tBool = reflect.TypeOf(false) +var tFloat64 = reflect.TypeOf(float64(0)) +var tInt32 = reflect.TypeOf(int32(0)) +var tInt64 = reflect.TypeOf(int64(0)) +var tString = reflect.TypeOf("") +var tTime = reflect.TypeOf(time.Time{}) + +var tEmpty = reflect.TypeOf((*interface{})(nil)).Elem() +var tByteSlice = reflect.TypeOf([]byte(nil)) +var tByte = reflect.TypeOf(byte(0x00)) +var tURL = reflect.TypeOf(url.URL{}) +var tJSONNumber = reflect.TypeOf(json.Number("")) + +var tValueMarshaler = reflect.TypeOf((*ValueMarshaler)(nil)).Elem() +var tValueUnmarshaler = reflect.TypeOf((*ValueUnmarshaler)(nil)).Elem() +var tMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem() +var tUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem() +var tProxy = reflect.TypeOf((*Proxy)(nil)).Elem() +var tZeroer = reflect.TypeOf((*Zeroer)(nil)).Elem() + +var tBinary = reflect.TypeOf(primitive.Binary{}) +var tUndefined = reflect.TypeOf(primitive.Undefined{}) +var tOID = reflect.TypeOf(primitive.ObjectID{}) +var tDateTime = reflect.TypeOf(primitive.DateTime(0)) +var tNull = reflect.TypeOf(primitive.Null{}) +var tRegex = reflect.TypeOf(primitive.Regex{}) +var tCodeWithScope = reflect.TypeOf(primitive.CodeWithScope{}) +var tDBPointer = reflect.TypeOf(primitive.DBPointer{}) +var tJavaScript = reflect.TypeOf(primitive.JavaScript("")) +var tSymbol = reflect.TypeOf(primitive.Symbol("")) +var tTimestamp = reflect.TypeOf(primitive.Timestamp{}) +var tDecimal = reflect.TypeOf(primitive.Decimal128{}) +var tMinKey = reflect.TypeOf(primitive.MinKey{}) +var tMaxKey = reflect.TypeOf(primitive.MaxKey{}) +var tD = reflect.TypeOf(primitive.D{}) +var tA = reflect.TypeOf(primitive.A{}) +var tE = reflect.TypeOf(primitive.E{}) + +var tCoreDocument = reflect.TypeOf(bsoncore.Document{}) +var tCoreArray = reflect.TypeOf(bsoncore.Array{}) diff --git a/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go new file mode 100644 index 000000000..7eb106905 --- /dev/null +++ b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go @@ -0,0 +1,184 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsoncodec + +import ( + "fmt" + "math" + "reflect" + + "go.mongodb.org/mongo-driver/bson/bsonoptions" + "go.mongodb.org/mongo-driver/bson/bsonrw" + "go.mongodb.org/mongo-driver/bson/bsontype" +) + +// UIntCodec is the Codec used for uint values. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// UIntCodec registered. +type UIntCodec struct { + // EncodeToMinSize causes EncodeValue to marshal Go uint values (excluding uint64) as the + // minimum BSON int size (either 32-bit or 64-bit) that can represent the integer value. + // + // Deprecated: Use bson.Encoder.IntMinSize instead. 
+ EncodeToMinSize bool +} + +var ( + defaultUIntCodec = NewUIntCodec() + + // Assert that defaultUIntCodec satisfies the typeDecoder interface, which allows it to be used + // by collection type decoders (e.g. map, slice, etc) to set individual values in a collection. + _ typeDecoder = defaultUIntCodec +) + +// NewUIntCodec returns a UIntCodec with options opts. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the +// UIntCodec registered. +func NewUIntCodec(opts ...*bsonoptions.UIntCodecOptions) *UIntCodec { + uintOpt := bsonoptions.MergeUIntCodecOptions(opts...) + + codec := UIntCodec{} + if uintOpt.EncodeToMinSize != nil { + codec.EncodeToMinSize = *uintOpt.EncodeToMinSize + } + return &codec +} + +// EncodeValue is the ValueEncoder for uint types. +func (uic *UIntCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { + switch val.Kind() { + case reflect.Uint8, reflect.Uint16: + return vw.WriteInt32(int32(val.Uint())) + case reflect.Uint, reflect.Uint32, reflect.Uint64: + u64 := val.Uint() + + // If ec.MinSize or if encodeToMinSize is true for a non-uint64 value we should write val as an int32 + useMinSize := ec.MinSize || (uic.EncodeToMinSize && val.Kind() != reflect.Uint64) + + if u64 <= math.MaxInt32 && useMinSize { + return vw.WriteInt32(int32(u64)) + } + if u64 > math.MaxInt64 { + return fmt.Errorf("%d overflows int64", u64) + } + return vw.WriteInt64(int64(u64)) + } + + return ValueEncoderError{ + Name: "UintEncodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: val, + } +} + +func (uic *UIntCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { + var i64 int64 + var err error + switch vrType := vr.Type(); vrType { + case bsontype.Int32: + i32, err := vr.ReadInt32() + if err != nil { + return emptyValue, err + } + i64 = int64(i32) + case bsontype.Int64: + i64, err = vr.ReadInt64() + if err != nil { + return emptyValue, err + } + case bsontype.Double: + f64, err := vr.ReadDouble() + if err != nil { + return emptyValue, err + } + if !dc.Truncate && math.Floor(f64) != f64 { + return emptyValue, errCannotTruncate + } + if f64 > float64(math.MaxInt64) { + return emptyValue, fmt.Errorf("%g overflows int64", f64) + } + i64 = int64(f64) + case bsontype.Boolean: + b, err := vr.ReadBoolean() + if err != nil { + return emptyValue, err + } + if b { + i64 = 1 + } + case bsontype.Null: + if err = vr.ReadNull(); err != nil { + return emptyValue, err + } + case bsontype.Undefined: + if err = vr.ReadUndefined(); err != nil { + return emptyValue, err + } + default: + return emptyValue, fmt.Errorf("cannot decode %v into an integer type", vrType) + } + + switch t.Kind() { + case reflect.Uint8: + if i64 < 0 || i64 > math.MaxUint8 { + return emptyValue, fmt.Errorf("%d overflows uint8", i64) + } + + return reflect.ValueOf(uint8(i64)), nil + case reflect.Uint16: + if i64 < 0 || i64 > math.MaxUint16 { + return emptyValue, fmt.Errorf("%d overflows uint16", i64) + } + + return reflect.ValueOf(uint16(i64)), nil + case reflect.Uint32: + if i64 < 0 || i64 > math.MaxUint32 { + return emptyValue, fmt.Errorf("%d overflows uint32", i64) + } + + return reflect.ValueOf(uint32(i64)), nil + case reflect.Uint64: + if i64 < 0 { + return emptyValue, fmt.Errorf("%d overflows uint64", i64) + } + + return reflect.ValueOf(uint64(i64)), nil + case reflect.Uint: + if i64 < 0 || int64(uint(i64)) != i64 { // Can we fit this inside of an 
uint + return emptyValue, fmt.Errorf("%d overflows uint", i64) + } + + return reflect.ValueOf(uint(i64)), nil + default: + return emptyValue, ValueDecoderError{ + Name: "UintDecodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: reflect.Zero(t), + } + } +} + +// DecodeValue is the ValueDecoder for uint types. +func (uic *UIntCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { + if !val.CanSet() { + return ValueDecoderError{ + Name: "UintDecodeValue", + Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, + Received: val, + } + } + + elem, err := uic.decodeType(dc, vr, val.Type()) + if err != nil { + return err + } + + val.SetUint(elem.Uint()) + return nil +} diff --git a/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go new file mode 100644 index 000000000..996bd1712 --- /dev/null +++ b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go @@ -0,0 +1,49 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonoptions + +// ByteSliceCodecOptions represents all possible options for byte slice encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. +type ByteSliceCodecOptions struct { + EncodeNilAsEmpty *bool // Specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false. +} + +// ByteSliceCodec creates a new *ByteSliceCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. +func ByteSliceCodec() *ByteSliceCodecOptions { + return &ByteSliceCodecOptions{} +} + +// SetEncodeNilAsEmpty specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilByteSliceAsEmpty] instead. +func (bs *ByteSliceCodecOptions) SetEncodeNilAsEmpty(b bool) *ByteSliceCodecOptions { + bs.EncodeNilAsEmpty = &b + return bs +} + +// MergeByteSliceCodecOptions combines the given *ByteSliceCodecOptions into a single *ByteSliceCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. +func MergeByteSliceCodecOptions(opts ...*ByteSliceCodecOptions) *ByteSliceCodecOptions { + bs := ByteSliceCodec() + for _, opt := range opts { + if opt == nil { + continue + } + if opt.EncodeNilAsEmpty != nil { + bs.EncodeNilAsEmpty = opt.EncodeNilAsEmpty + } + } + + return bs +} diff --git a/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go new file mode 100644 index 000000000..c40973c8d --- /dev/null +++ b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go @@ -0,0 +1,8 @@ +// Copyright (C) MongoDB, Inc. 2022-present. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package bsonoptions defines the optional configurations for the BSON codecs.
+package bsonoptions
diff --git a/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go
new file mode 100644
index 000000000..f522c7e03
--- /dev/null
+++ b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go
@@ -0,0 +1,49 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonoptions
+
+// EmptyInterfaceCodecOptions represents all possible options for interface{} encoding and decoding.
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+type EmptyInterfaceCodecOptions struct {
+	DecodeBinaryAsSlice *bool // Specifies if Old and Generic type binaries should default to []byte instead of primitive.Binary. Defaults to false.
+}
+
+// EmptyInterfaceCodec creates a new *EmptyInterfaceCodecOptions
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+func EmptyInterfaceCodec() *EmptyInterfaceCodecOptions {
+	return &EmptyInterfaceCodecOptions{}
+}
+
+// SetDecodeBinaryAsSlice specifies if Old and Generic type binaries should default to []byte instead of
+// primitive.Binary. Defaults to false.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.BinaryAsSlice] instead.
+func (e *EmptyInterfaceCodecOptions) SetDecodeBinaryAsSlice(b bool) *EmptyInterfaceCodecOptions {
+	e.DecodeBinaryAsSlice = &b
+	return e
+}
+
+// MergeEmptyInterfaceCodecOptions combines the given *EmptyInterfaceCodecOptions into a single *EmptyInterfaceCodecOptions in a last one wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeEmptyInterfaceCodecOptions(opts ...*EmptyInterfaceCodecOptions) *EmptyInterfaceCodecOptions {
+	e := EmptyInterfaceCodec()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.DecodeBinaryAsSlice != nil {
+			e.DecodeBinaryAsSlice = opt.DecodeBinaryAsSlice
+		}
+	}
+
+	return e
+}
diff --git a/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go
new file mode 100644
index 000000000..a7a7c1d98
--- /dev/null
+++ b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go
@@ -0,0 +1,82 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonoptions
+
+// MapCodecOptions represents all possible options for map encoding and decoding.
+// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. +type MapCodecOptions struct { + DecodeZerosMap *bool // Specifies if the map should be zeroed before decoding into it. Defaults to false. + EncodeNilAsEmpty *bool // Specifies if a nil map should encode as an empty document instead of null. Defaults to false. + // Specifies how keys should be handled. If false, the behavior matches encoding/json, where the encoding key type must + // either be a string, an integer type, or implement bsoncodec.KeyMarshaler and the decoding key type must either be a + // string, an integer type, or implement bsoncodec.KeyUnmarshaler. If true, keys are encoded with fmt.Sprint() and the + // encoding key type must be a string, an integer type, or a float. If true, the use of Stringer will override + // TextMarshaler/TextUnmarshaler. Defaults to false. + EncodeKeysWithStringer *bool +} + +// MapCodec creates a new *MapCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. +func MapCodec() *MapCodecOptions { + return &MapCodecOptions{} +} + +// SetDecodeZerosMap specifies if the map should be zeroed before decoding into it. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroMaps] instead. +func (t *MapCodecOptions) SetDecodeZerosMap(b bool) *MapCodecOptions { + t.DecodeZerosMap = &b + return t +} + +// SetEncodeNilAsEmpty specifies if a nil map should encode as an empty document instead of null. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilMapAsEmpty] instead. +func (t *MapCodecOptions) SetEncodeNilAsEmpty(b bool) *MapCodecOptions { + t.EncodeNilAsEmpty = &b + return t +} + +// SetEncodeKeysWithStringer specifies how keys should be handled. If false, the behavior matches encoding/json, where the +// encoding key type must either be a string, an integer type, or implement bsoncodec.KeyMarshaler and the decoding key +// type must either be a string, an integer type, or implement bsoncodec.KeyUnmarshaler. If true, keys are encoded with +// fmt.Sprint() and the encoding key type must be a string, an integer type, or a float. If true, the use of Stringer +// will override TextMarshaler/TextUnmarshaler. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.StringifyMapKeysWithFmt] instead. +func (t *MapCodecOptions) SetEncodeKeysWithStringer(b bool) *MapCodecOptions { + t.EncodeKeysWithStringer = &b + return t +} + +// MergeMapCodecOptions combines the given *MapCodecOptions into a single *MapCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. 
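All of the Merge* helpers in this package follow the same last-one-wins rule: later non-nil options overwrite earlier ones, field by field. A short sketch of that behavior (illustrative only; it uses this deprecated options API exactly as vendored here):

```go
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson/bsonoptions"
)

func main() {
	// Unset fields stay nil; set fields from later options win over earlier ones.
	a := bsonoptions.MapCodec().SetDecodeZerosMap(true)
	b := bsonoptions.MapCodec().SetDecodeZerosMap(false).SetEncodeNilAsEmpty(true)
	merged := bsonoptions.MergeMapCodecOptions(a, b)
	fmt.Println(*merged.DecodeZerosMap, *merged.EncodeNilAsEmpty) // false true
}
```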
+func MergeMapCodecOptions(opts ...*MapCodecOptions) *MapCodecOptions { + s := MapCodec() + for _, opt := range opts { + if opt == nil { + continue + } + if opt.DecodeZerosMap != nil { + s.DecodeZerosMap = opt.DecodeZerosMap + } + if opt.EncodeNilAsEmpty != nil { + s.EncodeNilAsEmpty = opt.EncodeNilAsEmpty + } + if opt.EncodeKeysWithStringer != nil { + s.EncodeKeysWithStringer = opt.EncodeKeysWithStringer + } + } + + return s +} diff --git a/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go new file mode 100644 index 000000000..3c1e4f35b --- /dev/null +++ b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go @@ -0,0 +1,49 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonoptions + +// SliceCodecOptions represents all possible options for slice encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. +type SliceCodecOptions struct { + EncodeNilAsEmpty *bool // Specifies if a nil slice should encode as an empty array instead of null. Defaults to false. +} + +// SliceCodec creates a new *SliceCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. +func SliceCodec() *SliceCodecOptions { + return &SliceCodecOptions{} +} + +// SetEncodeNilAsEmpty specifies if a nil slice should encode as an empty array instead of null. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilSliceAsEmpty] instead. +func (s *SliceCodecOptions) SetEncodeNilAsEmpty(b bool) *SliceCodecOptions { + s.EncodeNilAsEmpty = &b + return s +} + +// MergeSliceCodecOptions combines the given *SliceCodecOptions into a single *SliceCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. +func MergeSliceCodecOptions(opts ...*SliceCodecOptions) *SliceCodecOptions { + s := SliceCodec() + for _, opt := range opts { + if opt == nil { + continue + } + if opt.EncodeNilAsEmpty != nil { + s.EncodeNilAsEmpty = opt.EncodeNilAsEmpty + } + } + + return s +} diff --git a/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go new file mode 100644 index 000000000..f8b76f996 --- /dev/null +++ b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go @@ -0,0 +1,52 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonoptions + +var defaultDecodeOIDAsHex = true + +// StringCodecOptions represents all possible options for string encoding and decoding. 
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+type StringCodecOptions struct {
+	DecodeObjectIDAsHex *bool // Specifies if we should decode ObjectID as the hex value. Defaults to true.
+}
+
+// StringCodec creates a new *StringCodecOptions
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+func StringCodec() *StringCodecOptions {
+	return &StringCodecOptions{}
+}
+
+// SetDecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation. If false, a string made
+// from the raw object ID bytes will be used. Defaults to true.
+//
+// Deprecated: Decoding object IDs as raw bytes will not be supported in Go Driver 2.0.
+func (t *StringCodecOptions) SetDecodeObjectIDAsHex(b bool) *StringCodecOptions {
+	t.DecodeObjectIDAsHex = &b
+	return t
+}
+
+// MergeStringCodecOptions combines the given *StringCodecOptions into a single *StringCodecOptions in a last one wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeStringCodecOptions(opts ...*StringCodecOptions) *StringCodecOptions {
+	s := &StringCodecOptions{&defaultDecodeOIDAsHex}
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.DecodeObjectIDAsHex != nil {
+			s.DecodeObjectIDAsHex = opt.DecodeObjectIDAsHex
+		}
+	}
+
+	return s
+}
diff --git a/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go
new file mode 100644
index 000000000..1cbfa32e8
--- /dev/null
+++ b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go
@@ -0,0 +1,107 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonoptions
+
+var defaultOverwriteDuplicatedInlinedFields = true
+
+// StructCodecOptions represents all possible options for struct encoding and decoding.
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+type StructCodecOptions struct {
+	DecodeZeroStruct                 *bool // Specifies if structs should be zeroed before decoding into them. Defaults to false.
+	DecodeDeepZeroInline             *bool // Specifies if structs should be recursively zeroed when an inline value is decoded. Defaults to false.
+	EncodeOmitDefaultStruct          *bool // Specifies if default structs should be considered empty by omitempty. Defaults to false.
+	AllowUnexportedFields            *bool // Specifies if unexported fields should be marshaled/unmarshaled. Defaults to false.
+	OverwriteDuplicatedInlinedFields *bool // Specifies if fields in inlined structs can be overwritten by higher level struct fields with the same key. Defaults to true.
+}
+
+// StructCodec creates a new *StructCodecOptions
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+func StructCodec() *StructCodecOptions {
+	return &StructCodecOptions{}
+}
+
+// SetDecodeZeroStruct specifies if structs should be zeroed before decoding into them. Defaults to false.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroStructs] instead.
+func (t *StructCodecOptions) SetDecodeZeroStruct(b bool) *StructCodecOptions {
+	t.DecodeZeroStruct = &b
+	return t
+}
+
+// SetDecodeDeepZeroInline specifies if structs should be recursively zeroed when an inline value is decoded.
+// Defaults to false.
+//
+// Deprecated: DecodeDeepZeroInline will not be supported in Go Driver 2.0.
+func (t *StructCodecOptions) SetDecodeDeepZeroInline(b bool) *StructCodecOptions {
+	t.DecodeDeepZeroInline = &b
+	return t
+}
+
+// SetEncodeOmitDefaultStruct specifies if default structs should be considered empty by omitempty. A default struct has all
+// its values set to their default value. Defaults to false.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.OmitZeroStruct] instead.
+func (t *StructCodecOptions) SetEncodeOmitDefaultStruct(b bool) *StructCodecOptions {
+	t.EncodeOmitDefaultStruct = &b
+	return t
+}
+
+// SetOverwriteDuplicatedInlinedFields specifies if inlined struct fields can be overwritten by higher level struct fields with the
+// same bson key. When true and decoding, values will be written to the outermost struct with a matching key, and when
+// encoding, keys will have the value of the top-most matching field. When false, decoding and encoding will error if
+// there are duplicate keys after the struct is inlined. Defaults to true.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.ErrorOnInlineDuplicates] instead.
+func (t *StructCodecOptions) SetOverwriteDuplicatedInlinedFields(b bool) *StructCodecOptions {
+	t.OverwriteDuplicatedInlinedFields = &b
+	return t
+}
+
+// SetAllowUnexportedFields specifies if unexported fields should be marshaled/unmarshaled. Defaults to false.
+//
+// Deprecated: AllowUnexportedFields does not work on recent versions of Go and will not be
+// supported in Go Driver 2.0.
+func (t *StructCodecOptions) SetAllowUnexportedFields(b bool) *StructCodecOptions {
+	t.AllowUnexportedFields = &b
+	return t
+}
+
+// MergeStructCodecOptions combines the given *StructCodecOptions into a single *StructCodecOptions in a last one wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
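The overwrite behavior described above is easiest to see end to end. A hypothetical sketch using the public bson API: the duplicated key "name" resolves in favor of the outer field under the default OverwriteDuplicatedInlinedFields=true (the Inner/Outer types are illustrative):

```go
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

type Inner struct {
	Name string `bson:"name"`
}

type Outer struct {
	Inner `bson:",inline"`
	Name  string `bson:"name"` // same key as the inlined Inner.Name
}

func main() {
	// With the default settings, the top-level field dominates and no error occurs.
	raw, err := bson.Marshal(Outer{Inner: Inner{Name: "inner"}, Name: "outer"})
	fmt.Println(err) // <nil>
	var m bson.M
	_ = bson.Unmarshal(raw, &m)
	fmt.Println(m["name"]) // outer
}
```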
+func MergeStructCodecOptions(opts ...*StructCodecOptions) *StructCodecOptions { + s := &StructCodecOptions{ + OverwriteDuplicatedInlinedFields: &defaultOverwriteDuplicatedInlinedFields, + } + for _, opt := range opts { + if opt == nil { + continue + } + + if opt.DecodeZeroStruct != nil { + s.DecodeZeroStruct = opt.DecodeZeroStruct + } + if opt.DecodeDeepZeroInline != nil { + s.DecodeDeepZeroInline = opt.DecodeDeepZeroInline + } + if opt.EncodeOmitDefaultStruct != nil { + s.EncodeOmitDefaultStruct = opt.EncodeOmitDefaultStruct + } + if opt.OverwriteDuplicatedInlinedFields != nil { + s.OverwriteDuplicatedInlinedFields = opt.OverwriteDuplicatedInlinedFields + } + if opt.AllowUnexportedFields != nil { + s.AllowUnexportedFields = opt.AllowUnexportedFields + } + } + + return s +} diff --git a/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go new file mode 100644 index 000000000..3f38433d2 --- /dev/null +++ b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go @@ -0,0 +1,49 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package bsonoptions + +// TimeCodecOptions represents all possible options for time.Time encoding and decoding. +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. +type TimeCodecOptions struct { + UseLocalTimeZone *bool // Specifies if we should decode into the local time zone. Defaults to false. +} + +// TimeCodec creates a new *TimeCodecOptions +// +// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal +// and unmarshal behavior instead. +func TimeCodec() *TimeCodecOptions { + return &TimeCodecOptions{} +} + +// SetUseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseLocalTimeZone] instead. +func (t *TimeCodecOptions) SetUseLocalTimeZone(b bool) *TimeCodecOptions { + t.UseLocalTimeZone = &b + return t +} + +// MergeTimeCodecOptions combines the given *TimeCodecOptions into a single *TimeCodecOptions in a last one wins fashion. +// +// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a +// single options struct instead. +func MergeTimeCodecOptions(opts ...*TimeCodecOptions) *TimeCodecOptions { + t := TimeCodec() + for _, opt := range opts { + if opt == nil { + continue + } + if opt.UseLocalTimeZone != nil { + t.UseLocalTimeZone = opt.UseLocalTimeZone + } + } + + return t +} diff --git a/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go new file mode 100644 index 000000000..5091e4d96 --- /dev/null +++ b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go @@ -0,0 +1,49 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonoptions
+
+// UIntCodecOptions represents all possible options for uint encoding and decoding.
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+type UIntCodecOptions struct {
+	EncodeToMinSize *bool // Specifies if all uints except uint64 should be encoded to the minimum-size BSON type. Defaults to false.
+}
+
+// UIntCodec creates a new *UIntCodecOptions
+//
+// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
+// and unmarshal behavior instead.
+func UIntCodec() *UIntCodecOptions {
+	return &UIntCodecOptions{}
+}
+
+// SetEncodeToMinSize specifies if all uints except uint64 should be encoded to the minimum-size BSON type. Defaults to false.
+//
+// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.IntMinSize] instead.
+func (u *UIntCodecOptions) SetEncodeToMinSize(b bool) *UIntCodecOptions {
+	u.EncodeToMinSize = &b
+	return u
+}
+
+// MergeUIntCodecOptions combines the given *UIntCodecOptions into a single *UIntCodecOptions in a last one wins fashion.
+//
+// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
+// single options struct instead.
+func MergeUIntCodecOptions(opts ...*UIntCodecOptions) *UIntCodecOptions {
+	u := UIntCodec()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.EncodeToMinSize != nil {
+			u.EncodeToMinSize = opt.EncodeToMinSize
+		}
+	}
+
+	return u
+}
diff --git a/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go
new file mode 100644
index 000000000..c146d02e5
--- /dev/null
+++ b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go
@@ -0,0 +1,488 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"fmt"
+	"io"
+
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
+)
+
+// Copier is a type that allows copying between ValueReaders, ValueWriters, and
+// []byte values.
+//
+// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be
+// supported in Go Driver 2.0.
+type Copier struct{}
+
+// NewCopier creates a new Copier.
+//
+// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be
+// supported in Go Driver 2.0.
+func NewCopier() Copier {
+	return Copier{}
+}
+
+// CopyDocument handles copying a document from src to dst.
+//
+// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be
+// supported in Go Driver 2.0.
+func CopyDocument(dst ValueWriter, src ValueReader) error {
+	return Copier{}.CopyDocument(dst, src)
+}
+
+// CopyDocument handles copying one document from the src to the dst.
+//
+// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be
+// supported in Go Driver 2.0.
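As a usage sketch of the Copier API documented above (illustrative only; it assumes the exported bsonrw.NewBSONDocumentReader helper, which this vendored package also provides):

```go
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

func main() {
	src, err := bson.Marshal(bson.M{"hello": "world"})
	if err != nil {
		panic(err)
	}
	// Read the raw document through a ValueReader and copy it back out to bytes.
	vr := bsonrw.NewBSONDocumentReader(src)
	dst, err := bsonrw.NewCopier().CopyDocumentToBytes(vr)
	fmt.Println(err, bson.Raw(dst).String()) // <nil> {"hello": "world"}
}
```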
+func (c Copier) CopyDocument(dst ValueWriter, src ValueReader) error {
+	dr, err := src.ReadDocument()
+	if err != nil {
+		return err
+	}
+
+	dw, err := dst.WriteDocument()
+	if err != nil {
+		return err
+	}
+
+	return c.copyDocumentCore(dw, dr)
+}
+
+// CopyArrayFromBytes copies the values from a BSON array represented as a
+// []byte to a ValueWriter.
+//
+// Deprecated: Copying BSON arrays using the ValueWriter and ValueReader interfaces will not be
+// supported in Go Driver 2.0.
+func (c Copier) CopyArrayFromBytes(dst ValueWriter, src []byte) error {
+	aw, err := dst.WriteArray()
+	if err != nil {
+		return err
+	}
+
+	err = c.CopyBytesToArrayWriter(aw, src)
+	if err != nil {
+		return err
+	}
+
+	return aw.WriteArrayEnd()
+}
+
+// CopyDocumentFromBytes copies the values from a BSON document represented as a
+// []byte to a ValueWriter.
+//
+// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be
+// supported in Go Driver 2.0.
+func (c Copier) CopyDocumentFromBytes(dst ValueWriter, src []byte) error {
+	dw, err := dst.WriteDocument()
+	if err != nil {
+		return err
+	}
+
+	err = c.CopyBytesToDocumentWriter(dw, src)
+	if err != nil {
+		return err
+	}
+
+	return dw.WriteDocumentEnd()
+}
+
+type writeElementFn func(key string) (ValueWriter, error)
+
+// CopyBytesToArrayWriter copies the values from a BSON Array represented as a []byte to an
+// ArrayWriter.
+//
+// Deprecated: Copying BSON arrays using the ArrayWriter interface will not be supported in Go
+// Driver 2.0.
+func (c Copier) CopyBytesToArrayWriter(dst ArrayWriter, src []byte) error {
+	wef := func(_ string) (ValueWriter, error) {
+		return dst.WriteArrayElement()
+	}
+
+	return c.copyBytesToValueWriter(src, wef)
+}
+
+// CopyBytesToDocumentWriter copies the values from a BSON document represented as a []byte to a
+// DocumentWriter.
+//
+// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be
+// supported in Go Driver 2.0.
+func (c Copier) CopyBytesToDocumentWriter(dst DocumentWriter, src []byte) error {
+	wef := func(key string) (ValueWriter, error) {
+		return dst.WriteDocumentElement(key)
+	}
+
+	return c.copyBytesToValueWriter(src, wef)
+}
+
+func (c Copier) copyBytesToValueWriter(src []byte, wef writeElementFn) error {
+	// TODO(skriptble): Create error types here. Anything that's a tag should be a property.
+	length, rem, ok := bsoncore.ReadLength(src)
+	if !ok {
+		return fmt.Errorf("couldn't read length from src, not enough bytes. length=%d", len(src))
+	}
+	if len(src) < int(length) {
+		return fmt.Errorf("length read exceeds number of bytes available. length=%d bytes=%d", length, len(src))
+	}
+	rem = rem[:length-4]
+
+	var t bsontype.Type
+	var key string
+	var val bsoncore.Value
+	for {
+		t, rem, ok = bsoncore.ReadType(rem)
+		if !ok {
+			return io.EOF
+		}
+		if t == bsontype.Type(0) {
+			if len(rem) != 0 {
+				return fmt.Errorf("document end byte found before end of document. remaining bytes=%v", rem)
+			}
+			break
+		}
+
+		key, rem, ok = bsoncore.ReadKey(rem)
+		if !ok {
+			return fmt.Errorf("invalid key found. remaining bytes=%v", rem)
+		}
+
+		// write as either array element or document element using writeElementFn
+		vw, err := wef(key)
+		if err != nil {
+			return err
+		}
+
+		val, rem, ok = bsoncore.ReadValue(rem, t)
+		if !ok {
+			return fmt.Errorf("not enough bytes available to read type. bytes=%d type=%s", len(rem), t)
bytes=%d type=%s", len(rem), t) + } + err = c.CopyValueFromBytes(vw, t, val.Data) + if err != nil { + return err + } + } + return nil +} + +// CopyDocumentToBytes copies an entire document from the ValueReader and +// returns it as bytes. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. +func (c Copier) CopyDocumentToBytes(src ValueReader) ([]byte, error) { + return c.AppendDocumentBytes(nil, src) +} + +// AppendDocumentBytes functions the same as CopyDocumentToBytes, but will +// append the result to dst. +// +// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. +func (c Copier) AppendDocumentBytes(dst []byte, src ValueReader) ([]byte, error) { + if br, ok := src.(BytesReader); ok { + _, dst, err := br.ReadValueBytes(dst) + return dst, err + } + + vw := vwPool.Get().(*valueWriter) + defer putValueWriter(vw) + + vw.reset(dst) + + err := c.CopyDocument(vw, src) + dst = vw.buf + return dst, err +} + +// AppendArrayBytes copies an array from the ValueReader to dst. +// +// Deprecated: Copying BSON arrays using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. +func (c Copier) AppendArrayBytes(dst []byte, src ValueReader) ([]byte, error) { + if br, ok := src.(BytesReader); ok { + _, dst, err := br.ReadValueBytes(dst) + return dst, err + } + + vw := vwPool.Get().(*valueWriter) + defer putValueWriter(vw) + + vw.reset(dst) + + err := c.copyArray(vw, src) + dst = vw.buf + return dst, err +} + +// CopyValueFromBytes will write the value represtend by t and src to dst. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.UnmarshalValue] instead. +func (c Copier) CopyValueFromBytes(dst ValueWriter, t bsontype.Type, src []byte) error { + if wvb, ok := dst.(BytesWriter); ok { + return wvb.WriteValueBytes(t, src) + } + + vr := vrPool.Get().(*valueReader) + defer vrPool.Put(vr) + + vr.reset(src) + vr.pushElement(t) + + return c.CopyValue(dst, vr) +} + +// CopyValueToBytes copies a value from src and returns it as a bsontype.Type and a +// []byte. +// +// Deprecated: Use [go.mongodb.org/mongo-driver/bson.MarshalValue] instead. +func (c Copier) CopyValueToBytes(src ValueReader) (bsontype.Type, []byte, error) { + return c.AppendValueBytes(nil, src) +} + +// AppendValueBytes functions the same as CopyValueToBytes, but will append the +// result to dst. +// +// Deprecated: Appending individual BSON elements to an existing slice will not be supported in Go +// Driver 2.0. +func (c Copier) AppendValueBytes(dst []byte, src ValueReader) (bsontype.Type, []byte, error) { + if br, ok := src.(BytesReader); ok { + return br.ReadValueBytes(dst) + } + + vw := vwPool.Get().(*valueWriter) + defer putValueWriter(vw) + + start := len(dst) + + vw.reset(dst) + vw.push(mElement) + + err := c.CopyValue(vw, src) + if err != nil { + return 0, dst, err + } + + return bsontype.Type(vw.buf[start]), vw.buf[start+2:], nil +} + +// CopyValue will copy a single value from src to dst. +// +// Deprecated: Copying BSON values using the ValueWriter and ValueReader interfaces will not be +// supported in Go Driver 2.0. 
+func (c Copier) CopyValue(dst ValueWriter, src ValueReader) error { + var err error + switch src.Type() { + case bsontype.Double: + var f64 float64 + f64, err = src.ReadDouble() + if err != nil { + break + } + err = dst.WriteDouble(f64) + case bsontype.String: + var str string + str, err = src.ReadString() + if err != nil { + return err + } + err = dst.WriteString(str) + case bsontype.EmbeddedDocument: + err = c.CopyDocument(dst, src) + case bsontype.Array: + err = c.copyArray(dst, src) + case bsontype.Binary: + var data []byte + var subtype byte + data, subtype, err = src.ReadBinary() + if err != nil { + break + } + err = dst.WriteBinaryWithSubtype(data, subtype) + case bsontype.Undefined: + err = src.ReadUndefined() + if err != nil { + break + } + err = dst.WriteUndefined() + case bsontype.ObjectID: + var oid primitive.ObjectID + oid, err = src.ReadObjectID() + if err != nil { + break + } + err = dst.WriteObjectID(oid) + case bsontype.Boolean: + var b bool + b, err = src.ReadBoolean() + if err != nil { + break + } + err = dst.WriteBoolean(b) + case bsontype.DateTime: + var dt int64 + dt, err = src.ReadDateTime() + if err != nil { + break + } + err = dst.WriteDateTime(dt) + case bsontype.Null: + err = src.ReadNull() + if err != nil { + break + } + err = dst.WriteNull() + case bsontype.Regex: + var pattern, options string + pattern, options, err = src.ReadRegex() + if err != nil { + break + } + err = dst.WriteRegex(pattern, options) + case bsontype.DBPointer: + var ns string + var pointer primitive.ObjectID + ns, pointer, err = src.ReadDBPointer() + if err != nil { + break + } + err = dst.WriteDBPointer(ns, pointer) + case bsontype.JavaScript: + var js string + js, err = src.ReadJavascript() + if err != nil { + break + } + err = dst.WriteJavascript(js) + case bsontype.Symbol: + var symbol string + symbol, err = src.ReadSymbol() + if err != nil { + break + } + err = dst.WriteSymbol(symbol) + case bsontype.CodeWithScope: + var code string + var srcScope DocumentReader + code, srcScope, err = src.ReadCodeWithScope() + if err != nil { + break + } + + var dstScope DocumentWriter + dstScope, err = dst.WriteCodeWithScope(code) + if err != nil { + break + } + err = c.copyDocumentCore(dstScope, srcScope) + case bsontype.Int32: + var i32 int32 + i32, err = src.ReadInt32() + if err != nil { + break + } + err = dst.WriteInt32(i32) + case bsontype.Timestamp: + var t, i uint32 + t, i, err = src.ReadTimestamp() + if err != nil { + break + } + err = dst.WriteTimestamp(t, i) + case bsontype.Int64: + var i64 int64 + i64, err = src.ReadInt64() + if err != nil { + break + } + err = dst.WriteInt64(i64) + case bsontype.Decimal128: + var d128 primitive.Decimal128 + d128, err = src.ReadDecimal128() + if err != nil { + break + } + err = dst.WriteDecimal128(d128) + case bsontype.MinKey: + err = src.ReadMinKey() + if err != nil { + break + } + err = dst.WriteMinKey() + case bsontype.MaxKey: + err = src.ReadMaxKey() + if err != nil { + break + } + err = dst.WriteMaxKey() + default: + err = fmt.Errorf("Cannot copy unknown BSON type %s", src.Type()) + } + + return err +} + +func (c Copier) copyArray(dst ValueWriter, src ValueReader) error { + ar, err := src.ReadArray() + if err != nil { + return err + } + + aw, err := dst.WriteArray() + if err != nil { + return err + } + + for { + vr, err := ar.ReadValue() + if err == ErrEOA { + break + } + if err != nil { + return err + } + + vw, err := aw.WriteArrayElement() + if err != nil { + return err + } + + err = c.CopyValue(vw, vr) + if err != nil { + return err + } + } + + 
return aw.WriteArrayEnd()
+}
+
+func (c Copier) copyDocumentCore(dw DocumentWriter, dr DocumentReader) error {
+	for {
+		key, vr, err := dr.ReadElement()
+		if err == ErrEOD {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		vw, err := dw.WriteDocumentElement(key)
+		if err != nil {
+			return err
+		}
+
+		err = c.CopyValue(vw, vr)
+		if err != nil {
+			return err
+		}
+	}
+
+	return dw.WriteDocumentEnd()
+}
diff --git a/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go
new file mode 100644
index 000000000..750b0d2af
--- /dev/null
+++ b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go
@@ -0,0 +1,9 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package bsonrw contains abstractions for reading and writing
+// BSON and BSON like types from sources.
+package bsonrw // import "go.mongodb.org/mongo-driver/bson/bsonrw"
diff --git a/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go
new file mode 100644
index 000000000..54c76bf74
--- /dev/null
+++ b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go
@@ -0,0 +1,806 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"encoding/base64"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+)
+
+const maxNestingDepth = 200
+
+// ErrInvalidJSON indicates the JSON input is invalid
+var ErrInvalidJSON = errors.New("invalid JSON input")
+
+type jsonParseState byte
+
+const (
+	jpsStartState jsonParseState = iota
+	jpsSawBeginObject
+	jpsSawEndObject
+	jpsSawBeginArray
+	jpsSawEndArray
+	jpsSawColon
+	jpsSawComma
+	jpsSawKey
+	jpsSawValue
+	jpsDoneState
+	jpsInvalidState
+)
+
+type jsonParseMode byte
+
+const (
+	jpmInvalidMode jsonParseMode = iota
+	jpmObjectMode
+	jpmArrayMode
+)
+
+type extJSONValue struct {
+	t bsontype.Type
+	v interface{}
+}
+
+type extJSONObject struct {
+	keys   []string
+	values []*extJSONValue
+}
+
+type extJSONParser struct {
+	js *jsonScanner
+	s  jsonParseState
+	m  []jsonParseMode
+	k  string
+	v  *extJSONValue
+
+	err       error
+	canonical bool
+	depth     int
+	maxDepth  int
+
+	emptyObject bool
+	relaxedUUID bool
+}
+
+// newExtJSONParser returns a new extended JSON parser, ready to begin
+// parsing from the first character of the given JSON input. It will not
+// perform any read-ahead and will therefore not report any errors about
+// malformed JSON at this point.
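The parser defined below is unexported; the public entry point that exercises it is bson.UnmarshalExtJSON. A minimal sketch of parsing canonical extended JSON (the $numberInt wrapper is one of the type wrappers this parser recognizes):

```go
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

func main() {
	// canonical=true enables strict canonical extended JSON parsing.
	var doc bson.M
	err := bson.UnmarshalExtJSON([]byte(`{"n": {"$numberInt": "42"}}`), true, &doc)
	fmt.Println(err, doc["n"]) // <nil> 42 (an int32)
}
```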
+func newExtJSONParser(r io.Reader, canonical bool) *extJSONParser {
+	return &extJSONParser{
+		js:        &jsonScanner{r: r},
+		s:         jpsStartState,
+		m:         []jsonParseMode{},
+		canonical: canonical,
+		maxDepth:  maxNestingDepth,
+	}
+}
+
+// peekType examines the next value and returns its BSON Type
+func (ejp *extJSONParser) peekType() (bsontype.Type, error) {
+	var t bsontype.Type
+	var err error
+	initialState := ejp.s
+
+	ejp.advanceState()
+	switch ejp.s {
+	case jpsSawValue:
+		t = ejp.v.t
+	case jpsSawBeginArray:
+		t = bsontype.Array
+	case jpsInvalidState:
+		err = ejp.err
+	case jpsSawComma:
+		// in array mode, seeing a comma means we need to progress again to actually observe a type
+		if ejp.peekMode() == jpmArrayMode {
+			return ejp.peekType()
+		}
+	case jpsSawEndArray:
+		// this would only be a valid state if we were in array mode, so return end-of-array error
+		err = ErrEOA
+	case jpsSawBeginObject:
+		// peek key to determine type
+		ejp.advanceState()
+		switch ejp.s {
+		case jpsSawEndObject: // empty embedded document
+			t = bsontype.EmbeddedDocument
+			ejp.emptyObject = true
+		case jpsInvalidState:
+			err = ejp.err
+		case jpsSawKey:
+			if initialState == jpsStartState {
+				return bsontype.EmbeddedDocument, nil
+			}
+			t = wrapperKeyBSONType(ejp.k)
+
+			// if $uuid is encountered, parse as binary subtype 4
+			if ejp.k == "$uuid" {
+				ejp.relaxedUUID = true
+				t = bsontype.Binary
+			}
+
+			switch t {
+			case bsontype.JavaScript:
+				// just saw $code, need to check for $scope at same level
+				_, err = ejp.readValue(bsontype.JavaScript)
+				if err != nil {
+					break
+				}
+
+				switch ejp.s {
+				case jpsSawEndObject: // type is TypeJavaScript
+				case jpsSawComma:
+					ejp.advanceState()
+
+					if ejp.s == jpsSawKey && ejp.k == "$scope" {
+						t = bsontype.CodeWithScope
+					} else {
+						err = fmt.Errorf("invalid extended JSON: unexpected key %s in CodeWithScope object", ejp.k)
+					}
+				case jpsInvalidState:
+					err = ejp.err
+				default:
+					err = ErrInvalidJSON
+				}
+			case bsontype.CodeWithScope:
+				err = errors.New("invalid extended JSON: code with $scope must contain $code before $scope")
+			}
+		}
+	}
+
+	return t, err
+}
+
+// readKey parses the next key and its type and returns them
+func (ejp *extJSONParser) readKey() (string, bsontype.Type, error) {
+	if ejp.emptyObject {
+		ejp.emptyObject = false
+		return "", 0, ErrEOD
+	}
+
+	// advance to key (or return with error)
+	switch ejp.s {
+	case jpsStartState:
+		ejp.advanceState()
+		if ejp.s == jpsSawBeginObject {
+			ejp.advanceState()
+		}
+	case jpsSawBeginObject:
+		ejp.advanceState()
+	case jpsSawValue, jpsSawEndObject, jpsSawEndArray:
+		ejp.advanceState()
+		switch ejp.s {
+		case jpsSawBeginObject, jpsSawComma:
+			ejp.advanceState()
+		case jpsSawEndObject:
+			return "", 0, ErrEOD
+		case jpsDoneState:
+			return "", 0, io.EOF
+		case jpsInvalidState:
+			return "", 0, ejp.err
+		default:
+			return "", 0, ErrInvalidJSON
+		}
+	case jpsSawKey: // do nothing (key was peeked before)
+	default:
+		return "", 0, invalidRequestError("key")
+	}
+
+	// read key
+	var key string
+
+	switch ejp.s {
+	case jpsSawKey:
+		key = ejp.k
+	case jpsSawEndObject:
+		return "", 0, ErrEOD
+	case jpsInvalidState:
+		return "", 0, ejp.err
+	default:
+		return "", 0, invalidRequestError("key")
+	}
+
+	// check for colon
+	ejp.advanceState()
+	if err := ensureColon(ejp.s, key); err != nil {
+		return "", 0, err
+	}
+
+	// peek at the value to determine type
+	t, err := ejp.peekType()
+	if err != nil {
+		return "", 0, err
+	}
+
+	return key, t, nil
+}
+
+// readValue returns the value corresponding to the Type returned by peekType
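+// It returns an invalidRequestError when the parser is not positioned on a
+// value (or wrapping key) of the requested type.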
+func (ejp *extJSONParser) readValue(t bsontype.Type) (*extJSONValue, error) {
+	if ejp.s == jpsInvalidState {
+		return nil, ejp.err
+	}
+
+	var v *extJSONValue
+
+	switch t {
+	case bsontype.Null, bsontype.Boolean, bsontype.String:
+		if ejp.s != jpsSawValue {
+			return nil, invalidRequestError(t.String())
+		}
+		v = ejp.v
+	case bsontype.Int32, bsontype.Int64, bsontype.Double:
+		// relaxed version allows these to be literal number values
+		if ejp.s == jpsSawValue {
+			v = ejp.v
+			break
+		}
+		fallthrough
+	case bsontype.Decimal128, bsontype.Symbol, bsontype.ObjectID, bsontype.MinKey, bsontype.MaxKey, bsontype.Undefined:
+		switch ejp.s {
+		case jpsSawKey:
+			// read colon
+			ejp.advanceState()
+			if err := ensureColon(ejp.s, ejp.k); err != nil {
+				return nil, err
+			}
+
+			// read value
+			ejp.advanceState()
+			if ejp.s != jpsSawValue || !ejp.ensureExtValueType(t) {
+				return nil, invalidJSONErrorForType("value", t)
+			}
+
+			v = ejp.v
+
+			// read end object
+			ejp.advanceState()
+			if ejp.s != jpsSawEndObject {
+				return nil, invalidJSONErrorForType("} after value", t)
+			}
+		default:
+			return nil, invalidRequestError(t.String())
+		}
+	case bsontype.Binary, bsontype.Regex, bsontype.Timestamp, bsontype.DBPointer:
+		if ejp.s != jpsSawKey {
+			return nil, invalidRequestError(t.String())
+		}
+		// read colon
+		ejp.advanceState()
+		if err := ensureColon(ejp.s, ejp.k); err != nil {
+			return nil, err
+		}
+
+		ejp.advanceState()
+		if t == bsontype.Binary && ejp.s == jpsSawValue {
+			// convert relaxed $uuid format
+			if ejp.relaxedUUID {
+				defer func() { ejp.relaxedUUID = false }()
+				uuid, err := ejp.v.parseSymbol()
+				if err != nil {
+					return nil, err
+				}
+
+				// RFC 4122 defines the length of a UUID as 36 and the hyphens in a UUID as appearing
+				// in the 8th, 13th, 18th, and 23rd characters.
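+				// For example: "3b241101-e2bb-4255-8caf-4136c566a962".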
+				//
+				// See https://tools.ietf.org/html/rfc4122#section-3
+				valid := len(uuid) == 36 &&
+					string(uuid[8]) == "-" &&
+					string(uuid[13]) == "-" &&
+					string(uuid[18]) == "-" &&
+					string(uuid[23]) == "-"
+				if !valid {
+					return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding length and hyphens")
+				}
+
+				// remove hyphens
+				uuidNoHyphens := strings.Replace(uuid, "-", "", -1)
+				if len(uuidNoHyphens) != 32 {
+					return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding length and hyphens")
+				}
+
+				// convert hex to bytes
+				bytes, err := hex.DecodeString(uuidNoHyphens)
+				if err != nil {
+					return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding hex bytes: %v", err)
+				}
+
+				ejp.advanceState()
+				if ejp.s != jpsSawEndObject {
+					return nil, invalidJSONErrorForType("$uuid and value and then }", bsontype.Binary)
+				}
+
+				base64 := &extJSONValue{
+					t: bsontype.String,
+					v: base64.StdEncoding.EncodeToString(bytes),
+				}
+				subType := &extJSONValue{
+					t: bsontype.String,
+					v: "04",
+				}
+
+				v = &extJSONValue{
+					t: bsontype.EmbeddedDocument,
+					v: &extJSONObject{
+						keys:   []string{"base64", "subType"},
+						values: []*extJSONValue{base64, subType},
+					},
+				}
+
+				break
+			}
+
+			// convert legacy $binary format
+			base64 := ejp.v
+
+			ejp.advanceState()
+			if ejp.s != jpsSawComma {
+				return nil, invalidJSONErrorForType(",", bsontype.Binary)
+			}
+
+			ejp.advanceState()
+			key, t, err := ejp.readKey()
+			if err != nil {
+				return nil, err
+			}
+			if key != "$type" {
+				return nil, invalidJSONErrorForType("$type", bsontype.Binary)
+			}
+
+			subType, err := ejp.readValue(t)
+			if err != nil {
+				return nil, err
+			}
+
+			ejp.advanceState()
+			if ejp.s != jpsSawEndObject {
+				return nil, invalidJSONErrorForType("2 key-value pairs and then }", bsontype.Binary)
+			}
+
+			v = &extJSONValue{
+				t: bsontype.EmbeddedDocument,
+				v: &extJSONObject{
+					keys:   []string{"base64", "subType"},
+					values: []*extJSONValue{base64, subType},
+				},
+			}
+			break
+		}
+
+		// read KV pairs
+		if ejp.s != jpsSawBeginObject {
+			return nil, invalidJSONErrorForType("{", t)
+		}
+
+		keys, vals, err := ejp.readObject(2, true)
+		if err != nil {
+			return nil, err
+		}
+
+		ejp.advanceState()
+		if ejp.s != jpsSawEndObject {
+			return nil, invalidJSONErrorForType("2 key-value pairs and then }", t)
+		}
+
+		v = &extJSONValue{t: bsontype.EmbeddedDocument, v: &extJSONObject{keys: keys, values: vals}}
+
+	case bsontype.DateTime:
+		switch ejp.s {
+		case jpsSawValue:
+			v = ejp.v
+		case jpsSawKey:
+			// read colon
+			ejp.advanceState()
+			if err := ensureColon(ejp.s, ejp.k); err != nil {
+				return nil, err
+			}
+
+			ejp.advanceState()
+			switch ejp.s {
+			case jpsSawBeginObject:
+				keys, vals, err := ejp.readObject(1, true)
+				if err != nil {
+					return nil, err
+				}
+				v = &extJSONValue{t: bsontype.EmbeddedDocument, v: &extJSONObject{keys: keys, values: vals}}
+			case jpsSawValue:
+				if ejp.canonical {
+					return nil, invalidJSONError("{")
+				}
+				v = ejp.v
+			default:
+				if ejp.canonical {
+					return nil, invalidJSONErrorForType("object", t)
+				}
+				return nil, invalidJSONErrorForType("ISO-8601 Internet Date/Time Format as described in RFC-3339", t)
+			}
+
+			ejp.advanceState()
+			if ejp.s != jpsSawEndObject {
+				return nil, invalidJSONErrorForType("value and then }", t)
+			}
+		default:
+			return nil, invalidRequestError(t.String())
+		}
+	case bsontype.JavaScript:
+		switch ejp.s {
+		case jpsSawKey:
+			// read colon
+			ejp.advanceState()
+			if err := ensureColon(ejp.s, ejp.k); err != nil {
+				return nil, err
+			}
+
+			// read value
+			ejp.advanceState()
+			if ejp.s != jpsSawValue {
+				return nil, invalidJSONErrorForType("value", t)
+			}
+			v = ejp.v
+
+			// read end object or comma and just return
+			ejp.advanceState()
+		case jpsSawEndObject:
+			v = ejp.v
+		default:
+			return nil, invalidRequestError(t.String())
+		}
+	case bsontype.CodeWithScope:
+		if ejp.s == jpsSawKey && ejp.k == "$scope" {
+			v = ejp.v // this is the $code string from earlier
+
+			// read colon
+			ejp.advanceState()
+			if err := ensureColon(ejp.s, ejp.k); err != nil {
+				return nil, err
+			}
+
+			// read {
+			ejp.advanceState()
+			if ejp.s != jpsSawBeginObject {
+				return nil, invalidJSONError("$scope to be embedded document")
+			}
+		} else {
+			return nil, invalidRequestError(t.String())
+		}
+	case bsontype.EmbeddedDocument, bsontype.Array:
+		return nil, invalidRequestError(t.String())
+	}
+
+	return v, nil
+}
+
+// readObject is a utility method for reading full objects of known (or expected) size
+// it is useful for extended JSON types such as binary, datetime, regex, and timestamp
+func (ejp *extJSONParser) readObject(numKeys int, started bool) ([]string, []*extJSONValue, error) {
+	keys := make([]string, numKeys)
+	vals := make([]*extJSONValue, numKeys)
+
+	if !started {
+		ejp.advanceState()
+		if ejp.s != jpsSawBeginObject {
+			return nil, nil, invalidJSONError("{")
+		}
+	}
+
+	for i := 0; i < numKeys; i++ {
+		key, t, err := ejp.readKey()
+		if err != nil {
+			return nil, nil, err
+		}
+
+		switch ejp.s {
+		case jpsSawKey:
+			v, err := ejp.readValue(t)
+			if err != nil {
+				return nil, nil, err
+			}
+
+			keys[i] = key
+			vals[i] = v
+		case jpsSawValue:
+			keys[i] = key
+			vals[i] = ejp.v
+		default:
+			return nil, nil, invalidJSONError("value")
+		}
+	}
+
+	ejp.advanceState()
+	if ejp.s != jpsSawEndObject {
+		return nil, nil, invalidJSONError("}")
+	}
+
+	return keys, vals, nil
+}
+
+// advanceState reads the next JSON token from the scanner and transitions
+// from the current state based on that token's type
+func (ejp *extJSONParser) advanceState() {
+	if ejp.s == jpsDoneState || ejp.s == jpsInvalidState {
+		return
+	}
+
+	jt, err := ejp.js.nextToken()
+
+	if err != nil {
+		ejp.err = err
+		ejp.s = jpsInvalidState
+		return
+	}
+
+	valid := ejp.validateToken(jt.t)
+	if !valid {
+		ejp.err = unexpectedTokenError(jt)
+		ejp.s = jpsInvalidState
+		return
+	}
+
+	switch jt.t {
+	case jttBeginObject:
+		ejp.s = jpsSawBeginObject
+		ejp.pushMode(jpmObjectMode)
+		ejp.depth++
+
+		if ejp.depth > ejp.maxDepth {
+			ejp.err = nestingDepthError(jt.p, ejp.depth)
+			ejp.s = jpsInvalidState
+		}
+	case jttEndObject:
+		ejp.s = jpsSawEndObject
+		ejp.depth--
+
+		if ejp.popMode() != jpmObjectMode {
+			ejp.err = unexpectedTokenError(jt)
+			ejp.s = jpsInvalidState
+		}
+	case jttBeginArray:
+		ejp.s = jpsSawBeginArray
+		ejp.pushMode(jpmArrayMode)
+	case jttEndArray:
+		ejp.s = jpsSawEndArray
+
+		if ejp.popMode() != jpmArrayMode {
+			ejp.err = unexpectedTokenError(jt)
+			ejp.s = jpsInvalidState
+		}
+	case jttColon:
+		ejp.s = jpsSawColon
+	case jttComma:
+		ejp.s = jpsSawComma
+	case jttEOF:
+		ejp.s = jpsDoneState
+		if len(ejp.m) != 0 {
+			ejp.err = unexpectedTokenError(jt)
+			ejp.s = jpsInvalidState
+		}
+	case jttString:
+		switch ejp.s {
+		case jpsSawComma:
+			if ejp.peekMode() == jpmArrayMode {
+				ejp.s = jpsSawValue
+				ejp.v = extendJSONToken(jt)
+				return
+			}
+			fallthrough
+		case jpsSawBeginObject:
+			ejp.s = jpsSawKey
+			ejp.k = jt.v.(string)
+			return
+		}
+		fallthrough
+	default:
+		ejp.s = jpsSawValue
+		ejp.v = extendJSONToken(jt)
+	}
+}
+
+var jpsValidTransitionTokens = map[jsonParseState]map[jsonTokenType]bool{
+	jpsStartState: {
+		jttBeginObject: true,
+		jttBeginArray:  true,
+		jttInt32:       true,
+		jttInt64:       true,
+		jttDouble:      true,
+		jttString:      true,
+		jttBool:        true,
+		jttNull:        true,
+		jttEOF:         true,
+	},
+	jpsSawBeginObject: {
+		jttEndObject: true,
+		jttString:    true,
+	},
+	jpsSawEndObject: {
+		jttEndObject: true,
+		jttEndArray:  true,
+		jttComma:     true,
+		jttEOF:       true,
+	},
+	jpsSawBeginArray: {
+		jttBeginObject: true,
+		jttBeginArray:  true,
+		jttEndArray:    true,
+		jttInt32:       true,
+		jttInt64:       true,
+		jttDouble:      true,
+		jttString:      true,
+		jttBool:        true,
+		jttNull:        true,
+	},
+	jpsSawEndArray: {
+		jttEndObject: true,
+		jttEndArray:  true,
+		jttComma:     true,
+		jttEOF:       true,
+	},
+	jpsSawColon: {
+		jttBeginObject: true,
+		jttBeginArray:  true,
+		jttInt32:       true,
+		jttInt64:       true,
+		jttDouble:      true,
+		jttString:      true,
+		jttBool:        true,
+		jttNull:        true,
+	},
+	jpsSawComma: {
+		jttBeginObject: true,
+		jttBeginArray:  true,
+		jttInt32:       true,
+		jttInt64:       true,
+		jttDouble:      true,
+		jttString:      true,
+		jttBool:        true,
+		jttNull:        true,
+	},
+	jpsSawKey: {
+		jttColon: true,
+	},
+	jpsSawValue: {
+		jttEndObject: true,
+		jttEndArray:  true,
+		jttComma:     true,
+		jttEOF:       true,
+	},
+	jpsDoneState:    {},
+	jpsInvalidState: {},
+}
+
+func (ejp *extJSONParser) validateToken(jtt jsonTokenType) bool {
+	switch ejp.s {
+	case jpsSawEndObject:
+		// if we are at depth zero and the next token is a '{',
+		// we can consider it valid only if we are not in array mode.
+		if jtt == jttBeginObject && ejp.depth == 0 {
+			return ejp.peekMode() != jpmArrayMode
+		}
+	case jpsSawComma:
+		switch ejp.peekMode() {
+		// the only valid next token after a comma inside a document is a string (a key)
+		case jpmObjectMode:
+			return jtt == jttString
+		case jpmInvalidMode:
+			return false
+		}
+	}
+
+	_, ok := jpsValidTransitionTokens[ejp.s][jtt]
+	return ok
+}
+
+// ensureExtValueType returns true if the current value has the expected
+// value type for single-key extended JSON types. For example, in
+// {"$numberInt": v}, v must be a string.
+func (ejp *extJSONParser) ensureExtValueType(t bsontype.Type) bool {
+	switch t {
+	case bsontype.MinKey, bsontype.MaxKey:
+		return ejp.v.t == bsontype.Int32
+	case bsontype.Undefined:
+		return ejp.v.t == bsontype.Boolean
+	case bsontype.Int32, bsontype.Int64, bsontype.Double, bsontype.Decimal128, bsontype.Symbol, bsontype.ObjectID:
+		return ejp.v.t == bsontype.String
+	default:
+		return false
+	}
+}
+
+func (ejp *extJSONParser) pushMode(m jsonParseMode) {
+	ejp.m = append(ejp.m, m)
+}
+
+func (ejp *extJSONParser) popMode() jsonParseMode {
+	l := len(ejp.m)
+	if l == 0 {
+		return jpmInvalidMode
+	}
+
+	m := ejp.m[l-1]
+	ejp.m = ejp.m[:l-1]
+
+	return m
+}
+
+func (ejp *extJSONParser) peekMode() jsonParseMode {
+	l := len(ejp.m)
+	if l == 0 {
+		return jpmInvalidMode
+	}
+
+	return ejp.m[l-1]
+}
+
+func extendJSONToken(jt *jsonToken) *extJSONValue {
+	var t bsontype.Type
+
+	switch jt.t {
+	case jttInt32:
+		t = bsontype.Int32
+	case jttInt64:
+		t = bsontype.Int64
+	case jttDouble:
+		t = bsontype.Double
+	case jttString:
+		t = bsontype.String
+	case jttBool:
+		t = bsontype.Boolean
+	case jttNull:
+		t = bsontype.Null
+	default:
+		return nil
+	}
+
+	return &extJSONValue{t: t, v: jt.v}
+}
+
+func ensureColon(s jsonParseState, key string) error {
+	if s != jpsSawColon {
+		return fmt.Errorf("invalid JSON input: missing colon after key \"%s\"", key)
+	}
+
+	return nil
+}
+
+func invalidRequestError(s string) error {
+	return fmt.Errorf("invalid request to read %s", s)
+}
+
+func invalidJSONError(expected string) error {
+	return fmt.Errorf("invalid JSON input; expected %s", expected)
+}
+
+func invalidJSONErrorForType(expected string, t bsontype.Type) error {
+	return fmt.Errorf("invalid JSON input; expected %s for %s", expected, t)
+}
+
+func unexpectedTokenError(jt *jsonToken) error {
+	switch jt.t {
+	case jttInt32, jttInt64, jttDouble:
+		return fmt.Errorf("invalid JSON input; unexpected number (%v) at position %d", jt.v, jt.p)
+	case jttString:
+		return fmt.Errorf("invalid JSON input; unexpected string (\"%v\") at position %d", jt.v, jt.p)
+	case jttBool:
+		return fmt.Errorf("invalid JSON input; unexpected boolean literal (%v) at position %d", jt.v, jt.p)
+	case jttNull:
+		return fmt.Errorf("invalid JSON input; unexpected null literal at position %d", jt.p)
+	case jttEOF:
+		return fmt.Errorf("invalid JSON input; unexpected end of input at position %d", jt.p)
+	default:
+		return fmt.Errorf("invalid JSON input; unexpected %c at position %d", jt.v.(byte), jt.p)
+	}
+}
+
+func nestingDepthError(p, depth int) error {
+	return fmt.Errorf("invalid JSON input; nesting too deep (%d levels) at position %d", depth, p)
+}
diff --git a/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go
new file mode 100644
index 000000000..2aca37a91
--- /dev/null
+++ b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go
@@ -0,0 +1,652 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"fmt"
+	"io"
+	"sync"
+
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+// ExtJSONValueReaderPool is a pool for ValueReaders that read ExtJSON.
+//
+// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0.
+type ExtJSONValueReaderPool struct {
+	pool sync.Pool
+}
+
+// NewExtJSONValueReaderPool instantiates a new ExtJSONValueReaderPool.
+//
+// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0.
+func NewExtJSONValueReaderPool() *ExtJSONValueReaderPool {
+	return &ExtJSONValueReaderPool{
+		pool: sync.Pool{
+			New: func() interface{} {
+				return new(extJSONValueReader)
+			},
+		},
+	}
+}
+
+// Get retrieves a ValueReader from the pool and uses src as the underlying ExtJSON.
+//
+// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0.
+func (bvrp *ExtJSONValueReaderPool) Get(r io.Reader, canonical bool) (ValueReader, error) {
+	vr := bvrp.pool.Get().(*extJSONValueReader)
+	return vr.reset(r, canonical)
+}
+
+// Put inserts a ValueReader into the pool. If the ValueReader is not an ExtJSON ValueReader,
+// nothing is inserted into the pool and ok will be false.
+//
+// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0.
+func (bvrp *ExtJSONValueReaderPool) Put(vr ValueReader) (ok bool) {
+	bvr, ok := vr.(*extJSONValueReader)
+	if !ok {
+		return false
+	}
+
+	bvr, _ = bvr.reset(nil, false)
+	bvrp.pool.Put(bvr)
+	return true
+}
+
+type ejvrState struct {
+	mode  mode
+	vType bsontype.Type
+	depth int
+}
+
+// extJSONValueReader is for reading extended JSON.
+type extJSONValueReader struct {
+	p *extJSONParser
+
+	stack []ejvrState
+	frame int
+}
+
+// NewExtJSONValueReader creates a new ValueReader from a given io.Reader.
+// It will interpret the JSON of r as canonical or relaxed according to the
+// given canonical flag.
+func NewExtJSONValueReader(r io.Reader, canonical bool) (ValueReader, error) {
+	return newExtJSONValueReader(r, canonical)
+}
+
+func newExtJSONValueReader(r io.Reader, canonical bool) (*extJSONValueReader, error) {
+	ejvr := new(extJSONValueReader)
+	return ejvr.reset(r, canonical)
+}
+
+func (ejvr *extJSONValueReader) reset(r io.Reader, canonical bool) (*extJSONValueReader, error) {
+	p := newExtJSONParser(r, canonical)
+	typ, err := p.peekType()
+
+	if err != nil {
+		return nil, ErrInvalidJSON
+	}
+
+	var m mode
+	switch typ {
+	case bsontype.EmbeddedDocument:
+		m = mTopLevel
+	case bsontype.Array:
+		m = mArray
+	default:
+		m = mValue
+	}
+
+	stack := make([]ejvrState, 1, 5)
+	stack[0] = ejvrState{
+		mode:  m,
+		vType: typ,
+	}
+	return &extJSONValueReader{
+		p:     p,
+		stack: stack,
+	}, nil
+}
+
+func (ejvr *extJSONValueReader) advanceFrame() {
+	if ejvr.frame+1 >= len(ejvr.stack) { // We need to grow the stack
+		length := len(ejvr.stack)
+		if length+1 >= cap(ejvr.stack) {
+			// double it
+			buf := make([]ejvrState, 2*cap(ejvr.stack)+1)
+			copy(buf, ejvr.stack)
+			ejvr.stack = buf
+		}
+		ejvr.stack = ejvr.stack[:length+1]
+	}
+	ejvr.frame++
+
+	// Clean the stack
+	ejvr.stack[ejvr.frame].mode = 0
+	ejvr.stack[ejvr.frame].vType = 0
+	ejvr.stack[ejvr.frame].depth = 0
+}
+
+func (ejvr *extJSONValueReader) pushDocument() {
+	ejvr.advanceFrame()
+
+	ejvr.stack[ejvr.frame].mode = mDocument
+	ejvr.stack[ejvr.frame].depth = ejvr.p.depth
+}
+
+func (ejvr *extJSONValueReader) pushCodeWithScope() {
+	ejvr.advanceFrame()
+
+	ejvr.stack[ejvr.frame].mode = mCodeWithScope
+}
+
+func (ejvr *extJSONValueReader) pushArray() {
+	ejvr.advanceFrame()
+
+	ejvr.stack[ejvr.frame].mode = mArray
+}
+
+func (ejvr *extJSONValueReader) push(m mode, t bsontype.Type) {
+	ejvr.advanceFrame()
+
+	ejvr.stack[ejvr.frame].mode = m
+	ejvr.stack[ejvr.frame].vType = t
+}
+
+func (ejvr *extJSONValueReader) pop() {
+	switch ejvr.stack[ejvr.frame].mode {
+	case mElement, mValue:
+		ejvr.frame--
+	case mDocument, mArray, mCodeWithScope:
+		ejvr.frame -= 2 // we pop twice to jump over the vrElement: vrDocument -> vrElement -> vrDocument/TopLevel/etc...
+	}
+}
+
+func (ejvr *extJSONValueReader) skipObject() {
+	// read entire object until depth returns to 0 (last ending } or ] seen)
+	depth := 1
+	for depth > 0 {
+		ejvr.p.advanceState()
+
+		// If object is empty, lower depth and continue. When emptyObject is true, the
+		// parser has already read both the opening and closing brackets of an empty
+		// object ("{}"), so the next valid token will be part of the parent document,
+		// not part of the nested document.
+		//
+		// If there is a comma, there are remaining fields, emptyObject must be set back
+		// to false, and comma must be skipped with advanceState().
+		if ejvr.p.emptyObject {
+			if ejvr.p.s == jpsSawComma {
+				ejvr.p.emptyObject = false
+				ejvr.p.advanceState()
+			}
+			depth--
+			continue
+		}
+
+		switch ejvr.p.s {
+		case jpsSawBeginObject, jpsSawBeginArray:
+			depth++
+		case jpsSawEndObject, jpsSawEndArray:
+			depth--
+		}
+	}
+}
+
+func (ejvr *extJSONValueReader) invalidTransitionErr(destination mode, name string, modes []mode) error {
+	te := TransitionError{
+		name:        name,
+		current:     ejvr.stack[ejvr.frame].mode,
+		destination: destination,
+		modes:       modes,
+		action:      "read",
+	}
+	if ejvr.frame != 0 {
+		te.parent = ejvr.stack[ejvr.frame-1].mode
+	}
+	return te
+}
+
+func (ejvr *extJSONValueReader) typeError(t bsontype.Type) error {
+	return fmt.Errorf("positioned on %s, but attempted to read %s", ejvr.stack[ejvr.frame].vType, t)
+}
+
+func (ejvr *extJSONValueReader) ensureElementValue(t bsontype.Type, destination mode, callerName string, addModes ...mode) error {
+	switch ejvr.stack[ejvr.frame].mode {
+	case mElement, mValue:
+		if ejvr.stack[ejvr.frame].vType != t {
+			return ejvr.typeError(t)
+		}
+	default:
+		modes := []mode{mElement, mValue}
+		if addModes != nil {
+			modes = append(modes, addModes...)
+		}
+		return ejvr.invalidTransitionErr(destination, callerName, modes)
+	}
+
+	return nil
+}
+
+func (ejvr *extJSONValueReader) Type() bsontype.Type {
+	return ejvr.stack[ejvr.frame].vType
+}
+
+func (ejvr *extJSONValueReader) Skip() error {
+	switch ejvr.stack[ejvr.frame].mode {
+	case mElement, mValue:
+	default:
+		return ejvr.invalidTransitionErr(0, "Skip", []mode{mElement, mValue})
+	}
+
+	defer ejvr.pop()
+
+	t := ejvr.stack[ejvr.frame].vType
+	switch t {
+	case bsontype.Array, bsontype.EmbeddedDocument, bsontype.CodeWithScope:
+		// read entire array, doc or CodeWithScope
+		ejvr.skipObject()
+	default:
+		_, err := ejvr.p.readValue(t)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (ejvr *extJSONValueReader) ReadArray() (ArrayReader, error) {
+	switch ejvr.stack[ejvr.frame].mode {
+	case mTopLevel: // allow reading array from top level
+	case mArray:
+		return ejvr, nil
+	default:
+		if err := ejvr.ensureElementValue(bsontype.Array, mArray, "ReadArray", mTopLevel, mArray); err != nil {
+			return nil, err
+		}
+	}
+
+	ejvr.pushArray()
+
+	return ejvr, nil
+}
+
+func (ejvr *extJSONValueReader) ReadBinary() (b []byte, btype byte, err error) {
+	if err := ejvr.ensureElementValue(bsontype.Binary, 0, "ReadBinary"); err != nil {
+		return nil, 0, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Binary)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	b, btype, err = v.parseBinary()
+
+	ejvr.pop()
+	return b, btype, err
+}
+
+func (ejvr *extJSONValueReader) ReadBoolean() (bool, error) {
+	if err := ejvr.ensureElementValue(bsontype.Boolean, 0, "ReadBoolean"); err != nil {
+		return false, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Boolean)
+	if err != nil {
+		return false, err
+	}
+
+	if v.t != bsontype.Boolean {
+		return false, fmt.Errorf("expected type bool, but got type %s", v.t)
+	}
+
+	ejvr.pop()
+	return v.v.(bool), nil
+}
+
+func (ejvr *extJSONValueReader) ReadDocument() (DocumentReader, error) {
+	switch ejvr.stack[ejvr.frame].mode {
+	case mTopLevel:
+		return ejvr, nil
+	case mElement, mValue:
+		if ejvr.stack[ejvr.frame].vType != bsontype.EmbeddedDocument {
+			return nil, ejvr.typeError(bsontype.EmbeddedDocument)
+		}
+
+		ejvr.pushDocument()
+		return ejvr, nil
+	default:
+		return nil, ejvr.invalidTransitionErr(mDocument, "ReadDocument", []mode{mTopLevel, mElement, mValue})
+	}
+}
+
+func (ejvr *extJSONValueReader) ReadCodeWithScope() (code string, dr DocumentReader, err error) {
+	if err = ejvr.ensureElementValue(bsontype.CodeWithScope, 0, "ReadCodeWithScope"); err != nil {
+		return "", nil, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.CodeWithScope)
+	if err != nil {
+		return "", nil, err
+	}
+
+	code, err = v.parseJavascript()
+
+	ejvr.pushCodeWithScope()
+	return code, ejvr, err
+}
+
+func (ejvr *extJSONValueReader) ReadDBPointer() (ns string, oid primitive.ObjectID, err error) {
+	if err = ejvr.ensureElementValue(bsontype.DBPointer, 0, "ReadDBPointer"); err != nil {
+		return "", primitive.NilObjectID, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.DBPointer)
+	if err != nil {
+		return "", primitive.NilObjectID, err
+	}
+
+	ns, oid, err = v.parseDBPointer()
+
+	ejvr.pop()
+	return ns, oid, err
+}
+
+func (ejvr *extJSONValueReader) ReadDateTime() (int64, error) {
+	if err := ejvr.ensureElementValue(bsontype.DateTime, 0, "ReadDateTime"); err != nil {
+		return 0, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.DateTime)
+	if err != nil {
+		return 0, err
+	}
+
+	d, err := v.parseDateTime()
+
+	ejvr.pop()
+	return d, err
+}
+
+func (ejvr *extJSONValueReader) ReadDecimal128() (primitive.Decimal128, error) {
+	if err := ejvr.ensureElementValue(bsontype.Decimal128, 0, "ReadDecimal128"); err != nil {
+		return primitive.Decimal128{}, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Decimal128)
+	if err != nil {
+		return primitive.Decimal128{}, err
+	}
+
+	d, err := v.parseDecimal128()
+
+	ejvr.pop()
+	return d, err
+}
+
+func (ejvr *extJSONValueReader) ReadDouble() (float64, error) {
+	if err := ejvr.ensureElementValue(bsontype.Double, 0, "ReadDouble"); err != nil {
+		return 0, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Double)
+	if err != nil {
+		return 0, err
+	}
+
+	d, err := v.parseDouble()
+
+	ejvr.pop()
+	return d, err
+}
+
+func (ejvr *extJSONValueReader) ReadInt32() (int32, error) {
+	if err := ejvr.ensureElementValue(bsontype.Int32, 0, "ReadInt32"); err != nil {
+		return 0, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Int32)
+	if err != nil {
+		return 0, err
+	}
+
+	i, err := v.parseInt32()
+
+	ejvr.pop()
+	return i, err
+}
+
+func (ejvr *extJSONValueReader) ReadInt64() (int64, error) {
+	if err := ejvr.ensureElementValue(bsontype.Int64, 0, "ReadInt64"); err != nil {
+		return 0, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Int64)
+	if err != nil {
+		return 0, err
+	}
+
+	i, err := v.parseInt64()
+
+	ejvr.pop()
+	return i, err
+}
+
+func (ejvr *extJSONValueReader) ReadJavascript() (code string, err error) {
+	if err = ejvr.ensureElementValue(bsontype.JavaScript, 0, "ReadJavascript"); err != nil {
+		return "", err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.JavaScript)
+	if err != nil {
+		return "", err
+	}
+
+	code, err = v.parseJavascript()
+
+	ejvr.pop()
+	return code, err
+}
+
+func (ejvr *extJSONValueReader) ReadMaxKey() error {
+	if err := ejvr.ensureElementValue(bsontype.MaxKey, 0, "ReadMaxKey"); err != nil {
+		return err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.MaxKey)
+	if err != nil {
+		return err
+	}
+
+	err = v.parseMinMaxKey("max")
+
+	ejvr.pop()
+	return err
+}
+
+func (ejvr *extJSONValueReader) ReadMinKey() error {
+	if err := ejvr.ensureElementValue(bsontype.MinKey, 0, "ReadMinKey"); err != nil {
+		return err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.MinKey)
+	if err != nil {
+		return err
+	}
+
+	err = v.parseMinMaxKey("min")
+
+	ejvr.pop()
+	return err
+}
+
+func (ejvr *extJSONValueReader) ReadNull() error {
+	if err := ejvr.ensureElementValue(bsontype.Null, 0, "ReadNull"); err != nil {
+		return err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Null)
+	if err != nil {
+		return err
+	}
+
+	if v.t != bsontype.Null {
+		return fmt.Errorf("expected type null but got type %s", v.t)
+	}
+
+	ejvr.pop()
+	return nil
+}
+
+func (ejvr *extJSONValueReader) ReadObjectID() (primitive.ObjectID, error) {
+	if err := ejvr.ensureElementValue(bsontype.ObjectID, 0, "ReadObjectID"); err != nil {
+		return primitive.ObjectID{}, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.ObjectID)
+	if err != nil {
+		return primitive.ObjectID{}, err
+	}
+
+	oid, err := v.parseObjectID()
+
+	ejvr.pop()
+	return oid, err
+}
+
+func (ejvr *extJSONValueReader) ReadRegex() (pattern string, options string, err error) {
+	if err = ejvr.ensureElementValue(bsontype.Regex, 0, "ReadRegex"); err != nil {
+		return "", "", err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Regex)
+	if err != nil {
+		return "", "", err
+	}
+
+	pattern, options, err = v.parseRegex()
+
+	ejvr.pop()
+	return pattern, options, err
+}
+
+func (ejvr *extJSONValueReader) ReadString() (string, error) {
+	if err := ejvr.ensureElementValue(bsontype.String, 0, "ReadString"); err != nil {
return "", err + } + + v, err := ejvr.p.readValue(bsontype.String) + if err != nil { + return "", err + } + + if v.t != bsontype.String { + return "", fmt.Errorf("expected type string but got type %s", v.t) + } + + ejvr.pop() + return v.v.(string), nil +} + +func (ejvr *extJSONValueReader) ReadSymbol() (symbol string, err error) { + if err = ejvr.ensureElementValue(bsontype.Symbol, 0, "ReadSymbol"); err != nil { + return "", err + } + + v, err := ejvr.p.readValue(bsontype.Symbol) + if err != nil { + return "", err + } + + symbol, err = v.parseSymbol() + + ejvr.pop() + return symbol, err +} + +func (ejvr *extJSONValueReader) ReadTimestamp() (t uint32, i uint32, err error) { + if err = ejvr.ensureElementValue(bsontype.Timestamp, 0, "ReadTimestamp"); err != nil { + return 0, 0, err + } + + v, err := ejvr.p.readValue(bsontype.Timestamp) + if err != nil { + return 0, 0, err + } + + t, i, err = v.parseTimestamp() + + ejvr.pop() + return t, i, err +} + +func (ejvr *extJSONValueReader) ReadUndefined() error { + if err := ejvr.ensureElementValue(bsontype.Undefined, 0, "ReadUndefined"); err != nil { + return err + } + + v, err := ejvr.p.readValue(bsontype.Undefined) + if err != nil { + return err + } + + err = v.parseUndefined() + + ejvr.pop() + return err +} + +func (ejvr *extJSONValueReader) ReadElement() (string, ValueReader, error) { + switch ejvr.stack[ejvr.frame].mode { + case mTopLevel, mDocument, mCodeWithScope: + default: + return "", nil, ejvr.invalidTransitionErr(mElement, "ReadElement", []mode{mTopLevel, mDocument, mCodeWithScope}) + } + + name, t, err := ejvr.p.readKey() + + if err != nil { + if err == ErrEOD { + if ejvr.stack[ejvr.frame].mode == mCodeWithScope { + _, err := ejvr.p.peekType() + if err != nil { + return "", nil, err + } + } + + ejvr.pop() + } + + return "", nil, err + } + + ejvr.push(mElement, t) + return name, ejvr, nil +} + +func (ejvr *extJSONValueReader) ReadValue() (ValueReader, error) { + switch ejvr.stack[ejvr.frame].mode { + case mArray: + default: + return nil, ejvr.invalidTransitionErr(mValue, "ReadValue", []mode{mArray}) + } + + t, err := ejvr.p.peekType() + if err != nil { + if err == ErrEOA { + ejvr.pop() + } + + return nil, err + } + + ejvr.push(mValue, t) + return ejvr, nil +} diff --git a/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go new file mode 100644 index 000000000..ba39c9601 --- /dev/null +++ b/hotelReservation/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go @@ -0,0 +1,223 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// +// Based on github.com/golang/go by The Go Authors +// See THIRD-PARTY-NOTICES for original license terms. + +package bsonrw + +import "unicode/utf8" + +// safeSet holds the value true if the ASCII character with the given array +// position can be represented inside a JSON string without any further +// escaping. +// +// All values are true except for the ASCII control characters (0-31), the +// double quote ("), and the backslash character ("\"). 
+var safeSet = [utf8.RuneSelf]bool{
+	' ':      true,
+	'!':      true,
+	'"':      false,
+	'#':      true,
+	'$':      true,
+	'%':      true,
+	'&':      true,
+	'\'':     true,
+	'(':      true,
+	')':      true,
+	'*':      true,
+	'+':      true,
+	',':      true,
+	'-':      true,
+	'.':      true,
+	'/':      true,
+	'0':      true,
+	'1':      true,
+	'2':      true,
+	'3':      true,
+	'4':      true,
+	'5':      true,
+	'6':      true,
+	'7':      true,
+	'8':      true,
+	'9':      true,
+	':':      true,
+	';':      true,
+	'<':      true,
+	'=':      true,
+	'>':      true,
+	'?':      true,
+	'@':      true,
+	'A':      true,
+	'B':      true,
+	'C':      true,
+	'D':      true,
+	'E':      true,
+	'F':      true,
+	'G':      true,
+	'H':      true,
+	'I':      true,
+	'J':      true,
+	'K':      true,
+	'L':      true,
+	'M':      true,
+	'N':      true,
+	'O':      true,
+	'P':      true,
+	'Q':      true,
+	'R':      true,
+	'S':      true,
+	'T':      true,
+	'U':      true,
+	'V':      true,
+	'W':      true,
+	'X':      true,
+	'Y':      true,
+	'Z':      true,
+	'[':      true,
+	'\\':     false,
+	']':      true,
+	'^':      true,
+	'_':      true,
+	'`':      true,
+	'a':      true,
+	'b':      true,
+	'c':      true,
+	'd':      true,
+	'e':      true,
+	'f':      true,
+	'g':      true,
+	'h':      true,
+	'i':      true,
+	'j':      true,
+	'k':      true,
+	'l':      true,
+	'm':      true,
+	'n':      true,
+	'o':      true,
+	'p':      true,
+	'q':      true,
+	'r':      true,
+	's':      true,
+	't':      true,
+	'u':      true,
+	'v':      true,
+	'w':      true,
+	'x':      true,
+	'y':      true,
+	'z':      true,
+	'{':      true,
+	'|':      true,
+	'}':      true,
+	'~':      true,
+	'\u007f': true,
+}
+
+// htmlSafeSet holds the value true if the ASCII character with the given
+// array position can be safely represented inside a JSON string, embedded
+// inside of HTML