diff --git a/api/go.mod b/api/go.mod index afaa2a54a..b59986ef5 100644 --- a/api/go.mod +++ b/api/go.mod @@ -23,9 +23,9 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - golang.org/x/net v0.10.0 // indirect - golang.org/x/sys v0.9.0 // indirect - golang.org/x/text v0.9.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect golang.org/x/tools v0.9.3 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/api/go.sum b/api/go.sum index d31f65bdd..f2ffd3bd6 100644 --- a/api/go.sum +++ b/api/go.sum @@ -59,8 +59,8 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -68,12 +68,12 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= -golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= diff --git a/api/vendor/github.com/go-logr/logr/.golangci.yaml b/api/vendor/github.com/go-logr/logr/.golangci.yaml index 94ff801df..0cffafa7b 100644 --- 
a/api/vendor/github.com/go-logr/logr/.golangci.yaml +++ b/api/vendor/github.com/go-logr/logr/.golangci.yaml @@ -6,7 +6,6 @@ linters: disable-all: true enable: - asciicheck - - deadcode - errcheck - forcetypeassert - gocritic @@ -18,10 +17,8 @@ linters: - misspell - revive - staticcheck - - structcheck - typecheck - unused - - varcheck issues: exclude-use-default: false diff --git a/api/vendor/github.com/go-logr/logr/README.md b/api/vendor/github.com/go-logr/logr/README.md index ad825f5f0..ab5931181 100644 --- a/api/vendor/github.com/go-logr/logr/README.md +++ b/api/vendor/github.com/go-logr/logr/README.md @@ -105,14 +105,18 @@ with higher verbosity means more (and less important) logs will be generated. There are implementations for the following logging libraries: - **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr) +- **a testing.T** (for use in Go tests, with JSON-like output): [testr](https://github.com/go-logr/logr/tree/master/testr) - **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr) - **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr) +- **a testing.T** (with klog-like text output): [ktesting](https://git.k8s.io/klog/ktesting) - **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr) - **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr) - **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr) - **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend) - **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr) - **github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr) +- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0) +- **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing) ## FAQ diff --git a/api/vendor/github.com/go-logr/logr/discard.go b/api/vendor/github.com/go-logr/logr/discard.go index 9d92a38f1..99fe8be93 100644 --- a/api/vendor/github.com/go-logr/logr/discard.go +++ b/api/vendor/github.com/go-logr/logr/discard.go @@ -20,35 +20,5 @@ package logr // used whenever the caller is not interested in the logs. Logger instances // produced by this function always compare as equal. func Discard() Logger { - return Logger{ - level: 0, - sink: discardLogSink{}, - } -} - -// discardLogSink is a LogSink that discards all messages. -type discardLogSink struct{} - -// Verify that it actually implements the interface -var _ LogSink = discardLogSink{} - -func (l discardLogSink) Init(RuntimeInfo) { -} - -func (l discardLogSink) Enabled(int) bool { - return false -} - -func (l discardLogSink) Info(int, string, ...interface{}) { -} - -func (l discardLogSink) Error(error, string, ...interface{}) { -} - -func (l discardLogSink) WithValues(...interface{}) LogSink { - return l -} - -func (l discardLogSink) WithName(string) LogSink { - return l + return New(nil) } diff --git a/api/vendor/github.com/go-logr/logr/funcr/funcr.go b/api/vendor/github.com/go-logr/logr/funcr/funcr.go new file mode 100644 index 000000000..e52f0cd01 --- /dev/null +++ b/api/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -0,0 +1,804 @@ +/* +Copyright 2021 The logr Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package funcr implements formatting of structured log messages and +// optionally captures the call site and timestamp. +// +// The simplest way to use it is via its implementation of a +// github.com/go-logr/logr.LogSink with output through an arbitrary +// "write" function. See New and NewJSON for details. +// +// # Custom LogSinks +// +// For users who need more control, a funcr.Formatter can be embedded inside +// your own custom LogSink implementation. This is useful when the LogSink +// needs to implement additional methods, for example. +// +// # Formatting +// +// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for +// values which are being logged. When rendering a struct, funcr will use Go's +// standard JSON tags (all except "string"). +package funcr + +import ( + "bytes" + "encoding" + "encoding/json" + "fmt" + "path/filepath" + "reflect" + "runtime" + "strconv" + "strings" + "time" + + "github.com/go-logr/logr" +) + +// New returns a logr.Logger which is implemented by an arbitrary function. +func New(fn func(prefix, args string), opts Options) logr.Logger { + return logr.New(newSink(fn, NewFormatter(opts))) +} + +// NewJSON returns a logr.Logger which is implemented by an arbitrary function +// and produces JSON output. +func NewJSON(fn func(obj string), opts Options) logr.Logger { + fnWrapper := func(_, obj string) { + fn(obj) + } + return logr.New(newSink(fnWrapper, NewFormatterJSON(opts))) +} + +// Underlier exposes access to the underlying logging function. Since +// callers only have a logr.Logger, they have to know which +// implementation is in use, so this interface is less of an +// abstraction and more of a way to test type conversion. +type Underlier interface { + GetUnderlying() func(prefix, args string) +} + +func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink { + l := &fnlogger{ + Formatter: formatter, + write: fn, + } + // For skipping fnlogger.Info and fnlogger.Error. + l.Formatter.AddCallDepth(1) + return l +} + +// Options carries parameters which influence the way logs are generated. +type Options struct { + // LogCaller tells funcr to add a "caller" key to some or all log lines. + // This has some overhead, so some users might not want it. + LogCaller MessageClass + + // LogCallerFunc tells funcr to also log the calling function name. This + // has no effect if caller logging is not enabled (see Options.LogCaller). + LogCallerFunc bool + + // LogTimestamp tells funcr to add a "ts" key to log lines. This has some + // overhead, so some users might not want it. + LogTimestamp bool + + // TimestampFormat tells funcr how to render timestamps when LogTimestamp + // is enabled. If not specified, a default format will be used. For more + // details, see docs for Go's time.Layout. + TimestampFormat string + + // Verbosity tells funcr which V logs to produce. Higher values enable + // more logs. 
Info logs at or below this level will be written, while logs + // above this level will be discarded. + Verbosity int + + // RenderBuiltinsHook allows users to mutate the list of key-value pairs + // while a log line is being rendered. The kvList argument follows logr + // conventions - each pair of slice elements is comprised of a string key + // and an arbitrary value (verified and sanitized before calling this + // hook). The value returned must follow the same conventions. This hook + // can be used to audit or modify logged data. For example, you might want + // to prefix all of funcr's built-in keys with some string. This hook is + // only called for built-in (provided by funcr itself) key-value pairs. + // Equivalent hooks are offered for key-value pairs saved via + // logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and + // for user-provided pairs (see RenderArgsHook). + RenderBuiltinsHook func(kvList []interface{}) []interface{} + + // RenderValuesHook is the same as RenderBuiltinsHook, except that it is + // only called for key-value pairs saved via logr.Logger.WithValues. See + // RenderBuiltinsHook for more details. + RenderValuesHook func(kvList []interface{}) []interface{} + + // RenderArgsHook is the same as RenderBuiltinsHook, except that it is only + // called for key-value pairs passed directly to Info and Error. See + // RenderBuiltinsHook for more details. + RenderArgsHook func(kvList []interface{}) []interface{} + + // MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct + // that contains a struct, etc.) it may log. Every time it finds a struct, + // slice, array, or map the depth is increased by one. When the maximum is + // reached, the value will be converted to a string indicating that the max + // depth has been exceeded. If this field is not specified, a default + // value will be used. + MaxLogDepth int +} + +// MessageClass indicates which category or categories of messages to consider. +type MessageClass int + +const ( + // None ignores all message classes. + None MessageClass = iota + // All considers all message classes. + All + // Info only considers info messages. + Info + // Error only considers error messages. + Error +) + +// fnlogger inherits some of its LogSink implementation from Formatter +// and just needs to add some glue code. +type fnlogger struct { + Formatter + write func(prefix, args string) +} + +func (l fnlogger) WithName(name string) logr.LogSink { + l.Formatter.AddName(name) + return &l +} + +func (l fnlogger) WithValues(kvList ...interface{}) logr.LogSink { + l.Formatter.AddValues(kvList) + return &l +} + +func (l fnlogger) WithCallDepth(depth int) logr.LogSink { + l.Formatter.AddCallDepth(depth) + return &l +} + +func (l fnlogger) Info(level int, msg string, kvList ...interface{}) { + prefix, args := l.FormatInfo(level, msg, kvList) + l.write(prefix, args) +} + +func (l fnlogger) Error(err error, msg string, kvList ...interface{}) { + prefix, args := l.FormatError(err, msg, kvList) + l.write(prefix, args) +} + +func (l fnlogger) GetUnderlying() func(prefix, args string) { + return l.write +} + +// Assert conformance to the interfaces. +var _ logr.LogSink = &fnlogger{} +var _ logr.CallDepthLogSink = &fnlogger{} +var _ Underlier = &fnlogger{} + +// NewFormatter constructs a Formatter which emits a JSON-like key=value format. +func NewFormatter(opts Options) Formatter { + return newFormatter(opts, outputKeyValue) +} + +// NewFormatterJSON constructs a Formatter which emits strict JSON. 
+func NewFormatterJSON(opts Options) Formatter { + return newFormatter(opts, outputJSON) +} + +// Defaults for Options. +const defaultTimestampFormat = "2006-01-02 15:04:05.000000" +const defaultMaxLogDepth = 16 + +func newFormatter(opts Options, outfmt outputFormat) Formatter { + if opts.TimestampFormat == "" { + opts.TimestampFormat = defaultTimestampFormat + } + if opts.MaxLogDepth == 0 { + opts.MaxLogDepth = defaultMaxLogDepth + } + f := Formatter{ + outputFormat: outfmt, + prefix: "", + values: nil, + depth: 0, + opts: &opts, + } + return f +} + +// Formatter is an opaque struct which can be embedded in a LogSink +// implementation. It should be constructed with NewFormatter. Some of +// its methods directly implement logr.LogSink. +type Formatter struct { + outputFormat outputFormat + prefix string + values []interface{} + valuesStr string + depth int + opts *Options +} + +// outputFormat indicates which outputFormat to use. +type outputFormat int + +const ( + // outputKeyValue emits a JSON-like key=value format, but not strict JSON. + outputKeyValue outputFormat = iota + // outputJSON emits strict JSON. + outputJSON +) + +// PseudoStruct is a list of key-value pairs that gets logged as a struct. +type PseudoStruct []interface{} + +// render produces a log line, ready to use. +func (f Formatter) render(builtins, args []interface{}) string { + // Empirically bytes.Buffer is faster than strings.Builder for this. + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + if f.outputFormat == outputJSON { + buf.WriteByte('{') + } + vals := builtins + if hook := f.opts.RenderBuiltinsHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + f.flatten(buf, vals, false, false) // keys are ours, no need to escape + continuing := len(builtins) > 0 + if len(f.valuesStr) > 0 { + if continuing { + if f.outputFormat == outputJSON { + buf.WriteByte(',') + } else { + buf.WriteByte(' ') + } + } + continuing = true + buf.WriteString(f.valuesStr) + } + vals = args + if hook := f.opts.RenderArgsHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + f.flatten(buf, vals, continuing, true) // escape user-provided keys + if f.outputFormat == outputJSON { + buf.WriteByte('}') + } + return buf.String() +} + +// flatten renders a list of key-value pairs into a buffer. If continuing is +// true, it assumes that the buffer has previous values and will emit a +// separator (which depends on the output format) before the first pair it +// writes. If escapeKeys is true, the keys are assumed to have +// non-JSON-compatible characters in them and must be evaluated for escapes. +// +// This function returns a potentially modified version of kvList, which +// ensures that there is a value for every key (adding a value if needed) and +// that each key is a string (substituting a key if needed). +func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing bool, escapeKeys bool) []interface{} { + // This logic overlaps with sanitize() but saves one type-cast per key, + // which can be measurable. + if len(kvList)%2 != 0 { + kvList = append(kvList, noValue) + } + for i := 0; i < len(kvList); i += 2 { + k, ok := kvList[i].(string) + if !ok { + k = f.nonStringKey(kvList[i]) + kvList[i] = k + } + v := kvList[i+1] + + if i > 0 || continuing { + if f.outputFormat == outputJSON { + buf.WriteByte(',') + } else { + // In theory the format could be something we don't understand. In + // practice, we control it, so it won't be. 
+				buf.WriteByte(' ')
+			}
+		}
+
+		if escapeKeys {
+			buf.WriteString(prettyString(k))
+		} else {
+			// this is faster
+			buf.WriteByte('"')
+			buf.WriteString(k)
+			buf.WriteByte('"')
+		}
+		if f.outputFormat == outputJSON {
+			buf.WriteByte(':')
+		} else {
+			buf.WriteByte('=')
+		}
+		buf.WriteString(f.pretty(v))
+	}
+	return kvList
+}
+
+func (f Formatter) pretty(value interface{}) string {
+	return f.prettyWithFlags(value, 0, 0)
+}
+
+const (
+	flagRawStruct = 0x1 // do not print braces on structs
+)
+
+// TODO: This is not fast. Most of the overhead goes here.
+func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string {
+	if depth > f.opts.MaxLogDepth {
+		return `"<max-log-depth-exceeded>"`
+	}
+
+	// Handle types that take full control of logging.
+	if v, ok := value.(logr.Marshaler); ok {
+		// Replace the value with what the type wants to get logged.
+		// That then gets handled below via reflection.
+		value = invokeMarshaler(v)
+	}
+
+	// Handle types that want to format themselves.
+	switch v := value.(type) {
+	case fmt.Stringer:
+		value = invokeStringer(v)
+	case error:
+		value = invokeError(v)
+	}
+
+	// Handling the most common types without reflect is a small perf win.
+	switch v := value.(type) {
+	case bool:
+		return strconv.FormatBool(v)
+	case string:
+		return prettyString(v)
+	case int:
+		return strconv.FormatInt(int64(v), 10)
+	case int8:
+		return strconv.FormatInt(int64(v), 10)
+	case int16:
+		return strconv.FormatInt(int64(v), 10)
+	case int32:
+		return strconv.FormatInt(int64(v), 10)
+	case int64:
+		return strconv.FormatInt(int64(v), 10)
+	case uint:
+		return strconv.FormatUint(uint64(v), 10)
+	case uint8:
+		return strconv.FormatUint(uint64(v), 10)
+	case uint16:
+		return strconv.FormatUint(uint64(v), 10)
+	case uint32:
+		return strconv.FormatUint(uint64(v), 10)
+	case uint64:
+		return strconv.FormatUint(v, 10)
+	case uintptr:
+		return strconv.FormatUint(uint64(v), 10)
+	case float32:
+		return strconv.FormatFloat(float64(v), 'f', -1, 32)
+	case float64:
+		return strconv.FormatFloat(v, 'f', -1, 64)
+	case complex64:
+		return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"`
+	case complex128:
+		return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"`
+	case PseudoStruct:
+		buf := bytes.NewBuffer(make([]byte, 0, 1024))
+		v = f.sanitize(v)
+		if flags&flagRawStruct == 0 {
+			buf.WriteByte('{')
+		}
+		for i := 0; i < len(v); i += 2 {
+			if i > 0 {
+				buf.WriteByte(',')
+			}
+			k, _ := v[i].(string) // sanitize() above means no need to check success
+			// arbitrary keys might need escaping
+			buf.WriteString(prettyString(k))
+			buf.WriteByte(':')
+			buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
+		}
+		if flags&flagRawStruct == 0 {
+			buf.WriteByte('}')
+		}
+		return buf.String()
+	}
+
+	buf := bytes.NewBuffer(make([]byte, 0, 256))
+	t := reflect.TypeOf(value)
+	if t == nil {
+		return "null"
+	}
+	v := reflect.ValueOf(value)
+	switch t.Kind() {
+	case reflect.Bool:
+		return strconv.FormatBool(v.Bool())
+	case reflect.String:
+		return prettyString(v.String())
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return strconv.FormatInt(int64(v.Int()), 10)
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return strconv.FormatUint(uint64(v.Uint()), 10)
+	case reflect.Float32:
+		return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32)
+	case reflect.Float64:
+		return strconv.FormatFloat(v.Float(), 'f', -1, 64)
+	case reflect.Complex64:
+		return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"`
+	case reflect.Complex128:
+		return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"`
+	case reflect.Struct:
+		if flags&flagRawStruct == 0 {
+			buf.WriteByte('{')
+		}
+		printComma := false // testing i>0 is not enough because of JSON omitted fields
+		for i := 0; i < t.NumField(); i++ {
+			fld := t.Field(i)
+			if fld.PkgPath != "" {
+				// reflect says this field is only defined for non-exported fields.
+				continue
+			}
+			if !v.Field(i).CanInterface() {
+				// reflect isn't clear exactly what this means, but we can't use it.
+				continue
+			}
+			name := ""
+			omitempty := false
+			if tag, found := fld.Tag.Lookup("json"); found {
+				if tag == "-" {
+					continue
+				}
+				if comma := strings.Index(tag, ","); comma != -1 {
+					if n := tag[:comma]; n != "" {
+						name = n
+					}
+					rest := tag[comma:]
+					if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") {
+						omitempty = true
+					}
+				} else {
+					name = tag
+				}
+			}
+			if omitempty && isEmpty(v.Field(i)) {
+				continue
+			}
+			if printComma {
+				buf.WriteByte(',')
+			}
+			printComma = true // if we got here, we are rendering a field
+			if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
+				buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1))
+				continue
+			}
+			if name == "" {
+				name = fld.Name
+			}
+			// field names can't contain characters which need escaping
+			buf.WriteByte('"')
+			buf.WriteString(name)
+			buf.WriteByte('"')
+			buf.WriteByte(':')
+			buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
+		}
+		if flags&flagRawStruct == 0 {
+			buf.WriteByte('}')
+		}
+		return buf.String()
+	case reflect.Slice, reflect.Array:
+		// If this is outputting as JSON make sure this isn't really a json.RawMessage.
+		// If so just emit "as-is" and don't pretty it as that will just print
+		// it as [X,Y,Z,...] which isn't terribly useful vs the string form you really want.
+		if f.outputFormat == outputJSON {
+			if rm, ok := value.(json.RawMessage); ok {
+				// If it's empty make sure we emit an empty value as the array style would below.
+				if len(rm) > 0 {
+					buf.Write(rm)
+				} else {
+					buf.WriteString("null")
+				}
+				return buf.String()
+			}
+		}
+		buf.WriteByte('[')
+		for i := 0; i < v.Len(); i++ {
+			if i > 0 {
+				buf.WriteByte(',')
+			}
+			e := v.Index(i)
+			buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
+		}
+		buf.WriteByte(']')
+		return buf.String()
+	case reflect.Map:
+		buf.WriteByte('{')
+		// This does not sort the map keys, for best perf.
+		it := v.MapRange()
+		i := 0
+		for it.Next() {
+			if i > 0 {
+				buf.WriteByte(',')
+			}
+			// If a map key supports TextMarshaler, use it.
+			keystr := ""
+			if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok {
+				txt, err := m.MarshalText()
+				if err != nil {
+					keystr = fmt.Sprintf("<error-MarshalText: %s>", err.Error())
+				} else {
+					keystr = string(txt)
+				}
+				keystr = prettyString(keystr)
+			} else {
+				// prettyWithFlags will produce already-escaped values
+				keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1)
+				if t.Key().Kind() != reflect.String {
+					// JSON only does string keys. Unlike Go's standard JSON, we'll
+					// convert just about anything to a string.
+					keystr = prettyString(keystr)
+				}
+			}
+			buf.WriteString(keystr)
+			buf.WriteByte(':')
+			buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
+			i++
+		}
+		buf.WriteByte('}')
+		return buf.String()
+	case reflect.Ptr, reflect.Interface:
+		if v.IsNil() {
+			return "null"
+		}
+		return f.prettyWithFlags(v.Elem().Interface(), 0, depth)
+	}
+	return fmt.Sprintf(`"<unhandled-%s>"`, t.Kind().String())
+}
+
+func prettyString(s string) string {
+	// Avoid escaping (which does allocations) if we can.
+	if needsEscape(s) {
+		return strconv.Quote(s)
+	}
+	b := bytes.NewBuffer(make([]byte, 0, 1024))
+	b.WriteByte('"')
+	b.WriteString(s)
+	b.WriteByte('"')
+	return b.String()
+}
+
+// needsEscape determines whether the input string needs to be escaped or not,
+// without doing any allocations.
+func needsEscape(s string) bool {
+	for _, r := range s {
+		if !strconv.IsPrint(r) || r == '\\' || r == '"' {
+			return true
+		}
+	}
+	return false
+}
+
+func isEmpty(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Complex64, reflect.Complex128:
+		return v.Complex() == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	}
+	return false
+}
+
+func invokeMarshaler(m logr.Marshaler) (ret interface{}) {
+	defer func() {
+		if r := recover(); r != nil {
+			ret = fmt.Sprintf("<panic: %s>", r)
+		}
+	}()
+	return m.MarshalLog()
+}
+
+func invokeStringer(s fmt.Stringer) (ret string) {
+	defer func() {
+		if r := recover(); r != nil {
+			ret = fmt.Sprintf("<panic: %s>", r)
+		}
+	}()
+	return s.String()
+}
+
+func invokeError(e error) (ret string) {
+	defer func() {
+		if r := recover(); r != nil {
+			ret = fmt.Sprintf("<panic: %s>", r)
		}
+	}()
+	return e.Error()
+}
+
+// Caller represents the original call site for a log line, after considering
+// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and
+// Line fields will always be provided, while the Func field is optional.
+// Users can set the render hook fields in Options to examine logged key-value
+// pairs, one of which will be {"caller", Caller} if the Options.LogCaller
+// field is enabled for the given MessageClass.
+type Caller struct {
+	// File is the basename of the file for this call site.
+	File string `json:"file"`
+	// Line is the line number in the file for this call site.
+	Line int `json:"line"`
+	// Func is the function name for this call site, or empty if
+	// Options.LogCallerFunc is not enabled.
+	Func string `json:"function,omitempty"`
+}
+
+func (f Formatter) caller() Caller {
+	// +1 for this frame, +1 for Info/Error.
+	pc, file, line, ok := runtime.Caller(f.depth + 2)
+	if !ok {
+		return Caller{"<unknown>", 0, ""}
+	}
+	fn := ""
+	if f.opts.LogCallerFunc {
+		if fp := runtime.FuncForPC(pc); fp != nil {
+			fn = fp.Name()
+		}
+	}
+
+	return Caller{filepath.Base(file), line, fn}
+}
+
+const noValue = "<no-value>"
+
+func (f Formatter) nonStringKey(v interface{}) string {
+	return fmt.Sprintf("<non-string-key: %s>", f.snippet(v))
+}
+
+// snippet produces a short snippet string of an arbitrary value.
+func (f Formatter) snippet(v interface{}) string { + const snipLen = 16 + + snip := f.pretty(v) + if len(snip) > snipLen { + snip = snip[:snipLen] + } + return snip +} + +// sanitize ensures that a list of key-value pairs has a value for every key +// (adding a value if needed) and that each key is a string (substituting a key +// if needed). +func (f Formatter) sanitize(kvList []interface{}) []interface{} { + if len(kvList)%2 != 0 { + kvList = append(kvList, noValue) + } + for i := 0; i < len(kvList); i += 2 { + _, ok := kvList[i].(string) + if !ok { + kvList[i] = f.nonStringKey(kvList[i]) + } + } + return kvList +} + +// Init configures this Formatter from runtime info, such as the call depth +// imposed by logr itself. +// Note that this receiver is a pointer, so depth can be saved. +func (f *Formatter) Init(info logr.RuntimeInfo) { + f.depth += info.CallDepth +} + +// Enabled checks whether an info message at the given level should be logged. +func (f Formatter) Enabled(level int) bool { + return level <= f.opts.Verbosity +} + +// GetDepth returns the current depth of this Formatter. This is useful for +// implementations which do their own caller attribution. +func (f Formatter) GetDepth() int { + return f.depth +} + +// FormatInfo renders an Info log message into strings. The prefix will be +// empty when no names were set (via AddNames), or when the output is +// configured for JSON. +func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (prefix, argsStr string) { + args := make([]interface{}, 0, 64) // using a constant here impacts perf + prefix = f.prefix + if f.outputFormat == outputJSON { + args = append(args, "logger", prefix) + prefix = "" + } + if f.opts.LogTimestamp { + args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat)) + } + if policy := f.opts.LogCaller; policy == All || policy == Info { + args = append(args, "caller", f.caller()) + } + args = append(args, "level", level, "msg", msg) + return prefix, f.render(args, kvList) +} + +// FormatError renders an Error log message into strings. The prefix will be +// empty when no names were set (via AddNames), or when the output is +// configured for JSON. +func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (prefix, argsStr string) { + args := make([]interface{}, 0, 64) // using a constant here impacts perf + prefix = f.prefix + if f.outputFormat == outputJSON { + args = append(args, "logger", prefix) + prefix = "" + } + if f.opts.LogTimestamp { + args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat)) + } + if policy := f.opts.LogCaller; policy == All || policy == Error { + args = append(args, "caller", f.caller()) + } + args = append(args, "msg", msg) + var loggableErr interface{} + if err != nil { + loggableErr = err.Error() + } + args = append(args, "error", loggableErr) + return f.prefix, f.render(args, kvList) +} + +// AddName appends the specified name. funcr uses '/' characters to separate +// name elements. Callers should not pass '/' in the provided name string, but +// this library does not actually enforce that. +func (f *Formatter) AddName(name string) { + if len(f.prefix) > 0 { + f.prefix += "/" + } + f.prefix += name +} + +// AddValues adds key-value pairs to the set of saved values to be logged with +// each log line. +func (f *Formatter) AddValues(kvList []interface{}) { + // Three slice args forces a copy. + n := len(f.values) + f.values = append(f.values[:n:n], kvList...) 
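// Note on the full slice expression above: f.values[:n:n] caps capacity at
// len, so the append cannot grow into spare capacity shared with a parent
// Logger's backing array and is forced to allocate a fresh one. A minimal
// standalone sketch of that Go mechanism (illustrative, not vendored code):
//
//	base := []interface{}{"k1", "v1"}
//	a := append(base[:2:2], "k2", "v2") // cap == len forces a new array
//	b := append(base[:2:2], "k3", "v3") // independent of a; base is unchanged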
+ + vals := f.values + if hook := f.opts.RenderValuesHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + + // Pre-render values, so we don't have to do it on each Info/Error call. + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + f.flatten(buf, vals, false, true) // escape user-provided keys + f.valuesStr = buf.String() +} + +// AddCallDepth increases the number of stack-frames to skip when attributing +// the log line to a file and line. +func (f *Formatter) AddCallDepth(depth int) { + f.depth += depth +} diff --git a/api/vendor/github.com/go-logr/logr/logr.go b/api/vendor/github.com/go-logr/logr/logr.go index 44cd398c9..e027aea3f 100644 --- a/api/vendor/github.com/go-logr/logr/logr.go +++ b/api/vendor/github.com/go-logr/logr/logr.go @@ -21,7 +21,7 @@ limitations under the License. // to back that API. Packages in the Go ecosystem can depend on this package, // while callers can implement logging with whatever backend is appropriate. // -// Usage +// # Usage // // Logging is done using a Logger instance. Logger is a concrete type with // methods, which defers the actual logging to a LogSink interface. The main @@ -30,22 +30,28 @@ limitations under the License. // "structured logging". // // With Go's standard log package, we might write: -// log.Printf("setting target value %s", targetValue) +// +// log.Printf("setting target value %s", targetValue) // // With logr's structured logging, we'd write: -// logger.Info("setting target", "value", targetValue) +// +// logger.Info("setting target", "value", targetValue) // // Errors are much the same. Instead of: -// log.Printf("failed to open the pod bay door for user %s: %v", user, err) +// +// log.Printf("failed to open the pod bay door for user %s: %v", user, err) // // We'd write: -// logger.Error(err, "failed to open the pod bay door", "user", user) +// +// logger.Error(err, "failed to open the pod bay door", "user", user) // // Info() and Error() are very similar, but they are separate methods so that // LogSink implementations can choose to do things like attach additional -// information (such as stack traces) on calls to Error(). +// information (such as stack traces) on calls to Error(). Error() messages are +// always logged, regardless of the current verbosity. If there is no error +// instance available, passing nil is valid. // -// Verbosity +// # Verbosity // // Often we want to log information only when the application in "verbose // mode". To write log lines that are more verbose, Logger has a V() method. @@ -53,22 +59,25 @@ limitations under the License. // Log-lines with V-levels that are not enabled (as per the LogSink) will not // be written. Level V(0) is the default, and logger.V(0).Info() has the same // meaning as logger.Info(). Negative V-levels have the same meaning as V(0). +// Error messages do not have a verbosity level and are always logged. // // Where we might have written: -// if flVerbose >= 2 { -// log.Printf("an unusual thing happened") -// } +// +// if flVerbose >= 2 { +// log.Printf("an unusual thing happened") +// } // // We can write: -// logger.V(2).Info("an unusual thing happened") // -// Logger Names +// logger.V(2).Info("an unusual thing happened") +// +// # Logger Names // // Logger instances can have name strings so that all messages logged through // that instance have additional context. 
For example, you might want to add // a subsystem name: // -// logger.WithName("compactor").Info("started", "time", time.Now()) +// logger.WithName("compactor").Info("started", "time", time.Now()) // // The WithName() method returns a new Logger, which can be passed to // constructors or other functions for further use. Repeated use of WithName() @@ -79,25 +88,27 @@ limitations under the License. // joining operation (e.g. whitespace, commas, periods, slashes, brackets, // quotes, etc). // -// Saved Values +// # Saved Values // // Logger instances can store any number of key/value pairs, which will be // logged alongside all messages logged through that instance. For example, // you might want to create a Logger instance per managed object: // // With the standard log package, we might write: -// log.Printf("decided to set field foo to value %q for object %s/%s", -// targetValue, object.Namespace, object.Name) +// +// log.Printf("decided to set field foo to value %q for object %s/%s", +// targetValue, object.Namespace, object.Name) // // With logr we'd write: -// // Elsewhere: set up the logger to log the object name. -// obj.logger = mainLogger.WithValues( -// "name", obj.name, "namespace", obj.namespace) // -// // later on... -// obj.logger.Info("setting foo", "value", targetValue) +// // Elsewhere: set up the logger to log the object name. +// obj.logger = mainLogger.WithValues( +// "name", obj.name, "namespace", obj.namespace) +// +// // later on... +// obj.logger.Info("setting foo", "value", targetValue) // -// Best Practices +// # Best Practices // // Logger has very few hard rules, with the goal that LogSink implementations // might have a lot of freedom to differentiate. There are, however, some @@ -112,15 +123,24 @@ limitations under the License. // may be any Go value, but how the value is formatted is determined by the // LogSink implementation. // -// Key Naming Conventions +// Logger instances are meant to be passed around by value. Code that receives +// such a value can call its methods without having to check whether the +// instance is ready for use. +// +// Calling methods with the null logger (Logger{}) as instance will crash +// because it has no LogSink. Therefore this null logger should never be passed +// around. For cases where passing a logger is optional, a pointer to Logger +// should be used. +// +// # Key Naming Conventions // // Keys are not strictly required to conform to any specification or regex, but // it is recommended that they: -// * be human-readable and meaningful (not auto-generated or simple ordinals) -// * be constant (not dependent on input data) -// * contain only printable characters -// * not contain whitespace or punctuation -// * use lower case for simple keys and lowerCamelCase for more complex ones +// - be human-readable and meaningful (not auto-generated or simple ordinals) +// - be constant (not dependent on input data) +// - contain only printable characters +// - not contain whitespace or punctuation +// - use lower case for simple keys and lowerCamelCase for more complex ones // // These guidelines help ensure that log data is processed properly regardless // of the log implementation. For example, log implementations will try to @@ -129,51 +149,54 @@ limitations under the License. 
// While users are generally free to use key names of their choice, it's // generally best to avoid using the following keys, as they're frequently used // by implementations: -// * "caller": the calling information (file/line) of a particular log line -// * "error": the underlying error value in the `Error` method -// * "level": the log level -// * "logger": the name of the associated logger -// * "msg": the log message -// * "stacktrace": the stack trace associated with a particular log line or -// error (often from the `Error` message) -// * "ts": the timestamp for a log line +// - "caller": the calling information (file/line) of a particular log line +// - "error": the underlying error value in the `Error` method +// - "level": the log level +// - "logger": the name of the associated logger +// - "msg": the log message +// - "stacktrace": the stack trace associated with a particular log line or +// error (often from the `Error` message) +// - "ts": the timestamp for a log line // // Implementations are encouraged to make use of these keys to represent the // above concepts, when necessary (for example, in a pure-JSON output form, it // would be necessary to represent at least message and timestamp as ordinary // named values). // -// Break Glass +// # Break Glass // // Implementations may choose to give callers access to the underlying // logging implementation. The recommended pattern for this is: -// // Underlier exposes access to the underlying logging implementation. -// // Since callers only have a logr.Logger, they have to know which -// // implementation is in use, so this interface is less of an abstraction -// // and more of way to test type conversion. -// type Underlier interface { -// GetUnderlying() -// } +// +// // Underlier exposes access to the underlying logging implementation. +// // Since callers only have a logr.Logger, they have to know which +// // implementation is in use, so this interface is less of an abstraction +// // and more of way to test type conversion. +// type Underlier interface { +// GetUnderlying() +// } // // Logger grants access to the sink to enable type assertions like this: -// func DoSomethingWithImpl(log logr.Logger) { -// if underlier, ok := log.GetSink()(impl.Underlier) { -// implLogger := underlier.GetUnderlying() -// ... -// } -// } +// +// func DoSomethingWithImpl(log logr.Logger) { +// if underlier, ok := log.GetSink().(impl.Underlier); ok { +// implLogger := underlier.GetUnderlying() +// ... +// } +// } // // Custom `With*` functions can be implemented by copying the complete // Logger struct and replacing the sink in the copy: -// // WithFooBar changes the foobar parameter in the log sink and returns a -// // new logger with that modified sink. It does nothing for loggers where -// // the sink doesn't support that parameter. -// func WithFoobar(log logr.Logger, foobar int) logr.Logger { -// if foobarLogSink, ok := log.GetSink()(FoobarSink); ok { -// log = log.WithSink(foobarLogSink.WithFooBar(foobar)) -// } -// return log -// } +// +// // WithFooBar changes the foobar parameter in the log sink and returns a +// // new logger with that modified sink. It does nothing for loggers where +// // the sink doesn't support that parameter. +// func WithFoobar(log logr.Logger, foobar int) logr.Logger { +// if foobarLogSink, ok := log.GetSink().(FoobarSink); ok { +// log = log.WithSink(foobarLogSink.WithFooBar(foobar)) +// } +// return log +// } // // Don't use New to construct a new Logger with a LogSink retrieved from an // existing Logger. 
Source code attribution might not work correctly and @@ -189,11 +212,14 @@ import ( ) // New returns a new Logger instance. This is primarily used by libraries -// implementing LogSink, rather than end users. +// implementing LogSink, rather than end users. Passing a nil sink will create +// a Logger which discards all log lines. func New(sink LogSink) Logger { logger := Logger{} logger.setSink(sink) - sink.Init(runtimeInfo) + if sink != nil { + sink.Init(runtimeInfo) + } return logger } @@ -232,7 +258,7 @@ type Logger struct { // Enabled tests whether this Logger is enabled. For example, commandline // flags might be used to set the logging verbosity and disable some info logs. func (l Logger) Enabled() bool { - return l.sink.Enabled(l.level) + return l.sink != nil && l.sink.Enabled(l.level) } // Info logs a non-error message with the given key/value pairs as context. @@ -242,6 +268,9 @@ func (l Logger) Enabled() bool { // information. The key/value pairs must alternate string keys and arbitrary // values. func (l Logger) Info(msg string, keysAndValues ...interface{}) { + if l.sink == nil { + return + } if l.Enabled() { if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { withHelper.GetCallStackHelper()() @@ -253,12 +282,17 @@ func (l Logger) Info(msg string, keysAndValues ...interface{}) { // Error logs an error, with the given message and key/value pairs as context. // It functions similarly to Info, but may have unique behavior, and should be // preferred for logging errors (see the package documentations for more -// information). +// information). The log message will always be emitted, regardless of +// verbosity level. // // The msg argument should be used to add context to any underlying error, // while the err argument should be used to attach the actual error that -// triggered this log line, if present. +// triggered this log line, if present. The err parameter is optional +// and nil may be passed instead of an error instance. func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) { + if l.sink == nil { + return + } if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { withHelper.GetCallStackHelper()() } @@ -270,6 +304,9 @@ func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) { // level means a log message is less important. Negative V-levels are treated // as 0. func (l Logger) V(level int) Logger { + if l.sink == nil { + return l + } if level < 0 { level = 0 } @@ -280,6 +317,9 @@ func (l Logger) V(level int) Logger { // WithValues returns a new Logger instance with additional key/value pairs. // See Info for documentation on how key/value pairs work. func (l Logger) WithValues(keysAndValues ...interface{}) Logger { + if l.sink == nil { + return l + } l.setSink(l.sink.WithValues(keysAndValues...)) return l } @@ -290,6 +330,9 @@ func (l Logger) WithValues(keysAndValues ...interface{}) Logger { // contain only letters, digits, and hyphens (see the package documentation for // more information). func (l Logger) WithName(name string) Logger { + if l.sink == nil { + return l + } l.setSink(l.sink.WithName(name)) return l } @@ -310,6 +353,9 @@ func (l Logger) WithName(name string) Logger { // WithCallDepth(1) because it works with implementions that support the // CallDepthLogSink and/or CallStackHelperLogSink interfaces. 
func (l Logger) WithCallDepth(depth int) Logger { + if l.sink == nil { + return l + } if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { l.setSink(withCallDepth.WithCallDepth(depth)) } @@ -331,6 +377,9 @@ func (l Logger) WithCallDepth(depth int) Logger { // implementation does not support either of these, the original Logger will be // returned. func (l Logger) WithCallStackHelper() (func(), Logger) { + if l.sink == nil { + return func() {}, l + } var helper func() if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { l.setSink(withCallDepth.WithCallDepth(1)) @@ -343,6 +392,11 @@ func (l Logger) WithCallStackHelper() (func(), Logger) { return helper, l } +// IsZero returns true if this logger is an uninitialized zero value +func (l Logger) IsZero() bool { + return l.sink == nil +} + // contextKey is how we find Loggers in a context.Context. type contextKey struct{} @@ -428,7 +482,7 @@ type LogSink interface { WithName(name string) LogSink } -// CallDepthLogSink represents a Logger that knows how to climb the call stack +// CallDepthLogSink represents a LogSink that knows how to climb the call stack // to identify the original call site and can offset the depth by a specified // number of frames. This is useful for users who have helper functions // between the "real" call site and the actual calls to Logger methods. @@ -453,7 +507,7 @@ type CallDepthLogSink interface { WithCallDepth(depth int) LogSink } -// CallStackHelperLogSink represents a Logger that knows how to climb +// CallStackHelperLogSink represents a LogSink that knows how to climb // the call stack to identify the original call site and can skip // intermediate helper functions if they mark themselves as // helper. Go's testing package uses that approach. diff --git a/api/vendor/github.com/go-task/slim-sprig/.editorconfig b/api/vendor/github.com/go-task/slim-sprig/.editorconfig new file mode 100644 index 000000000..b0c95367e --- /dev/null +++ b/api/vendor/github.com/go-task/slim-sprig/.editorconfig @@ -0,0 +1,14 @@ +# editorconfig.org + +root = true + +[*] +insert_final_newline = true +charset = utf-8 +trim_trailing_whitespace = true +indent_style = tab +indent_size = 8 + +[*.{md,yml,yaml,json}] +indent_style = space +indent_size = 2 diff --git a/api/vendor/github.com/go-task/slim-sprig/.gitattributes b/api/vendor/github.com/go-task/slim-sprig/.gitattributes new file mode 100644 index 000000000..176a458f9 --- /dev/null +++ b/api/vendor/github.com/go-task/slim-sprig/.gitattributes @@ -0,0 +1 @@ +* text=auto diff --git a/api/vendor/github.com/go-task/slim-sprig/.gitignore b/api/vendor/github.com/go-task/slim-sprig/.gitignore new file mode 100644 index 000000000..5e3002f88 --- /dev/null +++ b/api/vendor/github.com/go-task/slim-sprig/.gitignore @@ -0,0 +1,2 @@ +vendor/ +/.glide diff --git a/api/vendor/github.com/go-task/slim-sprig/CHANGELOG.md b/api/vendor/github.com/go-task/slim-sprig/CHANGELOG.md new file mode 100644 index 000000000..61d8ebffc --- /dev/null +++ b/api/vendor/github.com/go-task/slim-sprig/CHANGELOG.md @@ -0,0 +1,364 @@ +# Changelog + +## Release 3.2.0 (2020-12-14) + +### Added + +- #211: Added randInt function (thanks @kochurovro) +- #223: Added fromJson and mustFromJson functions (thanks @mholt) +- #242: Added a bcrypt function (thanks @robbiet480) +- #253: Added randBytes function (thanks @MikaelSmith) +- #254: Added dig function for dicts (thanks @nyarly) +- #257: Added regexQuoteMeta for quoting regex metadata (thanks @rheaton) +- #261: Added filepath functions osBase, osDir, osExt, osClean, osIsAbs 
(thanks @zugl) +- #268: Added and and all functions for testing conditions (thanks @phuslu) +- #181: Added float64 arithmetic addf, add1f, subf, divf, mulf, maxf, and minf + (thanks @andrewmostello) +- #265: Added chunk function to split array into smaller arrays (thanks @karelbilek) +- #270: Extend certificate functions to handle non-RSA keys + add support for + ed25519 keys (thanks @misberner) + +### Changed + +- Removed testing and support for Go 1.12. ed25519 support requires Go 1.13 or newer +- Using semver 3.1.1 and mergo 0.3.11 + +### Fixed + +- #249: Fix htmlDateInZone example (thanks @spawnia) + +NOTE: The dependency github.com/imdario/mergo reverted the breaking change in +0.3.9 via 0.3.10 release. + +## Release 3.1.0 (2020-04-16) + +NOTE: The dependency github.com/imdario/mergo made a behavior change in 0.3.9 +that impacts sprig functionality. Do not use sprig with a version newer than 0.3.8. + +### Added + +- #225: Added support for generating htpasswd hash (thanks @rustycl0ck) +- #224: Added duration filter (thanks @frebib) +- #205: Added `seq` function (thanks @thadc23) + +### Changed + +- #203: Unlambda functions with correct signature (thanks @muesli) +- #236: Updated the license formatting for GitHub display purposes +- #238: Updated package dependency versions. Note, mergo not updated to 0.3.9 + as it causes a breaking change for sprig. That issue is tracked at + https://github.com/imdario/mergo/issues/139 + +### Fixed + +- #229: Fix `seq` example in docs (thanks @kalmant) + +## Release 3.0.2 (2019-12-13) + +### Fixed + +- #220: Updating to semver v3.0.3 to fix issue with <= ranges +- #218: fix typo elyptical->elliptic in ecdsa key description (thanks @laverya) + +## Release 3.0.1 (2019-12-08) + +### Fixed + +- #212: Updated semver fixing broken constraint checking with ^0.0 + +## Release 3.0.0 (2019-10-02) + +### Added + +- #187: Added durationRound function (thanks @yjp20) +- #189: Added numerous template functions that return errors rather than panic (thanks @nrvnrvn) +- #193: Added toRawJson support (thanks @Dean-Coakley) +- #197: Added get support to dicts (thanks @Dean-Coakley) + +### Changed + +- #186: Moving dependency management to Go modules +- #186: Updated semver to v3. This has changes in the way ^ is handled +- #194: Updated documentation on merging and how it copies. 
Added example using deepCopy +- #196: trunc now supports negative values (thanks @Dean-Coakley) + +## Release 2.22.0 (2019-10-02) + +### Added + +- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos) +- #195: Added deepCopy function for use with dicts + +### Changed + +- Updated merge and mergeOverwrite documentation to explain copying and how to + use deepCopy with it + +## Release 2.21.0 (2019-09-18) + +### Added + +- #122: Added encryptAES/decryptAES functions (thanks @n0madic) +- #128: Added toDecimal support (thanks @Dean-Coakley) +- #169: Added list contcat (thanks @astorath) +- #174: Added deepEqual function (thanks @bonifaido) +- #170: Added url parse and join functions (thanks @astorath) + +### Changed + +- #171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify + +### Fixed + +- #172: Fix semver wildcard example (thanks @piepmatz) +- #175: Fix dateInZone doc example (thanks @s3than) + +## Release 2.20.0 (2019-06-18) + +### Added + +- #164: Adding function to get unix epoch for a time (@mattfarina) +- #166: Adding tests for date_in_zone (@mattfarina) + +### Changed + +- #144: Fix function comments based on best practices from Effective Go (@CodeLingoTeam) +- #150: Handles pointer type for time.Time in "htmlDate" (@mapreal19) +- #161, #157, #160, #153, #158, #156, #155, #159, #152 documentation updates (@badeadan) + +### Fixed + +## Release 2.19.0 (2019-03-02) + +IMPORTANT: This release reverts a change from 2.18.0 + +In the previous release (2.18), we prematurely merged a partial change to the crypto functions that led to creating two sets of crypto functions (I blame @technosophos -- since that's me). This release rolls back that change, and does what was originally intended: It alters the existing crypto functions to use secure random. + +We debated whether this classifies as a change worthy of major revision, but given the proximity to the last release, we have decided that treating 2.18 as a faulty release is the correct course of action. We apologize for any inconvenience. + +### Changed + +- Fix substr panic 35fb796 (Alexey igrychev) +- Remove extra period 1eb7729 (Matthew Lorimor) +- Make random string functions use crypto by default 6ceff26 (Matthew Lorimor) +- README edits/fixes/suggestions 08fe136 (Lauri Apple) + + +## Release 2.18.0 (2019-02-12) + +### Added + +- Added mergeOverwrite function +- cryptographic functions that use secure random (see fe1de12) + +### Changed + +- Improve documentation of regexMatch function, resolves #139 90b89ce (Jan Tagscherer) +- Handle has for nil list 9c10885 (Daniel Cohen) +- Document behaviour of mergeOverwrite fe0dbe9 (Lukas Rieder) +- doc: adds missing documentation. 4b871e6 (Fernandez Ludovic) +- Replace outdated goutils imports 01893d2 (Matthew Lorimor) +- Surface crypto secure random strings from goutils fe1de12 (Matthew Lorimor) +- Handle untyped nil values as paramters to string functions 2b2ec8f (Morten Torkildsen) + +### Fixed + +- Fix dict merge issue and provide mergeOverwrite .dst .src1 to overwrite from src -> dst 4c59c12 (Lukas Rieder) +- Fix substr var names and comments d581f80 (Dean Coakley) +- Fix substr documentation 2737203 (Dean Coakley) + +## Release 2.17.1 (2019-01-03) + +### Fixed + +The 2.17.0 release did not have a version pinned for xstrings, which caused compilation failures when xstrings < 1.2 was used. This adds the correct version string to glide.yaml. 
+ +## Release 2.17.0 (2019-01-03) + +### Added + +- adds alder32sum function and test 6908fc2 (marshallford) +- Added kebabcase function ca331a1 (Ilyes512) + +### Changed + +- Update goutils to 1.1.0 4e1125d (Matt Butcher) + +### Fixed + +- Fix 'has' documentation e3f2a85 (dean-coakley) +- docs(dict): fix typo in pick example dc424f9 (Dustin Specker) +- fixes spelling errors... not sure how that happened 4cf188a (marshallford) + +## Release 2.16.0 (2018-08-13) + +### Added + +- add splitn function fccb0b0 (Helgi Þorbjörnsson) +- Add slice func df28ca7 (gongdo) +- Generate serial number a3bdffd (Cody Coons) +- Extract values of dict with values function df39312 (Lawrence Jones) + +### Changed + +- Modify panic message for list.slice ae38335 (gongdo) +- Minor improvement in code quality - Removed an unreachable piece of code at defaults.go#L26:6 - Resolve formatting issues. 5834241 (Abhishek Kashyap) +- Remove duplicated documentation 1d97af1 (Matthew Fisher) +- Test on go 1.11 49df809 (Helgi Þormar Þorbjörnsson) + +### Fixed + +- Fix file permissions c5f40b5 (gongdo) +- Fix example for buildCustomCert 7779e0d (Tin Lam) + +## Release 2.15.0 (2018-04-02) + +### Added + +- #68 and #69: Add json helpers to docs (thanks @arunvelsriram) +- #66: Add ternary function (thanks @binoculars) +- #67: Allow keys function to take multiple dicts (thanks @binoculars) +- #89: Added sha1sum to crypto function (thanks @benkeil) +- #81: Allow customizing Root CA that used by genSignedCert (thanks @chenzhiwei) +- #92: Add travis testing for go 1.10 +- #93: Adding appveyor config for windows testing + +### Changed + +- #90: Updating to more recent dependencies +- #73: replace satori/go.uuid with google/uuid (thanks @petterw) + +### Fixed + +- #76: Fixed documentation typos (thanks @Thiht) +- Fixed rounding issue on the `ago` function. Note, the removes support for Go 1.8 and older + +## Release 2.14.1 (2017-12-01) + +### Fixed + +- #60: Fix typo in function name documentation (thanks @neil-ca-moore) +- #61: Removing line with {{ due to blocking github pages genertion +- #64: Update the list functions to handle int, string, and other slices for compatibility + +## Release 2.14.0 (2017-10-06) + +This new version of Sprig adds a set of functions for generating and working with SSL certificates. 
+ +- `genCA` generates an SSL Certificate Authority +- `genSelfSignedCert` generates an SSL self-signed certificate +- `genSignedCert` generates an SSL certificate and key based on a given CA + +## Release 2.13.0 (2017-09-18) + +This release adds new functions, including: + +- `regexMatch`, `regexFindAll`, `regexFind`, `regexReplaceAll`, `regexReplaceAllLiteral`, and `regexSplit` to work with regular expressions +- `floor`, `ceil`, and `round` math functions +- `toDate` converts a string to a date +- `nindent` is just like `indent` but also prepends a new line +- `ago` returns the time from `time.Now` + +### Added + +- #40: Added basic regex functionality (thanks @alanquillin) +- #41: Added ceil floor and round functions (thanks @alanquillin) +- #48: Added toDate function (thanks @andreynering) +- #50: Added nindent function (thanks @binoculars) +- #46: Added ago function (thanks @slayer) + +### Changed + +- #51: Updated godocs to include new string functions (thanks @curtisallen) +- #49: Added ability to merge multiple dicts (thanks @binoculars) + +## Release 2.12.0 (2017-05-17) + +- `snakecase`, `camelcase`, and `shuffle` are three new string functions +- `fail` allows you to bail out of a template render when conditions are not met + +## Release 2.11.0 (2017-05-02) + +- Added `toJson` and `toPrettyJson` +- Added `merge` +- Refactored documentation + +## Release 2.10.0 (2017-03-15) + +- Added `semver` and `semverCompare` for Semantic Versions +- `list` replaces `tuple` +- Fixed issue with `join` +- Added `first`, `last`, `intial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without` + +## Release 2.9.0 (2017-02-23) + +- Added `splitList` to split a list +- Added crypto functions of `genPrivateKey` and `derivePassword` + +## Release 2.8.0 (2016-12-21) + +- Added access to several path functions (`base`, `dir`, `clean`, `ext`, and `abs`) +- Added functions for _mutating_ dictionaries (`set`, `unset`, `hasKey`) + +## Release 2.7.0 (2016-12-01) + +- Added `sha256sum` to generate a hash of an input +- Added functions to convert a numeric or string to `int`, `int64`, `float64` + +## Release 2.6.0 (2016-10-03) + +- Added a `uuidv4` template function for generating UUIDs inside of a template. + +## Release 2.5.0 (2016-08-19) + +- New `trimSuffix`, `trimPrefix`, `hasSuffix`, and `hasPrefix` functions +- New aliases have been added for a few functions that didn't follow the naming conventions (`trimAll` and `abbrevBoth`) +- `trimall` and `abbrevboth` (notice the case) are deprecated and will be removed in 3.0.0 + +## Release 2.4.0 (2016-08-16) + +- Adds two functions: `until` and `untilStep` + +## Release 2.3.0 (2016-06-21) + +- cat: Concatenate strings with whitespace separators. +- replace: Replace parts of a string: `replace " " "-" "Me First"` renders "Me-First" +- plural: Format plurals: `len "foo" | plural "one foo" "many foos"` renders "many foos" +- indent: Indent blocks of text in a way that is sensitive to "\n" characters. + +## Release 2.2.0 (2016-04-21) + +- Added a `genPrivateKey` function (Thanks @bacongobbler) + +## Release 2.1.0 (2016-03-30) + +- `default` now prints the default value when it does not receive a value down the pipeline. It is much safer now to do `{{.Foo | default "bar"}}`. +- Added accessors for "hermetic" functions. These return only functions that, when given the same input, produce the same output. 
+
+## Release 2.0.0 (2016-03-29)
+
+Because we switched from `int` to `int64` as the return value for all integer math functions, the library's major version number has been incremented.
+
+- `min` complements `max` (formerly `biggest`)
+- `empty` indicates that a value is the empty value for its type
+- `tuple` creates a tuple inside of a template: `{{$t := tuple "a" "b" "c"}}`
+- `dict` creates a dictionary inside of a template: `{{$d := dict "key1" "val1" "key2" "val2"}}`
+- Date formatters have been added for HTML dates (as used in `date` input fields)
+- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`).
+
+## Release 1.2.0 (2016-02-01)
+
+- Added quote and squote
+- Added b32enc and b32dec
+- add now takes varargs
+- biggest now takes varargs
+
+## Release 1.1.0 (2015-12-29)
+
+- #4: Added contains function: strings.Contains, but with the arguments
+  switched to simplify common pipelines. (thanks krancour)
+- Added Travis-CI testing support
+
+## Release 1.0.0 (2015-12-23)
+
+- Initial release
diff --git a/api/vendor/github.com/go-task/slim-sprig/LICENSE.txt b/api/vendor/github.com/go-task/slim-sprig/LICENSE.txt
new file mode 100644
index 000000000..f311b1eaa
--- /dev/null
+++ b/api/vendor/github.com/go-task/slim-sprig/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (C) 2013-2020 Masterminds
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/api/vendor/github.com/go-task/slim-sprig/README.md b/api/vendor/github.com/go-task/slim-sprig/README.md
new file mode 100644
index 000000000..72579471f
--- /dev/null
+++ b/api/vendor/github.com/go-task/slim-sprig/README.md
@@ -0,0 +1,73 @@
+# Slim-Sprig: Template functions for Go templates [![GoDoc](https://godoc.org/github.com/go-task/slim-sprig?status.svg)](https://godoc.org/github.com/go-task/slim-sprig) [![Go Report Card](https://goreportcard.com/badge/github.com/go-task/slim-sprig)](https://goreportcard.com/report/github.com/go-task/slim-sprig)
+
+Slim-Sprig is a fork of [Sprig](https://github.com/Masterminds/sprig) with
+all functions that depend on external (non-standard-library) or crypto
+packages removed, which keeps the library lightweight. Most of these
+functions (especially the crypto ones) are not needed by most apps, but cost
+a lot in terms of binary size and compilation time.
+ +## Usage + +**Template developers**: Please use Slim-Sprig's [function documentation](https://go-task.github.io/slim-sprig/) for +detailed instructions and code snippets for the >100 template functions available. + +**Go developers**: If you'd like to include Slim-Sprig as a library in your program, +our API documentation is available [at GoDoc.org](http://godoc.org/github.com/go-task/slim-sprig). + +For standard usage, read on. + +### Load the Slim-Sprig library + +To load the Slim-Sprig `FuncMap`: + +```go + +import ( + "html/template" + + "github.com/go-task/slim-sprig" +) + +// This example illustrates that the FuncMap *must* be set before the +// templates themselves are loaded. +tpl := template.Must( + template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html") +) +``` + +### Calling the functions inside of templates + +By convention, all functions are lowercase. This seems to follow the Go +idiom for template functions (as opposed to template methods, which are +TitleCase). For example, this: + +``` +{{ "hello!" | upper | repeat 5 }} +``` + +produces this: + +``` +HELLO!HELLO!HELLO!HELLO!HELLO! +``` + +## Principles Driving Our Function Selection + +We followed these principles to decide which functions to add and how to implement them: + +- Use template functions to build layout. The following + types of operations are within the domain of template functions: + - Formatting + - Layout + - Simple type conversions + - Utilities that assist in handling common formatting and layout needs (e.g. arithmetic) +- Template functions should not return errors unless there is no way to print + a sensible value. For example, converting a string to an integer should not + produce an error if conversion fails. Instead, it should display a default + value. +- Simple math is necessary for grid layouts, pagers, and so on. Complex math + (anything other than arithmetic) should be done outside of templates. +- Template functions only deal with the data passed into them. They never retrieve + data from a source. +- Finally, do not override core Go template functions. diff --git a/api/vendor/github.com/go-task/slim-sprig/Taskfile.yml b/api/vendor/github.com/go-task/slim-sprig/Taskfile.yml new file mode 100644 index 000000000..cdcfd223b --- /dev/null +++ b/api/vendor/github.com/go-task/slim-sprig/Taskfile.yml @@ -0,0 +1,12 @@ +# https://taskfile.dev + +version: '2' + +tasks: + default: + cmds: + - task: test + + test: + cmds: + - go test -v . diff --git a/api/vendor/github.com/go-task/slim-sprig/crypto.go b/api/vendor/github.com/go-task/slim-sprig/crypto.go new file mode 100644 index 000000000..d06e516d4 --- /dev/null +++ b/api/vendor/github.com/go-task/slim-sprig/crypto.go @@ -0,0 +1,24 @@ +package sprig + +import ( + "crypto/sha1" + "crypto/sha256" + "encoding/hex" + "fmt" + "hash/adler32" +) + +func sha256sum(input string) string { + hash := sha256.Sum256([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func sha1sum(input string) string { + hash := sha1.Sum([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func adler32sum(input string) string { + hash := adler32.Checksum([]byte(input)) + return fmt.Sprintf("%d", hash) +} diff --git a/api/vendor/github.com/go-task/slim-sprig/date.go b/api/vendor/github.com/go-task/slim-sprig/date.go new file mode 100644 index 000000000..ed022ddac --- /dev/null +++ b/api/vendor/github.com/go-task/slim-sprig/date.go @@ -0,0 +1,152 @@ +package sprig + +import ( + "strconv" + "time" +) + +// Given a format and a date, format the date string. 
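+// For example (an illustrative sketch, assuming the function map from
+// functions.go is installed in a template):
+//
+//	{{ now | date "2006-01-02" }}       renders the current date
+//	{{ date "15:04:05" 1700000000 }}    formats a UNIX timestamp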
+//
+// Date can be a `time.Time` or an `int, int32, int64`.
+// In the latter case, it is treated as seconds since the UNIX
+// epoch.
+func date(fmt string, date interface{}) string {
+	return dateInZone(fmt, date, "Local")
+}
+
+func htmlDate(date interface{}) string {
+	return dateInZone("2006-01-02", date, "Local")
+}
+
+func htmlDateInZone(date interface{}, zone string) string {
+	return dateInZone("2006-01-02", date, zone)
+}
+
+func dateInZone(fmt string, date interface{}, zone string) string {
+	var t time.Time
+	switch date := date.(type) {
+	default:
+		t = time.Now()
+	case time.Time:
+		t = date
+	case *time.Time:
+		t = *date
+	case int64:
+		t = time.Unix(date, 0)
+	case int:
+		t = time.Unix(int64(date), 0)
+	case int32:
+		t = time.Unix(int64(date), 0)
+	}
+
+	loc, err := time.LoadLocation(zone)
+	if err != nil {
+		loc, _ = time.LoadLocation("UTC")
+	}
+
+	return t.In(loc).Format(fmt)
+}
+
+func dateModify(fmt string, date time.Time) time.Time {
+	d, err := time.ParseDuration(fmt)
+	if err != nil {
+		return date
+	}
+	return date.Add(d)
+}
+
+func mustDateModify(fmt string, date time.Time) (time.Time, error) {
+	d, err := time.ParseDuration(fmt)
+	if err != nil {
+		return time.Time{}, err
+	}
+	return date.Add(d), nil
+}
+
+func dateAgo(date interface{}) string {
+	var t time.Time
+
+	switch date := date.(type) {
+	default:
+		t = time.Now()
+	case time.Time:
+		t = date
+	case int64:
+		t = time.Unix(date, 0)
+	case int:
+		t = time.Unix(int64(date), 0)
+	}
+	// Drop resolution to seconds
+	duration := time.Since(t).Round(time.Second)
+	return duration.String()
+}
+
+func duration(sec interface{}) string {
+	var n int64
+	switch value := sec.(type) {
+	default:
+		n = 0
+	case string:
+		n, _ = strconv.ParseInt(value, 10, 64)
+	case int64:
+		n = value
+	}
+	return (time.Duration(n) * time.Second).String()
+}
+
+func durationRound(duration interface{}) string {
+	var d time.Duration
+	switch duration := duration.(type) {
+	default:
+		d = 0
+	case string:
+		d, _ = time.ParseDuration(duration)
+	case int64:
+		d = time.Duration(duration)
+	case time.Time:
+		d = time.Since(duration)
+	}
+
+	u := uint64(d)
+	neg := d < 0
+	if neg {
+		u = -u
+	}
+
+	var (
+		year   = uint64(time.Hour) * 24 * 365
+		month  = uint64(time.Hour) * 24 * 30
+		day    = uint64(time.Hour) * 24
+		hour   = uint64(time.Hour)
+		minute = uint64(time.Minute)
+		second = uint64(time.Second)
+	)
+	switch {
+	case u > year:
+		return strconv.FormatUint(u/year, 10) + "y"
+	case u > month:
+		return strconv.FormatUint(u/month, 10) + "mo"
+	case u > day:
+		return strconv.FormatUint(u/day, 10) + "d"
+	case u > hour:
+		return strconv.FormatUint(u/hour, 10) + "h"
+	case u > minute:
+		return strconv.FormatUint(u/minute, 10) + "m"
+	case u > second:
+		return strconv.FormatUint(u/second, 10) + "s"
+	}
+	return "0s"
+}
+
+func toDate(fmt, str string) time.Time {
+	t, _ := time.ParseInLocation(fmt, str, time.Local)
+	return t
+}
+
+func mustToDate(fmt, str string) (time.Time, error) {
+	return time.ParseInLocation(fmt, str, time.Local)
+}
+
+func unixEpoch(date time.Time) string {
+	return strconv.FormatInt(date.Unix(), 10)
+}
diff --git a/api/vendor/github.com/go-task/slim-sprig/defaults.go b/api/vendor/github.com/go-task/slim-sprig/defaults.go
new file mode 100644
index 000000000..b9f979666
--- /dev/null
+++ b/api/vendor/github.com/go-task/slim-sprig/defaults.go
@@ -0,0 +1,163 @@
+package sprig
+
+import (
+	"bytes"
+	"encoding/json"
+	"math/rand"
+	"reflect"
+	"strings"
+	"time"
+)
+
+func init() {
+	rand.Seed(time.Now().UnixNano())
+}
+
+// dfault
checks whether `given` is set, and returns default if not set. +// +// This returns `d` if `given` appears not to be set, and `given` otherwise. +// +// For numeric types 0 is unset. +// For strings, maps, arrays, and slices, len() = 0 is considered unset. +// For bool, false is unset. +// Structs are never considered unset. +// +// For everything else, including pointers, a nil value is unset. +func dfault(d interface{}, given ...interface{}) interface{} { + + if empty(given) || empty(given[0]) { + return d + } + return given[0] +} + +// empty returns true if the given value has the zero value for its type. +func empty(given interface{}) bool { + g := reflect.ValueOf(given) + if !g.IsValid() { + return true + } + + // Basically adapted from text/template.isTrue + switch g.Kind() { + default: + return g.IsNil() + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return g.Len() == 0 + case reflect.Bool: + return !g.Bool() + case reflect.Complex64, reflect.Complex128: + return g.Complex() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return g.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return g.Uint() == 0 + case reflect.Float32, reflect.Float64: + return g.Float() == 0 + case reflect.Struct: + return false + } +} + +// coalesce returns the first non-empty value. +func coalesce(v ...interface{}) interface{} { + for _, val := range v { + if !empty(val) { + return val + } + } + return nil +} + +// all returns true if empty(x) is false for all values x in the list. +// If the list is empty, return true. +func all(v ...interface{}) bool { + for _, val := range v { + if empty(val) { + return false + } + } + return true +} + +// any returns true if empty(x) is false for any x in the list. +// If the list is empty, return false. +func any(v ...interface{}) bool { + for _, val := range v { + if !empty(val) { + return true + } + } + return false +} + +// fromJson decodes JSON into a structured value, ignoring errors. +func fromJson(v string) interface{} { + output, _ := mustFromJson(v) + return output +} + +// mustFromJson decodes JSON into a structured value, returning errors. +func mustFromJson(v string) (interface{}, error) { + var output interface{} + err := json.Unmarshal([]byte(v), &output) + return output, err +} + +// toJson encodes an item into a JSON string +func toJson(v interface{}) string { + output, _ := json.Marshal(v) + return string(output) +} + +func mustToJson(v interface{}) (string, error) { + output, err := json.Marshal(v) + if err != nil { + return "", err + } + return string(output), nil +} + +// toPrettyJson encodes an item into a pretty (indented) JSON string +func toPrettyJson(v interface{}) string { + output, _ := json.MarshalIndent(v, "", " ") + return string(output) +} + +func mustToPrettyJson(v interface{}) (string, error) { + output, err := json.MarshalIndent(v, "", " ") + if err != nil { + return "", err + } + return string(output), nil +} + +// toRawJson encodes an item into a JSON string with no escaping of HTML characters. +func toRawJson(v interface{}) string { + output, err := mustToRawJson(v) + if err != nil { + panic(err) + } + return string(output) +} + +// mustToRawJson encodes an item into a JSON string with no escaping of HTML characters. 
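+// For instance (an illustrative note, not upstream documentation), where
+// toJson would escape HTML metacharacters:
+//
+//	mustToRawJson("<b>")  // => `"<b>"`, nil   (toJson would render "\u003cb\u003e")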
+func mustToRawJson(v interface{}) (string, error) { + buf := new(bytes.Buffer) + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + err := enc.Encode(&v) + if err != nil { + return "", err + } + return strings.TrimSuffix(buf.String(), "\n"), nil +} + +// ternary returns the first value if the last value is true, otherwise returns the second value. +func ternary(vt interface{}, vf interface{}, v bool) interface{} { + if v { + return vt + } + + return vf +} diff --git a/api/vendor/github.com/go-task/slim-sprig/dict.go b/api/vendor/github.com/go-task/slim-sprig/dict.go new file mode 100644 index 000000000..77ebc61b1 --- /dev/null +++ b/api/vendor/github.com/go-task/slim-sprig/dict.go @@ -0,0 +1,118 @@ +package sprig + +func get(d map[string]interface{}, key string) interface{} { + if val, ok := d[key]; ok { + return val + } + return "" +} + +func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} { + d[key] = value + return d +} + +func unset(d map[string]interface{}, key string) map[string]interface{} { + delete(d, key) + return d +} + +func hasKey(d map[string]interface{}, key string) bool { + _, ok := d[key] + return ok +} + +func pluck(key string, d ...map[string]interface{}) []interface{} { + res := []interface{}{} + for _, dict := range d { + if val, ok := dict[key]; ok { + res = append(res, val) + } + } + return res +} + +func keys(dicts ...map[string]interface{}) []string { + k := []string{} + for _, dict := range dicts { + for key := range dict { + k = append(k, key) + } + } + return k +} + +func pick(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + for _, k := range keys { + if v, ok := dict[k]; ok { + res[k] = v + } + } + return res +} + +func omit(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + + omit := make(map[string]bool, len(keys)) + for _, k := range keys { + omit[k] = true + } + + for k, v := range dict { + if _, ok := omit[k]; !ok { + res[k] = v + } + } + return res +} + +func dict(v ...interface{}) map[string]interface{} { + dict := map[string]interface{}{} + lenv := len(v) + for i := 0; i < lenv; i += 2 { + key := strval(v[i]) + if i+1 >= lenv { + dict[key] = "" + continue + } + dict[key] = v[i+1] + } + return dict +} + +func values(dict map[string]interface{}) []interface{} { + values := []interface{}{} + for _, value := range dict { + values = append(values, value) + } + + return values +} + +func dig(ps ...interface{}) (interface{}, error) { + if len(ps) < 3 { + panic("dig needs at least three arguments") + } + dict := ps[len(ps)-1].(map[string]interface{}) + def := ps[len(ps)-2] + ks := make([]string, len(ps)-2) + for i := 0; i < len(ks); i++ { + ks[i] = ps[i].(string) + } + + return digFromDict(dict, def, ks) +} + +func digFromDict(dict map[string]interface{}, d interface{}, ks []string) (interface{}, error) { + k, ns := ks[0], ks[1:len(ks)] + step, has := dict[k] + if !has { + return d, nil + } + if len(ns) == 0 { + return step, nil + } + return digFromDict(step.(map[string]interface{}), d, ns) +} diff --git a/api/vendor/github.com/go-task/slim-sprig/doc.go b/api/vendor/github.com/go-task/slim-sprig/doc.go new file mode 100644 index 000000000..aabb9d448 --- /dev/null +++ b/api/vendor/github.com/go-task/slim-sprig/doc.go @@ -0,0 +1,19 @@ +/* +Package sprig provides template functions for Go. 
+
+This package contains a number of utility functions for working with data
+inside of Go `html/template` and `text/template` files.
+
+To add these functions, use the template's `Funcs()` method:
+
+	t := template.New("foo").Funcs(sprig.FuncMap())
+
+Note that you should add the function map before you parse any template files.
+
+	In several cases, Sprig reverses the order of arguments from the way they
+	appear in the standard library. This is to make it easier to pipe
+	arguments into functions.
+
+See http://masterminds.github.io/sprig/ for more detailed documentation on each of the available functions.
+*/
+package sprig
diff --git a/api/vendor/github.com/go-task/slim-sprig/functions.go b/api/vendor/github.com/go-task/slim-sprig/functions.go
new file mode 100644
index 000000000..5ea74f899
--- /dev/null
+++ b/api/vendor/github.com/go-task/slim-sprig/functions.go
@@ -0,0 +1,317 @@
+package sprig
+
+import (
+	"errors"
+	"html/template"
+	"math/rand"
+	"os"
+	"path"
+	"path/filepath"
+	"reflect"
+	"strconv"
+	"strings"
+	ttemplate "text/template"
+	"time"
+)
+
+// FuncMap produces the function map.
+//
+// Use this to pass the functions into the template engine:
+//
+//	tpl := template.New("foo").Funcs(sprig.FuncMap())
+//
+func FuncMap() template.FuncMap {
+	return HtmlFuncMap()
+}
+
+// HermeticTxtFuncMap returns a 'text/template'.FuncMap with only repeatable functions.
+func HermeticTxtFuncMap() ttemplate.FuncMap {
+	r := TxtFuncMap()
+	for _, name := range nonhermeticFunctions {
+		delete(r, name)
+	}
+	return r
+}
+
+// HermeticHtmlFuncMap returns an 'html/template'.FuncMap with only repeatable functions.
+func HermeticHtmlFuncMap() template.FuncMap {
+	r := HtmlFuncMap()
+	for _, name := range nonhermeticFunctions {
+		delete(r, name)
+	}
+	return r
+}
+
+// TxtFuncMap returns a 'text/template'.FuncMap.
+func TxtFuncMap() ttemplate.FuncMap {
+	return ttemplate.FuncMap(GenericFuncMap())
+}
+
+// HtmlFuncMap returns an 'html/template'.FuncMap.
+func HtmlFuncMap() template.FuncMap {
+	return template.FuncMap(GenericFuncMap())
+}
+
+// GenericFuncMap returns a copy of the basic function map as a map[string]interface{}.
+func GenericFuncMap() map[string]interface{} {
+	gfm := make(map[string]interface{}, len(genericMap))
+	for k, v := range genericMap {
+		gfm[k] = v
+	}
+	return gfm
+}
+
+// These functions are not guaranteed to evaluate to the same result for a given
+// input, because they refer to the environment or global state.
+var nonhermeticFunctions = []string{
+	// Date functions
+	"date",
+	"date_in_zone",
+	"date_modify",
+	"now",
+	"htmlDate",
+	"htmlDateInZone",
+	"dateInZone",
+	"dateModify",
+
+	// Strings
+	"randAlphaNum",
+	"randAlpha",
+	"randAscii",
+	"randNumeric",
+	"randBytes",
+	"uuidv4",
+
+	// OS
+	"env",
+	"expandenv",
+
+	// Network
+	"getHostByName",
+}
+
+var genericMap = map[string]interface{}{
+	"hello": func() string { return "Hello!"
}, + + // Date functions + "ago": dateAgo, + "date": date, + "date_in_zone": dateInZone, + "date_modify": dateModify, + "dateInZone": dateInZone, + "dateModify": dateModify, + "duration": duration, + "durationRound": durationRound, + "htmlDate": htmlDate, + "htmlDateInZone": htmlDateInZone, + "must_date_modify": mustDateModify, + "mustDateModify": mustDateModify, + "mustToDate": mustToDate, + "now": time.Now, + "toDate": toDate, + "unixEpoch": unixEpoch, + + // Strings + "trunc": trunc, + "trim": strings.TrimSpace, + "upper": strings.ToUpper, + "lower": strings.ToLower, + "title": strings.Title, + "substr": substring, + // Switch order so that "foo" | repeat 5 + "repeat": func(count int, str string) string { return strings.Repeat(str, count) }, + // Deprecated: Use trimAll. + "trimall": func(a, b string) string { return strings.Trim(b, a) }, + // Switch order so that "$foo" | trimall "$" + "trimAll": func(a, b string) string { return strings.Trim(b, a) }, + "trimSuffix": func(a, b string) string { return strings.TrimSuffix(b, a) }, + "trimPrefix": func(a, b string) string { return strings.TrimPrefix(b, a) }, + // Switch order so that "foobar" | contains "foo" + "contains": func(substr string, str string) bool { return strings.Contains(str, substr) }, + "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) }, + "hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) }, + "quote": quote, + "squote": squote, + "cat": cat, + "indent": indent, + "nindent": nindent, + "replace": replace, + "plural": plural, + "sha1sum": sha1sum, + "sha256sum": sha256sum, + "adler32sum": adler32sum, + "toString": strval, + + // Wrap Atoi to stop errors. + "atoi": func(a string) int { i, _ := strconv.Atoi(a); return i }, + "int64": toInt64, + "int": toInt, + "float64": toFloat64, + "seq": seq, + "toDecimal": toDecimal, + + //"gt": func(a, b int) bool {return a > b}, + //"gte": func(a, b int) bool {return a >= b}, + //"lt": func(a, b int) bool {return a < b}, + //"lte": func(a, b int) bool {return a <= b}, + + // split "/" foo/bar returns map[int]string{0: foo, 1: bar} + "split": split, + "splitList": func(sep, orig string) []string { return strings.Split(orig, sep) }, + // splitn "/" foo/bar/fuu returns map[int]string{0: foo, 1: bar/fuu} + "splitn": splitn, + "toStrings": strslice, + + "until": until, + "untilStep": untilStep, + + // VERY basic arithmetic. + "add1": func(i interface{}) int64 { return toInt64(i) + 1 }, + "add": func(i ...interface{}) int64 { + var a int64 = 0 + for _, b := range i { + a += toInt64(b) + } + return a + }, + "sub": func(a, b interface{}) int64 { return toInt64(a) - toInt64(b) }, + "div": func(a, b interface{}) int64 { return toInt64(a) / toInt64(b) }, + "mod": func(a, b interface{}) int64 { return toInt64(a) % toInt64(b) }, + "mul": func(a interface{}, v ...interface{}) int64 { + val := toInt64(a) + for _, b := range v { + val = val * toInt64(b) + } + return val + }, + "randInt": func(min, max int) int { return rand.Intn(max-min) + min }, + "biggest": max, + "max": max, + "min": min, + "maxf": maxf, + "minf": minf, + "ceil": ceil, + "floor": floor, + "round": round, + + // string slices. Note that we reverse the order b/c that's better + // for template processing. 
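+	// For example (a sketch): {{ list "a" "b" "c" | join "-" }} renders
+	// "a-b-c", because the piped list arrives as join's final argument.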
+ "join": join, + "sortAlpha": sortAlpha, + + // Defaults + "default": dfault, + "empty": empty, + "coalesce": coalesce, + "all": all, + "any": any, + "compact": compact, + "mustCompact": mustCompact, + "fromJson": fromJson, + "toJson": toJson, + "toPrettyJson": toPrettyJson, + "toRawJson": toRawJson, + "mustFromJson": mustFromJson, + "mustToJson": mustToJson, + "mustToPrettyJson": mustToPrettyJson, + "mustToRawJson": mustToRawJson, + "ternary": ternary, + + // Reflection + "typeOf": typeOf, + "typeIs": typeIs, + "typeIsLike": typeIsLike, + "kindOf": kindOf, + "kindIs": kindIs, + "deepEqual": reflect.DeepEqual, + + // OS: + "env": os.Getenv, + "expandenv": os.ExpandEnv, + + // Network: + "getHostByName": getHostByName, + + // Paths: + "base": path.Base, + "dir": path.Dir, + "clean": path.Clean, + "ext": path.Ext, + "isAbs": path.IsAbs, + + // Filepaths: + "osBase": filepath.Base, + "osClean": filepath.Clean, + "osDir": filepath.Dir, + "osExt": filepath.Ext, + "osIsAbs": filepath.IsAbs, + + // Encoding: + "b64enc": base64encode, + "b64dec": base64decode, + "b32enc": base32encode, + "b32dec": base32decode, + + // Data Structures: + "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable. + "list": list, + "dict": dict, + "get": get, + "set": set, + "unset": unset, + "hasKey": hasKey, + "pluck": pluck, + "keys": keys, + "pick": pick, + "omit": omit, + "values": values, + + "append": push, "push": push, + "mustAppend": mustPush, "mustPush": mustPush, + "prepend": prepend, + "mustPrepend": mustPrepend, + "first": first, + "mustFirst": mustFirst, + "rest": rest, + "mustRest": mustRest, + "last": last, + "mustLast": mustLast, + "initial": initial, + "mustInitial": mustInitial, + "reverse": reverse, + "mustReverse": mustReverse, + "uniq": uniq, + "mustUniq": mustUniq, + "without": without, + "mustWithout": mustWithout, + "has": has, + "mustHas": mustHas, + "slice": slice, + "mustSlice": mustSlice, + "concat": concat, + "dig": dig, + "chunk": chunk, + "mustChunk": mustChunk, + + // Flow Control: + "fail": func(msg string) (string, error) { return "", errors.New(msg) }, + + // Regex + "regexMatch": regexMatch, + "mustRegexMatch": mustRegexMatch, + "regexFindAll": regexFindAll, + "mustRegexFindAll": mustRegexFindAll, + "regexFind": regexFind, + "mustRegexFind": mustRegexFind, + "regexReplaceAll": regexReplaceAll, + "mustRegexReplaceAll": mustRegexReplaceAll, + "regexReplaceAllLiteral": regexReplaceAllLiteral, + "mustRegexReplaceAllLiteral": mustRegexReplaceAllLiteral, + "regexSplit": regexSplit, + "mustRegexSplit": mustRegexSplit, + "regexQuoteMeta": regexQuoteMeta, + + // URLs: + "urlParse": urlParse, + "urlJoin": urlJoin, +} diff --git a/api/vendor/github.com/go-task/slim-sprig/list.go b/api/vendor/github.com/go-task/slim-sprig/list.go new file mode 100644 index 000000000..ca0fbb789 --- /dev/null +++ b/api/vendor/github.com/go-task/slim-sprig/list.go @@ -0,0 +1,464 @@ +package sprig + +import ( + "fmt" + "math" + "reflect" + "sort" +) + +// Reflection is used in these functions so that slices and arrays of strings, +// ints, and other types not implementing []interface{} can be worked with. +// For example, this is useful if you need to work on the output of regexs. 
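+//
+// For example (an illustrative sketch, assuming this function map is
+// installed in a template):
+//
+//	{{ regexFindAll "[0-9]+" "a1 b22 c333" -1 | join "-" }}
+//
+// renders "1-22-333": regexFindAll returns a []string, which join accepts
+// here through reflection.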
+ +func list(v ...interface{}) []interface{} { + return v +} + +func push(list interface{}, v interface{}) []interface{} { + l, err := mustPush(list, v) + if err != nil { + panic(err) + } + + return l +} + +func mustPush(list interface{}, v interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append(nl, v), nil + + default: + return nil, fmt.Errorf("Cannot push on type %s", tp) + } +} + +func prepend(list interface{}, v interface{}) []interface{} { + l, err := mustPrepend(list, v) + if err != nil { + panic(err) + } + + return l +} + +func mustPrepend(list interface{}, v interface{}) ([]interface{}, error) { + //return append([]interface{}{v}, list...) + + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append([]interface{}{v}, nl...), nil + + default: + return nil, fmt.Errorf("Cannot prepend on type %s", tp) + } +} + +func chunk(size int, list interface{}) [][]interface{} { + l, err := mustChunk(size, list) + if err != nil { + panic(err) + } + + return l +} + +func mustChunk(size int, list interface{}) ([][]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + + cs := int(math.Floor(float64(l-1)/float64(size)) + 1) + nl := make([][]interface{}, cs) + + for i := 0; i < cs; i++ { + clen := size + if i == cs-1 { + clen = int(math.Floor(math.Mod(float64(l), float64(size)))) + if clen == 0 { + clen = size + } + } + + nl[i] = make([]interface{}, clen) + + for j := 0; j < clen; j++ { + ix := i*size + j + nl[i][j] = l2.Index(ix).Interface() + } + } + + return nl, nil + + default: + return nil, fmt.Errorf("Cannot chunk type %s", tp) + } +} + +func last(list interface{}) interface{} { + l, err := mustLast(list) + if err != nil { + panic(err) + } + + return l +} + +func mustLast(list interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + return l2.Index(l - 1).Interface(), nil + default: + return nil, fmt.Errorf("Cannot find last on type %s", tp) + } +} + +func first(list interface{}) interface{} { + l, err := mustFirst(list) + if err != nil { + panic(err) + } + + return l +} + +func mustFirst(list interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + return l2.Index(0).Interface(), nil + default: + return nil, fmt.Errorf("Cannot find first on type %s", tp) + } +} + +func rest(list interface{}) []interface{} { + l, err := mustRest(list) + if err != nil { + panic(err) + } + + return l +} + +func mustRest(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + nl := make([]interface{}, l-1) + for i := 1; i < l; i++ { + nl[i-1] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find 
rest on type %s", tp) + } +} + +func initial(list interface{}) []interface{} { + l, err := mustInitial(list) + if err != nil { + panic(err) + } + + return l +} + +func mustInitial(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + nl := make([]interface{}, l-1) + for i := 0; i < l-1; i++ { + nl[i] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find initial on type %s", tp) + } +} + +func sortAlpha(list interface{}) []string { + k := reflect.Indirect(reflect.ValueOf(list)).Kind() + switch k { + case reflect.Slice, reflect.Array: + a := strslice(list) + s := sort.StringSlice(a) + s.Sort() + return s + } + return []string{strval(list)} +} + +func reverse(v interface{}) []interface{} { + l, err := mustReverse(v) + if err != nil { + panic(err) + } + + return l +} + +func mustReverse(v interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(v).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(v) + + l := l2.Len() + // We do not sort in place because the incoming array should not be altered. + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[l-i-1] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find reverse on type %s", tp) + } +} + +func compact(list interface{}) []interface{} { + l, err := mustCompact(list) + if err != nil { + panic(err) + } + + return l +} + +func mustCompact(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !empty(item) { + nl = append(nl, item) + } + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot compact on type %s", tp) + } +} + +func uniq(list interface{}) []interface{} { + l, err := mustUniq(list) + if err != nil { + panic(err) + } + + return l +} + +func mustUniq(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + dest := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !inList(dest, item) { + dest = append(dest, item) + } + } + + return dest, nil + default: + return nil, fmt.Errorf("Cannot find uniq on type %s", tp) + } +} + +func inList(haystack []interface{}, needle interface{}) bool { + for _, h := range haystack { + if reflect.DeepEqual(needle, h) { + return true + } + } + return false +} + +func without(list interface{}, omit ...interface{}) []interface{} { + l, err := mustWithout(list, omit...) 
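+	// Like the other non-must helpers in this file, without panics where
+	// mustWithout would return the error; e.g. (a sketch)
+	// {{ without (list 1 2 3 2) 2 }} yields [1 3].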
+ if err != nil { + panic(err) + } + + return l +} + +func mustWithout(list interface{}, omit ...interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + res := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !inList(omit, item) { + res = append(res, item) + } + } + + return res, nil + default: + return nil, fmt.Errorf("Cannot find without on type %s", tp) + } +} + +func has(needle interface{}, haystack interface{}) bool { + l, err := mustHas(needle, haystack) + if err != nil { + panic(err) + } + + return l +} + +func mustHas(needle interface{}, haystack interface{}) (bool, error) { + if haystack == nil { + return false, nil + } + tp := reflect.TypeOf(haystack).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(haystack) + var item interface{} + l := l2.Len() + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if reflect.DeepEqual(needle, item) { + return true, nil + } + } + + return false, nil + default: + return false, fmt.Errorf("Cannot find has on type %s", tp) + } +} + +// $list := [1, 2, 3, 4, 5] +// slice $list -> list[0:5] = list[:] +// slice $list 0 3 -> list[0:3] = list[:3] +// slice $list 3 5 -> list[3:5] +// slice $list 3 -> list[3:5] = list[3:] +func slice(list interface{}, indices ...interface{}) interface{} { + l, err := mustSlice(list, indices...) + if err != nil { + panic(err) + } + + return l +} + +func mustSlice(list interface{}, indices ...interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + var start, end int + if len(indices) > 0 { + start = toInt(indices[0]) + } + if len(indices) < 2 { + end = l + } else { + end = toInt(indices[1]) + } + + return l2.Slice(start, end).Interface(), nil + default: + return nil, fmt.Errorf("list should be type of slice or array but %s", tp) + } +} + +func concat(lists ...interface{}) interface{} { + var res []interface{} + for _, list := range lists { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + for i := 0; i < l2.Len(); i++ { + res = append(res, l2.Index(i).Interface()) + } + default: + panic(fmt.Sprintf("Cannot concat type %s as list", tp)) + } + } + return res +} diff --git a/api/vendor/github.com/go-task/slim-sprig/network.go b/api/vendor/github.com/go-task/slim-sprig/network.go new file mode 100644 index 000000000..108d78a94 --- /dev/null +++ b/api/vendor/github.com/go-task/slim-sprig/network.go @@ -0,0 +1,12 @@ +package sprig + +import ( + "math/rand" + "net" +) + +func getHostByName(name string) string { + addrs, _ := net.LookupHost(name) + //TODO: add error handing when release v3 comes out + return addrs[rand.Intn(len(addrs))] +} diff --git a/api/vendor/github.com/go-task/slim-sprig/numeric.go b/api/vendor/github.com/go-task/slim-sprig/numeric.go new file mode 100644 index 000000000..98cbb37a1 --- /dev/null +++ b/api/vendor/github.com/go-task/slim-sprig/numeric.go @@ -0,0 +1,228 @@ +package sprig + +import ( + "fmt" + "math" + "reflect" + "strconv" + "strings" +) + +// toFloat64 converts 64-bit floats +func toFloat64(v interface{}) float64 { + if str, ok := v.(string); ok { + iv, err := strconv.ParseFloat(str, 64) + if err != nil { + return 0 + } + return iv + } + + val := 
reflect.Indirect(reflect.ValueOf(v))
+	switch val.Kind() {
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		return float64(val.Int())
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32:
+		return float64(val.Uint())
+	case reflect.Uint, reflect.Uint64:
+		return float64(val.Uint())
+	case reflect.Float32, reflect.Float64:
+		return val.Float()
+	case reflect.Bool:
+		if val.Bool() {
+			return 1
+		}
+		return 0
+	default:
+		return 0
+	}
+}
+
+func toInt(v interface{}) int {
+	// It's not optimal. But I don't want duplicate toInt64 code.
+	return int(toInt64(v))
+}
+
+// toInt64 converts integer types to 64-bit integers
+func toInt64(v interface{}) int64 {
+	if str, ok := v.(string); ok {
+		iv, err := strconv.ParseInt(str, 10, 64)
+		if err != nil {
+			return 0
+		}
+		return iv
+	}
+
+	val := reflect.Indirect(reflect.ValueOf(v))
+	switch val.Kind() {
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		return val.Int()
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32:
+		return int64(val.Uint())
+	case reflect.Uint, reflect.Uint64:
+		tv := val.Uint()
+		if tv <= math.MaxInt64 {
+			return int64(tv)
+		}
+		// TODO: What is the sensible thing to do here?
+		return math.MaxInt64
+	case reflect.Float32, reflect.Float64:
+		return int64(val.Float())
+	case reflect.Bool:
+		if val.Bool() {
+			return 1
+		}
+		return 0
+	default:
+		return 0
+	}
+}
+
+func max(a interface{}, i ...interface{}) int64 {
+	aa := toInt64(a)
+	for _, b := range i {
+		bb := toInt64(b)
+		if bb > aa {
+			aa = bb
+		}
+	}
+	return aa
+}
+
+func maxf(a interface{}, i ...interface{}) float64 {
+	aa := toFloat64(a)
+	for _, b := range i {
+		bb := toFloat64(b)
+		aa = math.Max(aa, bb)
+	}
+	return aa
+}
+
+func min(a interface{}, i ...interface{}) int64 {
+	aa := toInt64(a)
+	for _, b := range i {
+		bb := toInt64(b)
+		if bb < aa {
+			aa = bb
+		}
+	}
+	return aa
+}
+
+func minf(a interface{}, i ...interface{}) float64 {
+	aa := toFloat64(a)
+	for _, b := range i {
+		bb := toFloat64(b)
+		aa = math.Min(aa, bb)
+	}
+	return aa
+}
+
+func until(count int) []int {
+	step := 1
+	if count < 0 {
+		step = -1
+	}
+	return untilStep(0, count, step)
+}
+
+func untilStep(start, stop, step int) []int {
+	v := []int{}
+
+	if stop < start {
+		if step >= 0 {
+			return v
+		}
+		for i := start; i > stop; i += step {
+			v = append(v, i)
+		}
+		return v
+	}
+
+	if step <= 0 {
+		return v
+	}
+	for i := start; i < stop; i += step {
+		v = append(v, i)
+	}
+	return v
+}
+
+func floor(a interface{}) float64 {
+	aa := toFloat64(a)
+	return math.Floor(aa)
+}
+
+func ceil(a interface{}) float64 {
+	aa := toFloat64(a)
+	return math.Ceil(aa)
+}
+
+func round(a interface{}, p int, rOpt ...float64) float64 {
+	roundOn := .5
+	if len(rOpt) > 0 {
+		roundOn = rOpt[0]
+	}
+	val := toFloat64(a)
+	places := toFloat64(p)
+
+	var round float64
+	pow := math.Pow(10, places)
+	digit := pow * val
+	_, div := math.Modf(digit)
+	if div >= roundOn {
+		round = math.Ceil(digit)
+	} else {
+		round = math.Floor(digit)
+	}
+	return round / pow
+}
+
+// toDecimal converts a unix octal permission string to a decimal int64.
+func toDecimal(v interface{}) int64 {
+	result, err := strconv.ParseInt(fmt.Sprint(v), 8, 64)
+	if err != nil {
+		return 0
+	}
+	return result
+}
+
+func seq(params ...int) string {
+	increment := 1
+	switch len(params) {
+	case 0:
+		return ""
+	case 1:
+		start := 1
+		end := params[0]
+		if end < start {
+			increment = -1
+		}
+		return intArrayToString(untilStep(start, end+increment, increment), " ")
+	case 3:
+		start := params[0]
+		end := params[2]
+		step := params[1]
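+		// The three-argument form mirrors `seq START STEP END`; for example
+		// (a sketch) {{ seq 1 2 9 }} renders "1 3 5 7 9".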
+ if end < start { + increment = -1 + if step > 0 { + return "" + } + } + return intArrayToString(untilStep(start, end+increment, step), " ") + case 2: + start := params[0] + end := params[1] + step := 1 + if end < start { + step = -1 + } + return intArrayToString(untilStep(start, end+step, step), " ") + default: + return "" + } +} + +func intArrayToString(slice []int, delimeter string) string { + return strings.Trim(strings.Join(strings.Fields(fmt.Sprint(slice)), delimeter), "[]") +} diff --git a/api/vendor/github.com/go-task/slim-sprig/reflect.go b/api/vendor/github.com/go-task/slim-sprig/reflect.go new file mode 100644 index 000000000..8a65c132f --- /dev/null +++ b/api/vendor/github.com/go-task/slim-sprig/reflect.go @@ -0,0 +1,28 @@ +package sprig + +import ( + "fmt" + "reflect" +) + +// typeIs returns true if the src is the type named in target. +func typeIs(target string, src interface{}) bool { + return target == typeOf(src) +} + +func typeIsLike(target string, src interface{}) bool { + t := typeOf(src) + return target == t || "*"+target == t +} + +func typeOf(src interface{}) string { + return fmt.Sprintf("%T", src) +} + +func kindIs(target string, src interface{}) bool { + return target == kindOf(src) +} + +func kindOf(src interface{}) string { + return reflect.ValueOf(src).Kind().String() +} diff --git a/api/vendor/github.com/go-task/slim-sprig/regex.go b/api/vendor/github.com/go-task/slim-sprig/regex.go new file mode 100644 index 000000000..fab551018 --- /dev/null +++ b/api/vendor/github.com/go-task/slim-sprig/regex.go @@ -0,0 +1,83 @@ +package sprig + +import ( + "regexp" +) + +func regexMatch(regex string, s string) bool { + match, _ := regexp.MatchString(regex, s) + return match +} + +func mustRegexMatch(regex string, s string) (bool, error) { + return regexp.MatchString(regex, s) +} + +func regexFindAll(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.FindAllString(s, n) +} + +func mustRegexFindAll(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return []string{}, err + } + return r.FindAllString(s, n), nil +} + +func regexFind(regex string, s string) string { + r := regexp.MustCompile(regex) + return r.FindString(s) +} + +func mustRegexFind(regex string, s string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.FindString(s), nil +} + +func regexReplaceAll(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllString(s, repl) +} + +func mustRegexReplaceAll(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllString(s, repl), nil +} + +func regexReplaceAllLiteral(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllLiteralString(s, repl) +} + +func mustRegexReplaceAllLiteral(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllLiteralString(s, repl), nil +} + +func regexSplit(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.Split(s, n) +} + +func mustRegexSplit(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return []string{}, err + } + return r.Split(s, n), nil +} + +func regexQuoteMeta(s string) string { + return regexp.QuoteMeta(s) +} diff --git 
a/api/vendor/github.com/go-task/slim-sprig/strings.go b/api/vendor/github.com/go-task/slim-sprig/strings.go new file mode 100644 index 000000000..3c62d6b6f --- /dev/null +++ b/api/vendor/github.com/go-task/slim-sprig/strings.go @@ -0,0 +1,189 @@ +package sprig + +import ( + "encoding/base32" + "encoding/base64" + "fmt" + "reflect" + "strconv" + "strings" +) + +func base64encode(v string) string { + return base64.StdEncoding.EncodeToString([]byte(v)) +} + +func base64decode(v string) string { + data, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func base32encode(v string) string { + return base32.StdEncoding.EncodeToString([]byte(v)) +} + +func base32decode(v string) string { + data, err := base32.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func quote(str ...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("%q", strval(s))) + } + } + return strings.Join(out, " ") +} + +func squote(str ...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("'%v'", s)) + } + } + return strings.Join(out, " ") +} + +func cat(v ...interface{}) string { + v = removeNilElements(v) + r := strings.TrimSpace(strings.Repeat("%v ", len(v))) + return fmt.Sprintf(r, v...) +} + +func indent(spaces int, v string) string { + pad := strings.Repeat(" ", spaces) + return pad + strings.Replace(v, "\n", "\n"+pad, -1) +} + +func nindent(spaces int, v string) string { + return "\n" + indent(spaces, v) +} + +func replace(old, new, src string) string { + return strings.Replace(src, old, new, -1) +} + +func plural(one, many string, count int) string { + if count == 1 { + return one + } + return many +} + +func strslice(v interface{}) []string { + switch v := v.(type) { + case []string: + return v + case []interface{}: + b := make([]string, 0, len(v)) + for _, s := range v { + if s != nil { + b = append(b, strval(s)) + } + } + return b + default: + val := reflect.ValueOf(v) + switch val.Kind() { + case reflect.Array, reflect.Slice: + l := val.Len() + b := make([]string, 0, l) + for i := 0; i < l; i++ { + value := val.Index(i).Interface() + if value != nil { + b = append(b, strval(value)) + } + } + return b + default: + if v == nil { + return []string{} + } + + return []string{strval(v)} + } + } +} + +func removeNilElements(v []interface{}) []interface{} { + newSlice := make([]interface{}, 0, len(v)) + for _, i := range v { + if i != nil { + newSlice = append(newSlice, i) + } + } + return newSlice +} + +func strval(v interface{}) string { + switch v := v.(type) { + case string: + return v + case []byte: + return string(v) + case error: + return v.Error() + case fmt.Stringer: + return v.String() + default: + return fmt.Sprintf("%v", v) + } +} + +func trunc(c int, s string) string { + if c < 0 && len(s)+c > 0 { + return s[len(s)+c:] + } + if c >= 0 && len(s) > c { + return s[:c] + } + return s +} + +func join(sep string, v interface{}) string { + return strings.Join(strslice(v), sep) +} + +func split(sep, orig string) map[string]string { + parts := strings.Split(orig, sep) + res := make(map[string]string, len(parts)) + for i, v := range parts { + res["_"+strconv.Itoa(i)] = v + } + return res +} + +func splitn(sep string, n int, orig string) map[string]string { + parts := strings.SplitN(orig, sep, n) + res := make(map[string]string, len(parts)) + for i, v 
:= range parts { + res["_"+strconv.Itoa(i)] = v + } + return res +} + +// substring creates a substring of the given string. +// +// If start is < 0, this calls string[:end]. +// +// If start is >= 0 and end < 0 or end bigger than s length, this calls string[start:] +// +// Otherwise, this calls string[start, end]. +func substring(start, end int, s string) string { + if start < 0 { + return s[:end] + } + if end < 0 || end > len(s) { + return s[start:] + } + return s[start:end] +} diff --git a/api/vendor/github.com/go-task/slim-sprig/url.go b/api/vendor/github.com/go-task/slim-sprig/url.go new file mode 100644 index 000000000..b8e120e19 --- /dev/null +++ b/api/vendor/github.com/go-task/slim-sprig/url.go @@ -0,0 +1,66 @@ +package sprig + +import ( + "fmt" + "net/url" + "reflect" +) + +func dictGetOrEmpty(dict map[string]interface{}, key string) string { + value, ok := dict[key] + if !ok { + return "" + } + tp := reflect.TypeOf(value).Kind() + if tp != reflect.String { + panic(fmt.Sprintf("unable to parse %s key, must be of type string, but %s found", key, tp.String())) + } + return reflect.ValueOf(value).String() +} + +// parses given URL to return dict object +func urlParse(v string) map[string]interface{} { + dict := map[string]interface{}{} + parsedURL, err := url.Parse(v) + if err != nil { + panic(fmt.Sprintf("unable to parse url: %s", err)) + } + dict["scheme"] = parsedURL.Scheme + dict["host"] = parsedURL.Host + dict["hostname"] = parsedURL.Hostname() + dict["path"] = parsedURL.Path + dict["query"] = parsedURL.RawQuery + dict["opaque"] = parsedURL.Opaque + dict["fragment"] = parsedURL.Fragment + if parsedURL.User != nil { + dict["userinfo"] = parsedURL.User.String() + } else { + dict["userinfo"] = "" + } + + return dict +} + +// join given dict to URL string +func urlJoin(d map[string]interface{}) string { + resURL := url.URL{ + Scheme: dictGetOrEmpty(d, "scheme"), + Host: dictGetOrEmpty(d, "host"), + Path: dictGetOrEmpty(d, "path"), + RawQuery: dictGetOrEmpty(d, "query"), + Opaque: dictGetOrEmpty(d, "opaque"), + Fragment: dictGetOrEmpty(d, "fragment"), + } + userinfo := dictGetOrEmpty(d, "userinfo") + var user *url.Userinfo + if userinfo != "" { + tempURL, err := url.Parse(fmt.Sprintf("proto://%s@host", userinfo)) + if err != nil { + panic(fmt.Sprintf("unable to parse userinfo in dict: %s", err)) + } + user = tempURL.User + } + + resURL.User = user + return resURL.String() +} diff --git a/api/vendor/github.com/google/go-cmp/LICENSE b/api/vendor/github.com/google/go-cmp/LICENSE new file mode 100644 index 000000000..32017f8fa --- /dev/null +++ b/api/vendor/github.com/google/go-cmp/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2017 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/api/vendor/github.com/google/go-cmp/cmp/compare.go b/api/vendor/github.com/google/go-cmp/cmp/compare.go new file mode 100644 index 000000000..087320da7 --- /dev/null +++ b/api/vendor/github.com/google/go-cmp/cmp/compare.go @@ -0,0 +1,669 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cmp determines equality of values. +// +// This package is intended to be a more powerful and safer alternative to +// reflect.DeepEqual for comparing whether two values are semantically equal. +// It is intended to only be used in tests, as performance is not a goal and +// it may panic if it cannot compare the values. Its propensity towards +// panicking means that its unsuitable for production environments where a +// spurious panic may be fatal. +// +// The primary features of cmp are: +// +// - When the default behavior of equality does not suit the test's needs, +// custom equality functions can override the equality operation. +// For example, an equality function may report floats as equal so long as +// they are within some tolerance of each other. +// +// - Types with an Equal method may use that method to determine equality. +// This allows package authors to determine the equality operation +// for the types that they define. +// +// - If no custom equality functions are used and no Equal method is defined, +// equality is determined by recursively comparing the primitive kinds on +// both values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, +// unexported fields are not compared by default; they result in panics +// unless suppressed by using an Ignore option (see cmpopts.IgnoreUnexported) +// or explicitly compared using the Exporter option. +package cmp + +import ( + "fmt" + "reflect" + "strings" + + "github.com/google/go-cmp/cmp/internal/diff" + "github.com/google/go-cmp/cmp/internal/function" + "github.com/google/go-cmp/cmp/internal/value" +) + +// TODO(≥go1.18): Use any instead of interface{}. + +// Equal reports whether x and y are equal by recursively applying the +// following rules in the given order to x and y and all of their sub-values: +// +// - Let S be the set of all Ignore, Transformer, and Comparer options that +// remain after applying all path filters, value filters, and type filters. +// If at least one Ignore exists in S, then the comparison is ignored. +// If the number of Transformer and Comparer options in S is non-zero, +// then Equal panics because it is ambiguous which option to use. +// If S contains a single Transformer, then use that to transform +// the current values and recursively call Equal on the output values. 
+// If S contains a single Comparer, then use that to compare the current values. +// Otherwise, evaluation proceeds to the next rule. +// +// - If the values have an Equal method of the form "(T) Equal(T) bool" or +// "(T) Equal(I) bool" where T is assignable to I, then use the result of +// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and +// evaluation proceeds to the next rule. +// +// - Lastly, try to compare x and y based on their basic kinds. +// Simple kinds like booleans, integers, floats, complex numbers, strings, +// and channels are compared using the equivalent of the == operator in Go. +// Functions are only equal if they are both nil, otherwise they are unequal. +// +// Structs are equal if recursively calling Equal on all fields report equal. +// If a struct contains unexported fields, Equal panics unless an Ignore option +// (e.g., cmpopts.IgnoreUnexported) ignores that field or the Exporter option +// explicitly permits comparing the unexported field. +// +// Slices are equal if they are both nil or both non-nil, where recursively +// calling Equal on all non-ignored slice or array elements report equal. +// Empty non-nil slices and nil slices are not equal; to equate empty slices, +// consider using cmpopts.EquateEmpty. +// +// Maps are equal if they are both nil or both non-nil, where recursively +// calling Equal on all non-ignored map entries report equal. +// Map keys are equal according to the == operator. +// To use custom comparisons for map keys, consider using cmpopts.SortMaps. +// Empty non-nil maps and nil maps are not equal; to equate empty maps, +// consider using cmpopts.EquateEmpty. +// +// Pointers and interfaces are equal if they are both nil or both non-nil, +// where they have the same underlying concrete type and recursively +// calling Equal on the underlying values reports equal. +// +// Before recursing into a pointer, slice element, or map, the current path +// is checked to detect whether the address has already been visited. +// If there is a cycle, then the pointed at values are considered equal +// only if both addresses were previously visited in the same path step. +func Equal(x, y interface{}, opts ...Option) bool { + s := newState(opts) + s.compareAny(rootStep(x, y)) + return s.result.Equal() +} + +// Diff returns a human-readable report of the differences between two values: +// y - x. It returns an empty string if and only if Equal returns true for the +// same input values and options. +// +// The output is displayed as a literal in pseudo-Go syntax. +// At the start of each line, a "-" prefix indicates an element removed from x, +// a "+" prefix to indicates an element added from y, and the lack of a prefix +// indicates an element common to both x and y. If possible, the output +// uses fmt.Stringer.String or error.Error methods to produce more humanly +// readable outputs. In such cases, the string is prefixed with either an +// 's' or 'e' character, respectively, to indicate that the method was called. +// +// Do not depend on this output being stable. If you need the ability to +// programmatically interpret the difference, consider using a custom Reporter. +func Diff(x, y interface{}, opts ...Option) string { + s := newState(opts) + + // Optimization: If there are no other reporters, we can optimize for the + // common case where the result is equal (and thus no reported difference). + // This avoids the expensive construction of a difference tree. 
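+	// Typical call-site usage (an illustrative sketch, not upstream code):
+	//
+	//	if d := cmp.Diff(want, got); d != "" {
+	//		t.Errorf("mismatch (-want +got):\n%s", d)
+	//	}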
+ if len(s.reporters) == 0 { + s.compareAny(rootStep(x, y)) + if s.result.Equal() { + return "" + } + s.result = diff.Result{} // Reset results + } + + r := new(defaultReporter) + s.reporters = append(s.reporters, reporter{r}) + s.compareAny(rootStep(x, y)) + d := r.String() + if (d == "") != s.result.Equal() { + panic("inconsistent difference and equality results") + } + return d +} + +// rootStep constructs the first path step. If x and y have differing types, +// then they are stored within an empty interface type. +func rootStep(x, y interface{}) PathStep { + vx := reflect.ValueOf(x) + vy := reflect.ValueOf(y) + + // If the inputs are different types, auto-wrap them in an empty interface + // so that they have the same parent type. + var t reflect.Type + if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() { + t = anyType + if vx.IsValid() { + vvx := reflect.New(t).Elem() + vvx.Set(vx) + vx = vvx + } + if vy.IsValid() { + vvy := reflect.New(t).Elem() + vvy.Set(vy) + vy = vvy + } + } else { + t = vx.Type() + } + + return &pathStep{t, vx, vy} +} + +type state struct { + // These fields represent the "comparison state". + // Calling statelessCompare must not result in observable changes to these. + result diff.Result // The current result of comparison + curPath Path // The current path in the value tree + curPtrs pointerPath // The current set of visited pointers + reporters []reporter // Optional reporters + + // recChecker checks for infinite cycles applying the same set of + // transformers upon the output of itself. + recChecker recChecker + + // dynChecker triggers pseudo-random checks for option correctness. + // It is safe for statelessCompare to mutate this value. + dynChecker dynChecker + + // These fields, once set by processOption, will not change. + exporters []exporter // List of exporters for structs with unexported fields + opts Options // List of all fundamental and filter options +} + +func newState(opts []Option) *state { + // Always ensure a validator option exists to validate the inputs. + s := &state{opts: Options{validator{}}} + s.curPtrs.Init() + s.processOption(Options(opts)) + return s +} + +func (s *state) processOption(opt Option) { + switch opt := opt.(type) { + case nil: + case Options: + for _, o := range opt { + s.processOption(o) + } + case coreOption: + type filtered interface { + isFiltered() bool + } + if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() { + panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt)) + } + s.opts = append(s.opts, opt) + case exporter: + s.exporters = append(s.exporters, opt) + case reporter: + s.reporters = append(s.reporters, opt) + default: + panic(fmt.Sprintf("unknown option %T", opt)) + } +} + +// statelessCompare compares two values and returns the result. +// This function is stateless in that it does not alter the current result, +// or output to any registered reporters. +func (s *state) statelessCompare(step PathStep) diff.Result { + // We do not save and restore curPath and curPtrs because all of the + // compareX methods should properly push and pop from them. + // It is an implementation bug if the contents of the paths differ from + // when calling this function to when returning from it. 
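+	//
+	// Editor's note, not upstream text: this stateless probe is what lets
+	// compareSlice ask "would this element be ignored on its own?" without
+	// emitting anything to the reporters or disturbing the running result.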
+ + oldResult, oldReporters := s.result, s.reporters + s.result = diff.Result{} // Reset result + s.reporters = nil // Remove reporters to avoid spurious printouts + s.compareAny(step) + res := s.result + s.result, s.reporters = oldResult, oldReporters + return res +} + +func (s *state) compareAny(step PathStep) { + // Update the path stack. + s.curPath.push(step) + defer s.curPath.pop() + for _, r := range s.reporters { + r.PushStep(step) + defer r.PopStep() + } + s.recChecker.Check(s.curPath) + + // Cycle-detection for slice elements (see NOTE in compareSlice). + t := step.Type() + vx, vy := step.Values() + if si, ok := step.(SliceIndex); ok && si.isSlice && vx.IsValid() && vy.IsValid() { + px, py := vx.Addr(), vy.Addr() + if eq, visited := s.curPtrs.Push(px, py); visited { + s.report(eq, reportByCycle) + return + } + defer s.curPtrs.Pop(px, py) + } + + // Rule 1: Check whether an option applies on this node in the value tree. + if s.tryOptions(t, vx, vy) { + return + } + + // Rule 2: Check whether the type has a valid Equal method. + if s.tryMethod(t, vx, vy) { + return + } + + // Rule 3: Compare based on the underlying kind. + switch t.Kind() { + case reflect.Bool: + s.report(vx.Bool() == vy.Bool(), 0) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + s.report(vx.Int() == vy.Int(), 0) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + s.report(vx.Uint() == vy.Uint(), 0) + case reflect.Float32, reflect.Float64: + s.report(vx.Float() == vy.Float(), 0) + case reflect.Complex64, reflect.Complex128: + s.report(vx.Complex() == vy.Complex(), 0) + case reflect.String: + s.report(vx.String() == vy.String(), 0) + case reflect.Chan, reflect.UnsafePointer: + s.report(vx.Pointer() == vy.Pointer(), 0) + case reflect.Func: + s.report(vx.IsNil() && vy.IsNil(), 0) + case reflect.Struct: + s.compareStruct(t, vx, vy) + case reflect.Slice, reflect.Array: + s.compareSlice(t, vx, vy) + case reflect.Map: + s.compareMap(t, vx, vy) + case reflect.Ptr: + s.comparePtr(t, vx, vy) + case reflect.Interface: + s.compareInterface(t, vx, vy) + default: + panic(fmt.Sprintf("%v kind not handled", t.Kind())) + } +} + +func (s *state) tryOptions(t reflect.Type, vx, vy reflect.Value) bool { + // Evaluate all filters and apply the remaining options. + if opt := s.opts.filter(s, t, vx, vy); opt != nil { + opt.apply(s, vx, vy) + return true + } + return false +} + +func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool { + // Check if this type even has an Equal method. + m, ok := t.MethodByName("Equal") + if !ok || !function.IsType(m.Type, function.EqualAssignable) { + return false + } + + eq := s.callTTBFunc(m.Func, vx, vy) + s.report(eq, reportByMethod) + return true +} + +func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value { + if !s.dynChecker.Next() { + return f.Call([]reflect.Value{v})[0] + } + + // Run the function twice and ensure that we get the same results back. + // We run in goroutines so that the race detector (if enabled) can detect + // unsafe mutations to the input. + c := make(chan reflect.Value) + go detectRaces(c, f, v) + got := <-c + want := f.Call([]reflect.Value{v})[0] + if step.vx, step.vy = got, want; !s.statelessCompare(step).Equal() { + // To avoid false-positives with non-reflexive equality operations, + // we sanity check whether a value is equal to itself. 
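+	// (Editor's note, not upstream text: equality based on == over
+	// floating-point values is one such non-reflexive operation, since
+	// NaN != NaN.)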
+		if step.vx, step.vy = want, want; !s.statelessCompare(step).Equal() {
+			return want
+		}
+		panic(fmt.Sprintf("non-deterministic function detected: %s", function.NameOf(f)))
+	}
+	return want
+}
+
+func (s *state) callTTBFunc(f, x, y reflect.Value) bool {
+	if !s.dynChecker.Next() {
+		return f.Call([]reflect.Value{x, y})[0].Bool()
+	}
+
+	// Swapping the input arguments is sufficient to check that
+	// f is symmetric and deterministic.
+	// We run in goroutines so that the race detector (if enabled) can detect
+	// unsafe mutations to the input.
+	c := make(chan reflect.Value)
+	go detectRaces(c, f, y, x)
+	got := <-c
+	want := f.Call([]reflect.Value{x, y})[0].Bool()
+	if !got.IsValid() || got.Bool() != want {
+		panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", function.NameOf(f)))
+	}
+	return want
+}
+
+func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) {
+	var ret reflect.Value
+	defer func() {
+		recover() // Ignore panics, let the other call to f panic instead
+		c <- ret
+	}()
+	ret = f.Call(vs)[0]
+}
+
+func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) {
+	var addr bool
+	var vax, vay reflect.Value // Addressable versions of vx and vy
+
+	var mayForce, mayForceInit bool
+	step := StructField{&structField{}}
+	for i := 0; i < t.NumField(); i++ {
+		step.typ = t.Field(i).Type
+		step.vx = vx.Field(i)
+		step.vy = vy.Field(i)
+		step.name = t.Field(i).Name
+		step.idx = i
+		step.unexported = !isExported(step.name)
+		if step.unexported {
+			if step.name == "_" {
+				continue
+			}
+			// Defer checking of unexported fields until later to give an
+			// Ignore a chance to ignore the field.
+			if !vax.IsValid() || !vay.IsValid() {
+				// For retrieveUnexportedField to work, the parent struct must
+				// be addressable. Create a new copy of the values if
+				// necessary to make them addressable.
+				addr = vx.CanAddr() || vy.CanAddr()
+				vax = makeAddressable(vx)
+				vay = makeAddressable(vy)
+			}
+			if !mayForceInit {
+				for _, xf := range s.exporters {
+					mayForce = mayForce || xf(t)
+				}
+				mayForceInit = true
+			}
+			step.mayForce = mayForce
+			step.paddr = addr
+			step.pvx = vax
+			step.pvy = vay
+			step.field = t.Field(i)
+		}
+		s.compareAny(step)
+	}
+}
+
+func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) {
+	isSlice := t.Kind() == reflect.Slice
+	if isSlice && (vx.IsNil() || vy.IsNil()) {
+		s.report(vx.IsNil() && vy.IsNil(), 0)
+		return
+	}
+
+	// NOTE: It is incorrect to call curPtrs.Push on the slice header pointer
+	// since a slice represents a list of pointers, rather than a single pointer.
+	// The pointer checking logic must be handled on a per-element basis
+	// in compareAny.
+	//
+	// A slice header (see reflect.SliceHeader) in Go is a tuple of a starting
+	// pointer P, a length N, and a capacity C. Supposing each slice element has
+	// a memory size of M, then the slice is equivalent to the list of pointers:
+	//	[P+i*M for i in range(N)]
+	//
+	// For example, v[:0] and v[:1] are slices with the same starting pointer,
+	// but they are clearly different values. Using the slice pointer alone
+	// violates the assumption that equal pointers imply equal values.
+
+	step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}, isSlice: isSlice}}
+	withIndexes := func(ix, iy int) SliceIndex {
+		if ix >= 0 {
+			step.vx, step.xkey = vx.Index(ix), ix
+		} else {
+			step.vx, step.xkey = reflect.Value{}, -1
+		}
+		if iy >= 0 {
+			step.vy, step.ykey = vy.Index(iy), iy
+		} else {
+			step.vy, step.ykey = reflect.Value{}, -1
+		}
+		return step
+	}
+
+	// Ignore options are able to ignore missing elements in a slice.
+	// However, detecting these reliably requires an optimal differencing
+	// algorithm, which diff.Difference is not.
+	//
+	// Instead, we first iterate through both slices to detect which elements
+	// would be ignored if standing alone. The indexes of non-discarded elements
+	// are stored in a separate slice, on which diffing is then performed.
+	var indexesX, indexesY []int
+	var ignoredX, ignoredY []bool
+	for ix := 0; ix < vx.Len(); ix++ {
+		ignored := s.statelessCompare(withIndexes(ix, -1)).NumDiff == 0
+		if !ignored {
+			indexesX = append(indexesX, ix)
+		}
+		ignoredX = append(ignoredX, ignored)
+	}
+	for iy := 0; iy < vy.Len(); iy++ {
+		ignored := s.statelessCompare(withIndexes(-1, iy)).NumDiff == 0
+		if !ignored {
+			indexesY = append(indexesY, iy)
+		}
+		ignoredY = append(ignoredY, ignored)
+	}
+
+	// Compute an edit-script for slices vx and vy (excluding ignored elements).
+	edits := diff.Difference(len(indexesX), len(indexesY), func(ix, iy int) diff.Result {
+		return s.statelessCompare(withIndexes(indexesX[ix], indexesY[iy]))
+	})
+
+	// Replay the ignore-scripts and the edit-script.
+	var ix, iy int
+	for ix < vx.Len() || iy < vy.Len() {
+		var e diff.EditType
+		switch {
+		case ix < len(ignoredX) && ignoredX[ix]:
+			e = diff.UniqueX
+		case iy < len(ignoredY) && ignoredY[iy]:
+			e = diff.UniqueY
+		default:
+			e, edits = edits[0], edits[1:]
+		}
+		switch e {
+		case diff.UniqueX:
+			s.compareAny(withIndexes(ix, -1))
+			ix++
+		case diff.UniqueY:
+			s.compareAny(withIndexes(-1, iy))
+			iy++
+		default:
+			s.compareAny(withIndexes(ix, iy))
+			ix++
+			iy++
+		}
+	}
+}
+
+func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) {
+	if vx.IsNil() || vy.IsNil() {
+		s.report(vx.IsNil() && vy.IsNil(), 0)
+		return
+	}
+
+	// Cycle-detection for maps.
+	if eq, visited := s.curPtrs.Push(vx, vy); visited {
+		s.report(eq, reportByCycle)
+		return
+	}
+	defer s.curPtrs.Pop(vx, vy)
+
+	// We combine and sort the keys of both maps so that we can perform the
+	// comparisons in a deterministic order.
+	step := MapIndex{&mapIndex{pathStep: pathStep{typ: t.Elem()}}}
+	for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) {
+		step.vx = vx.MapIndex(k)
+		step.vy = vy.MapIndex(k)
+		step.key = k
+		if !step.vx.IsValid() && !step.vy.IsValid() {
+			// It is possible for both vx and vy to be invalid if the
+			// key contained a NaN value in it.
+			//
+			// Even with the ability to retrieve NaN keys in Go 1.12,
+			// there still isn't a sensible way to compare the values since
+			// a NaN key may map to multiple unordered values.
+			// The most reasonable way to compare NaNs would be to compare the
+			// set of values. However, this is impossible to do efficiently
+			// since set equality is provably an O(n^2) operation given only
+			// an Equal function. If we had a Less function or Hash function,
+			// this could be done in O(n*log(n)) or O(n), respectively.
+			//
+			// Rather than adding complex logic to deal with NaNs, make it
+			// the user's responsibility to compare such obscure maps.
+			const help = "consider providing a Comparer to compare the map"
+			panic(fmt.Sprintf("%#v has map key with NaNs\n%s", s.curPath, help))
+		}
+		s.compareAny(step)
+	}
+}
+
+func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) {
+	if vx.IsNil() || vy.IsNil() {
+		s.report(vx.IsNil() && vy.IsNil(), 0)
+		return
+	}
+
+	// Cycle-detection for pointers.
+	if eq, visited := s.curPtrs.Push(vx, vy); visited {
+		s.report(eq, reportByCycle)
+		return
+	}
+	defer s.curPtrs.Pop(vx, vy)
+
+	vx, vy = vx.Elem(), vy.Elem()
+	s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}})
+}
+
+func (s *state) compareInterface(t reflect.Type, vx, vy reflect.Value) {
+	if vx.IsNil() || vy.IsNil() {
+		s.report(vx.IsNil() && vy.IsNil(), 0)
+		return
+	}
+	vx, vy = vx.Elem(), vy.Elem()
+	if vx.Type() != vy.Type() {
+		s.report(false, 0)
+		return
+	}
+	s.compareAny(TypeAssertion{&typeAssertion{pathStep{vx.Type(), vx, vy}}})
+}
+
+func (s *state) report(eq bool, rf resultFlags) {
+	if rf&reportByIgnore == 0 {
+		if eq {
+			s.result.NumSame++
+			rf |= reportEqual
+		} else {
+			s.result.NumDiff++
+			rf |= reportUnequal
+		}
+	}
+	for _, r := range s.reporters {
+		r.Report(Result{flags: rf})
+	}
+}
+
+// recChecker tracks the state needed to periodically perform checks that
+// user-provided transformers are not stuck in an infinitely recursive cycle.
+type recChecker struct{ next int }
+
+// Check scans the Path for recursive transformers and panics when any are
+// detected. Note that the presence of a recursive Transformer does not
+// necessarily imply an infinite cycle. As such, this check only activates
+// after some minimal number of path steps.
+func (rc *recChecker) Check(p Path) {
+	const minLen = 1 << 16
+	if rc.next == 0 {
+		rc.next = minLen
+	}
+	if len(p) < rc.next {
+		return
+	}
+	rc.next <<= 1
+
+	// Check whether the same transformer has appeared at least twice.
+	var ss []string
+	m := map[Option]int{}
+	for _, ps := range p {
+		if t, ok := ps.(Transform); ok {
+			t := t.Option()
+			if m[t] == 1 { // Transformer was used exactly once before
+				tf := t.(*transformer).fnc.Type()
+				ss = append(ss, fmt.Sprintf("%v: %v => %v", t, tf.In(0), tf.Out(0)))
+			}
+			m[t]++
+		}
+	}
+	if len(ss) > 0 {
+		const warning = "recursive set of Transformers detected"
+		const help = "consider using cmpopts.AcyclicTransformer"
+		set := strings.Join(ss, "\n\t")
+		panic(fmt.Sprintf("%s:\n\t%s\n%s", warning, set, help))
+	}
+}
+
+// dynChecker tracks the state needed to periodically perform checks that
+// user-provided functions are symmetric and deterministic.
+// The zero value is safe for immediate use.
+type dynChecker struct{ curr, next int }
+
+// Next increments the state and reports whether a check should be performed.
+//
+// Checks occur every Nth function call, where N is a triangular number:
+//
+//	0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ...
+//
+// See https://en.wikipedia.org/wiki/Triangular_number
+//
+// This sequence ensures that the cost of checks drops significantly as
+// the number of function calls grows larger.
+func (dc *dynChecker) Next() bool {
+	ok := dc.curr == dc.next
+	if ok {
+		dc.curr = 0
+		dc.next++
+	}
+	dc.curr++
+	return ok
+}
+
+// makeAddressable returns a value that is always addressable.
+// It returns the input verbatim if it is already addressable,
+// otherwise it creates a new value and returns an addressable copy.
+func makeAddressable(v reflect.Value) reflect.Value {
+	if v.CanAddr() {
+		return v
+	}
+	vc := reflect.New(v.Type()).Elem()
+	vc.Set(v)
+	return vc
+}
diff --git a/api/vendor/github.com/google/go-cmp/cmp/export_panic.go b/api/vendor/github.com/google/go-cmp/cmp/export_panic.go
new file mode 100644
index 000000000..ae851fe53
--- /dev/null
+++ b/api/vendor/github.com/google/go-cmp/cmp/export_panic.go
@@ -0,0 +1,16 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build purego
+// +build purego
+
+package cmp
+
+import "reflect"
+
+const supportExporters = false
+
+func retrieveUnexportedField(reflect.Value, reflect.StructField, bool) reflect.Value {
+	panic("no support for forcibly accessing unexported fields")
+}
diff --git a/api/vendor/github.com/google/go-cmp/cmp/export_unsafe.go b/api/vendor/github.com/google/go-cmp/cmp/export_unsafe.go
new file mode 100644
index 000000000..e2c0f74e8
--- /dev/null
+++ b/api/vendor/github.com/google/go-cmp/cmp/export_unsafe.go
@@ -0,0 +1,36 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !purego
+// +build !purego
+
+package cmp
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+const supportExporters = true
+
+// retrieveUnexportedField uses unsafe to forcibly retrieve any field from
+// a struct such that the value has read-write permissions.
+//
+// The parent struct, v, must be addressable, while f must be a StructField
+// describing the field to retrieve. If addr is false,
+// then the returned value will be shallow-copied to be non-addressable.
+func retrieveUnexportedField(v reflect.Value, f reflect.StructField, addr bool) reflect.Value {
+	ve := reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem()
+	if !addr {
+		// A field is addressable if and only if the struct is addressable.
+		// If the original parent value was not addressable, shallow copy the
+		// value to make it non-addressable to avoid leaking an implementation
+		// detail of how forcibly exporting a field works.
+		if ve.Kind() == reflect.Interface && ve.IsNil() {
+			return reflect.Zero(f.Type)
+		}
+		return reflect.ValueOf(ve.Interface()).Convert(f.Type)
+	}
+	return ve
+}
diff --git a/api/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/api/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
new file mode 100644
index 000000000..36062a604
--- /dev/null
+++ b/api/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
@@ -0,0 +1,18 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !cmp_debug
+// +build !cmp_debug
+
+package diff
+
+var debug debugger
+
+type debugger struct{}
+
+func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc {
+	return f
+}
+func (debugger) Update() {}
+func (debugger) Finish() {}
diff --git a/api/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/api/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
new file mode 100644
index 000000000..a3b97a1ad
--- /dev/null
+++ b/api/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
@@ -0,0 +1,123 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build cmp_debug +// +build cmp_debug + +package diff + +import ( + "fmt" + "strings" + "sync" + "time" +) + +// The algorithm can be seen running in real-time by enabling debugging: +// go test -tags=cmp_debug -v +// +// Example output: +// === RUN TestDifference/#34 +// ┌───────────────────────────────┐ +// │ \ · · · · · · · · · · · · · · │ +// │ · # · · · · · · · · · · · · · │ +// │ · \ · · · · · · · · · · · · · │ +// │ · · \ · · · · · · · · · · · · │ +// │ · · · X # · · · · · · · · · · │ +// │ · · · # \ · · · · · · · · · · │ +// │ · · · · · # # · · · · · · · · │ +// │ · · · · · # \ · · · · · · · · │ +// │ · · · · · · · \ · · · · · · · │ +// │ · · · · · · · · \ · · · · · · │ +// │ · · · · · · · · · \ · · · · · │ +// │ · · · · · · · · · · \ · · # · │ +// │ · · · · · · · · · · · \ # # · │ +// │ · · · · · · · · · · · # # # · │ +// │ · · · · · · · · · · # # # # · │ +// │ · · · · · · · · · # # # # # · │ +// │ · · · · · · · · · · · · · · \ │ +// └───────────────────────────────┘ +// [.Y..M.XY......YXYXY.|] +// +// The grid represents the edit-graph where the horizontal axis represents +// list X and the vertical axis represents list Y. The start of the two lists +// is the top-left, while the ends are the bottom-right. The '·' represents +// an unexplored node in the graph. The '\' indicates that the two symbols +// from list X and Y are equal. The 'X' indicates that two symbols are similar +// (but not exactly equal) to each other. The '#' indicates that the two symbols +// are different (and not similar). The algorithm traverses this graph trying to +// make the paths starting in the top-left and the bottom-right connect. +// +// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents +// the currently established path from the forward and reverse searches, +// separated by a '|' character. + +const ( + updateDelay = 100 * time.Millisecond + finishDelay = 500 * time.Millisecond + ansiTerminal = true // ANSI escape codes used to move terminal cursor +) + +var debug debugger + +type debugger struct { + sync.Mutex + p1, p2 EditScript + fwdPath, revPath *EditScript + grid []byte + lines int +} + +func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc { + dbg.Lock() + dbg.fwdPath, dbg.revPath = p1, p2 + top := "┌─" + strings.Repeat("──", nx) + "┐\n" + row := "│ " + strings.Repeat("· ", nx) + "│\n" + btm := "└─" + strings.Repeat("──", nx) + "┘\n" + dbg.grid = []byte(top + strings.Repeat(row, ny) + btm) + dbg.lines = strings.Count(dbg.String(), "\n") + fmt.Print(dbg) + + // Wrap the EqualFunc so that we can intercept each result. 
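+	// Editor's note, not upstream text: the wrapper below paints each probed
+	// cell of the grid with '\', 'X', or '#' according to whether the
+	// underlying EqualFunc reports the pair as equal, similar, or different.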
+ return func(ix, iy int) (r Result) { + cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")] + for i := range cell { + cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot + } + switch r = f(ix, iy); { + case r.Equal(): + cell[0] = '\\' + case r.Similar(): + cell[0] = 'X' + default: + cell[0] = '#' + } + return + } +} + +func (dbg *debugger) Update() { + dbg.print(updateDelay) +} + +func (dbg *debugger) Finish() { + dbg.print(finishDelay) + dbg.Unlock() +} + +func (dbg *debugger) String() string { + dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0] + for i := len(*dbg.revPath) - 1; i >= 0; i-- { + dbg.p2 = append(dbg.p2, (*dbg.revPath)[i]) + } + return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2) +} + +func (dbg *debugger) print(d time.Duration) { + if ansiTerminal { + fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor + } + fmt.Print(dbg) + time.Sleep(d) +} diff --git a/api/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/api/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go new file mode 100644 index 000000000..a248e5436 --- /dev/null +++ b/api/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go @@ -0,0 +1,402 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package diff implements an algorithm for producing edit-scripts. +// The edit-script is a sequence of operations needed to transform one list +// of symbols into another (or vice-versa). The edits allowed are insertions, +// deletions, and modifications. The summation of all edits is called the +// Levenshtein distance as this problem is well-known in computer science. +// +// This package prioritizes performance over accuracy. That is, the run time +// is more important than obtaining a minimal Levenshtein distance. +package diff + +import ( + "math/rand" + "time" + + "github.com/google/go-cmp/cmp/internal/flags" +) + +// EditType represents a single operation within an edit-script. +type EditType uint8 + +const ( + // Identity indicates that a symbol pair is identical in both list X and Y. + Identity EditType = iota + // UniqueX indicates that a symbol only exists in X and not Y. + UniqueX + // UniqueY indicates that a symbol only exists in Y and not X. + UniqueY + // Modified indicates that a symbol pair is a modification of each other. + Modified +) + +// EditScript represents the series of differences between two lists. +type EditScript []EditType + +// String returns a human-readable string representing the edit-script where +// Identity, UniqueX, UniqueY, and Modified are represented by the +// '.', 'X', 'Y', and 'M' characters, respectively. +func (es EditScript) String() string { + b := make([]byte, len(es)) + for i, e := range es { + switch e { + case Identity: + b[i] = '.' + case UniqueX: + b[i] = 'X' + case UniqueY: + b[i] = 'Y' + case Modified: + b[i] = 'M' + default: + panic("invalid edit-type") + } + } + return string(b) +} + +// stats returns a histogram of the number of each type of edit operation. +func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) { + for _, e := range es { + switch e { + case Identity: + s.NI++ + case UniqueX: + s.NX++ + case UniqueY: + s.NY++ + case Modified: + s.NM++ + default: + panic("invalid edit-type") + } + } + return +} + +// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if +// lists X and Y are equal. 
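+//
+// Editor's illustration, not upstream text: the edit-script "..XY" contains
+// two Identity edits out of four total, so its Dist is 4-2 = 2.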
+func (es EditScript) Dist() int { return len(es) - es.stats().NI } + +// LenX is the length of the X list. +func (es EditScript) LenX() int { return len(es) - es.stats().NY } + +// LenY is the length of the Y list. +func (es EditScript) LenY() int { return len(es) - es.stats().NX } + +// EqualFunc reports whether the symbols at indexes ix and iy are equal. +// When called by Difference, the index is guaranteed to be within nx and ny. +type EqualFunc func(ix int, iy int) Result + +// Result is the result of comparison. +// NumSame is the number of sub-elements that are equal. +// NumDiff is the number of sub-elements that are not equal. +type Result struct{ NumSame, NumDiff int } + +// BoolResult returns a Result that is either Equal or not Equal. +func BoolResult(b bool) Result { + if b { + return Result{NumSame: 1} // Equal, Similar + } else { + return Result{NumDiff: 2} // Not Equal, not Similar + } +} + +// Equal indicates whether the symbols are equal. Two symbols are equal +// if and only if NumDiff == 0. If Equal, then they are also Similar. +func (r Result) Equal() bool { return r.NumDiff == 0 } + +// Similar indicates whether two symbols are similar and may be represented +// by using the Modified type. As a special case, we consider binary comparisons +// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar. +// +// The exact ratio of NumSame to NumDiff to determine similarity may change. +func (r Result) Similar() bool { + // Use NumSame+1 to offset NumSame so that binary comparisons are similar. + return r.NumSame+1 >= r.NumDiff +} + +var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 + +// Difference reports whether two lists of lengths nx and ny are equal +// given the definition of equality provided as f. +// +// This function returns an edit-script, which is a sequence of operations +// needed to convert one list into the other. The following invariants for +// the edit-script are maintained: +// - eq == (es.Dist()==0) +// - nx == es.LenX() +// - ny == es.LenY() +// +// This algorithm is not guaranteed to be an optimal solution (i.e., one that +// produces an edit-script with a minimal Levenshtein distance). This algorithm +// favors performance over optimality. The exact output is not guaranteed to +// be stable and may change over time. +func Difference(nx, ny int, f EqualFunc) (es EditScript) { + // This algorithm is based on traversing what is known as an "edit-graph". + // See Figure 1 from "An O(ND) Difference Algorithm and Its Variations" + // by Eugene W. Myers. Since D can be as large as N itself, this is + // effectively O(N^2). Unlike the algorithm from that paper, we are not + // interested in the optimal path, but at least some "decent" path. + // + // For example, let X and Y be lists of symbols: + // X = [A B C A B B A] + // Y = [C B A B A C] + // + // The edit-graph can be drawn as the following: + // A B C A B B A + // ┌─────────────┐ + // C │_|_|\|_|_|_|_│ 0 + // B │_|\|_|_|\|\|_│ 1 + // A │\|_|_|\|_|_|\│ 2 + // B │_|\|_|_|\|\|_│ 3 + // A │\|_|_|\|_|_|\│ 4 + // C │ | |\| | | | │ 5 + // └─────────────┘ 6 + // 0 1 2 3 4 5 6 7 + // + // List X is written along the horizontal axis, while list Y is written + // along the vertical axis. At any point on this grid, if the symbol in + // list X matches the corresponding symbol in list Y, then a '\' is drawn. 
+	// The goal of any minimal edit-script algorithm is to find a path from the
+	// top-left corner to the bottom-right corner, while traveling through the
+	// fewest horizontal or vertical edges.
+	// A horizontal edge is equivalent to inserting a symbol from list X.
+	// A vertical edge is equivalent to inserting a symbol from list Y.
+	// A diagonal edge is equivalent to a matching symbol between both X and Y.
+
+	// Invariants:
+	//	- 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
+	//	- 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
+	//
+	// In general:
+	//	- fwdFrontier.X < revFrontier.X
+	//	- fwdFrontier.Y < revFrontier.Y
+	//
+	// unless it is time for the algorithm to terminate.
+	fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
+	revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
+	fwdFrontier := fwdPath.point // Forward search frontier
+	revFrontier := revPath.point // Reverse search frontier
+
+	// Search budget bounds the cost of searching for better paths.
+	// The longest sequence of non-matching symbols that can be tolerated is
+	// approximately the square-root of the search budget.
+	searchBudget := 4 * (nx + ny) // O(n)
+
+	// Running the tests with the "cmp_debug" build tag prints a visualization
+	// of the algorithm running in real-time. This is educational for
+	// understanding how the algorithm works. See debug_enable.go.
+	f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
+
+	// The algorithm below is a greedy, meet-in-the-middle algorithm for
+	// computing sub-optimal edit-scripts between two lists.
+	//
+	// The algorithm is approximately as follows:
+	//	- Searching for differences switches back-and-forth between
+	//	  a search that starts at the beginning (the top-left corner), and
+	//	  a search that starts at the end (the bottom-right corner).
+	//	  The goal of the search is to connect with the search
+	//	  from the opposite corner.
+	//	- As we search, we build a path in a greedy manner,
+	//	  where the first match seen is added to the path (this is sub-optimal,
+	//	  but provides a decent result in practice). When matches are found,
+	//	  we try the next pair of symbols in the lists and follow all matches
+	//	  as far as possible.
+	//	- When searching for matches, we search along a diagonal going
+	//	  through the "frontier" point. If no matches are found,
+	//	  we advance the frontier towards the opposite corner.
+	//	- This algorithm terminates when either the X coordinates or the
+	//	  Y coordinates of the forward and reverse frontier points intersect.
+
+	// This algorithm is correct even if searching only in the forward direction
+	// or in the reverse direction. We do both because it is commonly observed
+	// that two lists differ because elements were added to the front or
+	// the end of one of the lists.
+	//
+	// Non-deterministically start with either the forward or reverse direction
+	// to introduce some deliberate instability so that we have the flexibility
+	// to change this algorithm in the future.
+	if flags.Deterministic || randBool {
+		goto forwardSearch
+	} else {
+		goto reverseSearch
+	}
+
+forwardSearch:
+	{
+		// Forward search from the beginning.
+		if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
+			goto finishSearch
+		}
+		for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
+			// Search in a diagonal pattern for a match.
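+			// Editor's note, not upstream text: zigzag maps i = 0, 1, 2, 3, ...
+			// to offsets 0, -1, +1, -2, ..., so the probe point p sweeps
+			// outward on both sides of the anti-diagonal through the frontier.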
+ z := zigzag(i) + p := point{fwdFrontier.X + z, fwdFrontier.Y - z} + switch { + case p.X >= revPath.X || p.Y < fwdPath.Y: + stop1 = true // Hit top-right corner + case p.Y >= revPath.Y || p.X < fwdPath.X: + stop2 = true // Hit bottom-left corner + case f(p.X, p.Y).Equal(): + // Match found, so connect the path to this point. + fwdPath.connect(p, f) + fwdPath.append(Identity) + // Follow sequence of matches as far as possible. + for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y { + if !f(fwdPath.X, fwdPath.Y).Equal() { + break + } + fwdPath.append(Identity) + } + fwdFrontier = fwdPath.point + stop1, stop2 = true, true + default: + searchBudget-- // Match not found + } + debug.Update() + } + // Advance the frontier towards reverse point. + if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y { + fwdFrontier.X++ + } else { + fwdFrontier.Y++ + } + goto reverseSearch + } + +reverseSearch: + { + // Reverse search from the end. + if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { + goto finishSearch + } + for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { + // Search in a diagonal pattern for a match. + z := zigzag(i) + p := point{revFrontier.X - z, revFrontier.Y + z} + switch { + case fwdPath.X >= p.X || revPath.Y < p.Y: + stop1 = true // Hit bottom-left corner + case fwdPath.Y >= p.Y || revPath.X < p.X: + stop2 = true // Hit top-right corner + case f(p.X-1, p.Y-1).Equal(): + // Match found, so connect the path to this point. + revPath.connect(p, f) + revPath.append(Identity) + // Follow sequence of matches as far as possible. + for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y { + if !f(revPath.X-1, revPath.Y-1).Equal() { + break + } + revPath.append(Identity) + } + revFrontier = revPath.point + stop1, stop2 = true, true + default: + searchBudget-- // Match not found + } + debug.Update() + } + // Advance the frontier towards forward point. + if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y { + revFrontier.X-- + } else { + revFrontier.Y-- + } + goto forwardSearch + } + +finishSearch: + // Join the forward and reverse paths and then append the reverse path. + fwdPath.connect(revPath.point, f) + for i := len(revPath.es) - 1; i >= 0; i-- { + t := revPath.es[i] + revPath.es = revPath.es[:i] + fwdPath.append(t) + } + debug.Finish() + return fwdPath.es +} + +type path struct { + dir int // +1 if forward, -1 if reverse + point // Leading point of the EditScript path + es EditScript +} + +// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types +// to the edit-script to connect p.point to dst. +func (p *path) connect(dst point, f EqualFunc) { + if p.dir > 0 { + // Connect in forward direction. + for dst.X > p.X && dst.Y > p.Y { + switch r := f(p.X, p.Y); { + case r.Equal(): + p.append(Identity) + case r.Similar(): + p.append(Modified) + case dst.X-p.X >= dst.Y-p.Y: + p.append(UniqueX) + default: + p.append(UniqueY) + } + } + for dst.X > p.X { + p.append(UniqueX) + } + for dst.Y > p.Y { + p.append(UniqueY) + } + } else { + // Connect in reverse direction. 
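+		// Editor's note, not upstream text: this is the mirror image of the
+		// forward case; probes use f(p.X-1, p.Y-1) because the reverse path
+		// leads its point in from the bottom-right corner.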
+ for p.X > dst.X && p.Y > dst.Y { + switch r := f(p.X-1, p.Y-1); { + case r.Equal(): + p.append(Identity) + case r.Similar(): + p.append(Modified) + case p.Y-dst.Y >= p.X-dst.X: + p.append(UniqueY) + default: + p.append(UniqueX) + } + } + for p.X > dst.X { + p.append(UniqueX) + } + for p.Y > dst.Y { + p.append(UniqueY) + } + } +} + +func (p *path) append(t EditType) { + p.es = append(p.es, t) + switch t { + case Identity, Modified: + p.add(p.dir, p.dir) + case UniqueX: + p.add(p.dir, 0) + case UniqueY: + p.add(0, p.dir) + } + debug.Update() +} + +type point struct{ X, Y int } + +func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy } + +// zigzag maps a consecutive sequence of integers to a zig-zag sequence. +// +// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...] +func zigzag(x int) int { + if x&1 != 0 { + x = ^x + } + return x >> 1 +} diff --git a/api/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go b/api/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go new file mode 100644 index 000000000..d8e459c9b --- /dev/null +++ b/api/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go @@ -0,0 +1,9 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flags + +// Deterministic controls whether the output of Diff should be deterministic. +// This is only used for testing. +var Deterministic bool diff --git a/api/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/api/vendor/github.com/google/go-cmp/cmp/internal/function/func.go new file mode 100644 index 000000000..d127d4362 --- /dev/null +++ b/api/vendor/github.com/google/go-cmp/cmp/internal/function/func.go @@ -0,0 +1,99 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package function provides functionality for identifying function types. +package function + +import ( + "reflect" + "regexp" + "runtime" + "strings" +) + +type funcType int + +const ( + _ funcType = iota + + tbFunc // func(T) bool + ttbFunc // func(T, T) bool + trbFunc // func(T, R) bool + tibFunc // func(T, I) bool + trFunc // func(T) R + + Equal = ttbFunc // func(T, T) bool + EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool + Transformer = trFunc // func(T) R + ValueFilter = ttbFunc // func(T, T) bool + Less = ttbFunc // func(T, T) bool + ValuePredicate = tbFunc // func(T) bool + KeyValuePredicate = trbFunc // func(T, R) bool +) + +var boolType = reflect.TypeOf(true) + +// IsType reports whether the reflect.Type is of the specified function type. 
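+//
+// Editor's illustration, not upstream text: a func(int, int) bool satisfies
+// Equal (ttbFunc), while a func(string) int satisfies Transformer (trFunc).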
+func IsType(t reflect.Type, ft funcType) bool { + if t == nil || t.Kind() != reflect.Func || t.IsVariadic() { + return false + } + ni, no := t.NumIn(), t.NumOut() + switch ft { + case tbFunc: // func(T) bool + if ni == 1 && no == 1 && t.Out(0) == boolType { + return true + } + case ttbFunc: // func(T, T) bool + if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType { + return true + } + case trbFunc: // func(T, R) bool + if ni == 2 && no == 1 && t.Out(0) == boolType { + return true + } + case tibFunc: // func(T, I) bool + if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType { + return true + } + case trFunc: // func(T) R + if ni == 1 && no == 1 { + return true + } + } + return false +} + +var lastIdentRx = regexp.MustCompile(`[_\p{L}][_\p{L}\p{N}]*$`) + +// NameOf returns the name of the function value. +func NameOf(v reflect.Value) string { + fnc := runtime.FuncForPC(v.Pointer()) + if fnc == nil { + return "" + } + fullName := fnc.Name() // e.g., "long/path/name/mypkg.(*MyType).(long/path/name/mypkg.myMethod)-fm" + + // Method closures have a "-fm" suffix. + fullName = strings.TrimSuffix(fullName, "-fm") + + var name string + for len(fullName) > 0 { + inParen := strings.HasSuffix(fullName, ")") + fullName = strings.TrimSuffix(fullName, ")") + + s := lastIdentRx.FindString(fullName) + if s == "" { + break + } + name = s + "." + name + fullName = strings.TrimSuffix(fullName, s) + + if i := strings.LastIndexByte(fullName, '('); inParen && i >= 0 { + fullName = fullName[:i] + } + fullName = strings.TrimSuffix(fullName, ".") + } + return strings.TrimSuffix(name, ".") +} diff --git a/api/vendor/github.com/google/go-cmp/cmp/internal/value/name.go b/api/vendor/github.com/google/go-cmp/cmp/internal/value/name.go new file mode 100644 index 000000000..7b498bb2c --- /dev/null +++ b/api/vendor/github.com/google/go-cmp/cmp/internal/value/name.go @@ -0,0 +1,164 @@ +// Copyright 2020, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package value + +import ( + "reflect" + "strconv" +) + +var anyType = reflect.TypeOf((*interface{})(nil)).Elem() + +// TypeString is nearly identical to reflect.Type.String, +// but has an additional option to specify that full type names be used. +func TypeString(t reflect.Type, qualified bool) string { + return string(appendTypeName(nil, t, qualified, false)) +} + +func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte { + // BUG: Go reflection provides no way to disambiguate two named types + // of the same name and within the same package, + // but declared within the namespace of different functions. + + // Use the "any" alias instead of "interface{}" for better readability. + if t == anyType { + return append(b, "any"...) + } + + // Named type. + if t.Name() != "" { + if qualified && t.PkgPath() != "" { + b = append(b, '"') + b = append(b, t.PkgPath()...) + b = append(b, '"') + b = append(b, '.') + b = append(b, t.Name()...) + } else { + b = append(b, t.String()...) + } + return b + } + + // Unnamed type. + switch k := t.Kind(); k { + case reflect.Bool, reflect.String, reflect.UnsafePointer, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + b = append(b, k.String()...) 
+ case reflect.Chan: + if t.ChanDir() == reflect.RecvDir { + b = append(b, "<-"...) + } + b = append(b, "chan"...) + if t.ChanDir() == reflect.SendDir { + b = append(b, "<-"...) + } + b = append(b, ' ') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Func: + if !elideFunc { + b = append(b, "func"...) + } + b = append(b, '(') + for i := 0; i < t.NumIn(); i++ { + if i > 0 { + b = append(b, ", "...) + } + if i == t.NumIn()-1 && t.IsVariadic() { + b = append(b, "..."...) + b = appendTypeName(b, t.In(i).Elem(), qualified, false) + } else { + b = appendTypeName(b, t.In(i), qualified, false) + } + } + b = append(b, ')') + switch t.NumOut() { + case 0: + // Do nothing + case 1: + b = append(b, ' ') + b = appendTypeName(b, t.Out(0), qualified, false) + default: + b = append(b, " ("...) + for i := 0; i < t.NumOut(); i++ { + if i > 0 { + b = append(b, ", "...) + } + b = appendTypeName(b, t.Out(i), qualified, false) + } + b = append(b, ')') + } + case reflect.Struct: + b = append(b, "struct{ "...) + for i := 0; i < t.NumField(); i++ { + if i > 0 { + b = append(b, "; "...) + } + sf := t.Field(i) + if !sf.Anonymous { + if qualified && sf.PkgPath != "" { + b = append(b, '"') + b = append(b, sf.PkgPath...) + b = append(b, '"') + b = append(b, '.') + } + b = append(b, sf.Name...) + b = append(b, ' ') + } + b = appendTypeName(b, sf.Type, qualified, false) + if sf.Tag != "" { + b = append(b, ' ') + b = strconv.AppendQuote(b, string(sf.Tag)) + } + } + if b[len(b)-1] == ' ' { + b = b[:len(b)-1] + } else { + b = append(b, ' ') + } + b = append(b, '}') + case reflect.Slice, reflect.Array: + b = append(b, '[') + if k == reflect.Array { + b = strconv.AppendUint(b, uint64(t.Len()), 10) + } + b = append(b, ']') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Map: + b = append(b, "map["...) + b = appendTypeName(b, t.Key(), qualified, false) + b = append(b, ']') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Ptr: + b = append(b, '*') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Interface: + b = append(b, "interface{ "...) + for i := 0; i < t.NumMethod(); i++ { + if i > 0 { + b = append(b, "; "...) + } + m := t.Method(i) + if qualified && m.PkgPath != "" { + b = append(b, '"') + b = append(b, m.PkgPath...) + b = append(b, '"') + b = append(b, '.') + } + b = append(b, m.Name...) + b = appendTypeName(b, m.Type, qualified, true) + } + if b[len(b)-1] == ' ' { + b = b[:len(b)-1] + } else { + b = append(b, ' ') + } + b = append(b, '}') + default: + panic("invalid kind: " + k.String()) + } + return b +} diff --git a/api/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/api/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go new file mode 100644 index 000000000..1a71bfcbd --- /dev/null +++ b/api/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go @@ -0,0 +1,34 @@ +// Copyright 2018, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build purego +// +build purego + +package value + +import "reflect" + +// Pointer is an opaque typed pointer and is guaranteed to be comparable. +type Pointer struct { + p uintptr + t reflect.Type +} + +// PointerOf returns a Pointer from v, which must be a +// reflect.Ptr, reflect.Slice, or reflect.Map. 
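+//
+// Editor's note, not upstream text: this is the purego variant; it stores the
+// pointer as a uintptr, which the NOTE below flags as incorrect under a
+// hypothetical moving garbage collector.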
+func PointerOf(v reflect.Value) Pointer { + // NOTE: Storing a pointer as an uintptr is technically incorrect as it + // assumes that the GC implementation does not use a moving collector. + return Pointer{v.Pointer(), v.Type()} +} + +// IsNil reports whether the pointer is nil. +func (p Pointer) IsNil() bool { + return p.p == 0 +} + +// Uintptr returns the pointer as a uintptr. +func (p Pointer) Uintptr() uintptr { + return p.p +} diff --git a/api/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/api/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go new file mode 100644 index 000000000..16e6860af --- /dev/null +++ b/api/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go @@ -0,0 +1,37 @@ +// Copyright 2018, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !purego +// +build !purego + +package value + +import ( + "reflect" + "unsafe" +) + +// Pointer is an opaque typed pointer and is guaranteed to be comparable. +type Pointer struct { + p unsafe.Pointer + t reflect.Type +} + +// PointerOf returns a Pointer from v, which must be a +// reflect.Ptr, reflect.Slice, or reflect.Map. +func PointerOf(v reflect.Value) Pointer { + // The proper representation of a pointer is unsafe.Pointer, + // which is necessary if the GC ever uses a moving collector. + return Pointer{unsafe.Pointer(v.Pointer()), v.Type()} +} + +// IsNil reports whether the pointer is nil. +func (p Pointer) IsNil() bool { + return p.p == nil +} + +// Uintptr returns the pointer as a uintptr. +func (p Pointer) Uintptr() uintptr { + return uintptr(p.p) +} diff --git a/api/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go b/api/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go new file mode 100644 index 000000000..98533b036 --- /dev/null +++ b/api/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go @@ -0,0 +1,106 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package value + +import ( + "fmt" + "math" + "reflect" + "sort" +) + +// SortKeys sorts a list of map keys, deduplicating keys if necessary. +// The type of each value must be comparable. +func SortKeys(vs []reflect.Value) []reflect.Value { + if len(vs) == 0 { + return vs + } + + // Sort the map keys. + sort.SliceStable(vs, func(i, j int) bool { return isLess(vs[i], vs[j]) }) + + // Deduplicate keys (fails for NaNs). + vs2 := vs[:1] + for _, v := range vs[1:] { + if isLess(vs2[len(vs2)-1], v) { + vs2 = append(vs2, v) + } + } + return vs2 +} + +// isLess is a generic function for sorting arbitrary map keys. +// The inputs must be of the same type and must be comparable. +func isLess(x, y reflect.Value) bool { + switch x.Type().Kind() { + case reflect.Bool: + return !x.Bool() && y.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return x.Int() < y.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return x.Uint() < y.Uint() + case reflect.Float32, reflect.Float64: + // NOTE: This does not sort -0 as less than +0 + // since Go maps treat -0 and +0 as equal keys. 
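+		// Editor's note, not upstream text: with the expression below, a NaN
+		// key sorts before every non-NaN key, while two NaNs compare as
+		// equivalent, which is why SortKeys above says deduplication
+		// "fails for NaNs".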
+ fx, fy := x.Float(), y.Float() + return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy) + case reflect.Complex64, reflect.Complex128: + cx, cy := x.Complex(), y.Complex() + rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy) + if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) { + return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy) + } + return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry) + case reflect.Ptr, reflect.UnsafePointer, reflect.Chan: + return x.Pointer() < y.Pointer() + case reflect.String: + return x.String() < y.String() + case reflect.Array: + for i := 0; i < x.Len(); i++ { + if isLess(x.Index(i), y.Index(i)) { + return true + } + if isLess(y.Index(i), x.Index(i)) { + return false + } + } + return false + case reflect.Struct: + for i := 0; i < x.NumField(); i++ { + if isLess(x.Field(i), y.Field(i)) { + return true + } + if isLess(y.Field(i), x.Field(i)) { + return false + } + } + return false + case reflect.Interface: + vx, vy := x.Elem(), y.Elem() + if !vx.IsValid() || !vy.IsValid() { + return !vx.IsValid() && vy.IsValid() + } + tx, ty := vx.Type(), vy.Type() + if tx == ty { + return isLess(x.Elem(), y.Elem()) + } + if tx.Kind() != ty.Kind() { + return vx.Kind() < vy.Kind() + } + if tx.String() != ty.String() { + return tx.String() < ty.String() + } + if tx.PkgPath() != ty.PkgPath() { + return tx.PkgPath() < ty.PkgPath() + } + // This can happen in rare situations, so we fallback to just comparing + // the unique pointer for a reflect.Type. This guarantees deterministic + // ordering within a program, but it is obviously not stable. + return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer() + default: + // Must be Func, Map, or Slice; which are not comparable. + panic(fmt.Sprintf("%T is not comparable", x.Type())) + } +} diff --git a/api/vendor/github.com/google/go-cmp/cmp/options.go b/api/vendor/github.com/google/go-cmp/cmp/options.go new file mode 100644 index 000000000..1f9ca9c48 --- /dev/null +++ b/api/vendor/github.com/google/go-cmp/cmp/options.go @@ -0,0 +1,554 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmp + +import ( + "fmt" + "reflect" + "regexp" + "strings" + + "github.com/google/go-cmp/cmp/internal/function" +) + +// Option configures for specific behavior of Equal and Diff. In particular, +// the fundamental Option functions (Ignore, Transformer, and Comparer), +// configure how equality is determined. +// +// The fundamental options may be composed with filters (FilterPath and +// FilterValues) to control the scope over which they are applied. +// +// The cmp/cmpopts package provides helper functions for creating options that +// may be used with Equal and Diff. +type Option interface { + // filter applies all filters and returns the option that remains. + // Each option may only read s.curPath and call s.callTTBFunc. + // + // An Options is returned only if multiple comparers or transformers + // can apply simultaneously and will only contain values of those types + // or sub-Options containing values of those types. + filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption +} + +// applicableOption represents the following types: +// +// Fundamental: ignore | validator | *comparer | *transformer +// Grouping: Options +type applicableOption interface { + Option + + // apply executes the option, which may mutate s or panic. 
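+	// (Editor's note, not upstream text: Options.apply, for instance, always
+	// panics, since reaching it means multiple comparers or transformers
+	// applied ambiguously.)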
+ apply(s *state, vx, vy reflect.Value) +} + +// coreOption represents the following types: +// +// Fundamental: ignore | validator | *comparer | *transformer +// Filters: *pathFilter | *valuesFilter +type coreOption interface { + Option + isCore() +} + +type core struct{} + +func (core) isCore() {} + +// Options is a list of Option values that also satisfies the Option interface. +// Helper comparison packages may return an Options value when packing multiple +// Option values into a single Option. When this package processes an Options, +// it will be implicitly expanded into a flat list. +// +// Applying a filter on an Options is equivalent to applying that same filter +// on all individual options held within. +type Options []Option + +func (opts Options) filter(s *state, t reflect.Type, vx, vy reflect.Value) (out applicableOption) { + for _, opt := range opts { + switch opt := opt.filter(s, t, vx, vy); opt.(type) { + case ignore: + return ignore{} // Only ignore can short-circuit evaluation + case validator: + out = validator{} // Takes precedence over comparer or transformer + case *comparer, *transformer, Options: + switch out.(type) { + case nil: + out = opt + case validator: + // Keep validator + case *comparer, *transformer, Options: + out = Options{out, opt} // Conflicting comparers or transformers + } + } + } + return out +} + +func (opts Options) apply(s *state, _, _ reflect.Value) { + const warning = "ambiguous set of applicable options" + const help = "consider using filters to ensure at most one Comparer or Transformer may apply" + var ss []string + for _, opt := range flattenOptions(nil, opts) { + ss = append(ss, fmt.Sprint(opt)) + } + set := strings.Join(ss, "\n\t") + panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help)) +} + +func (opts Options) String() string { + var ss []string + for _, opt := range opts { + ss = append(ss, fmt.Sprint(opt)) + } + return fmt.Sprintf("Options{%s}", strings.Join(ss, ", ")) +} + +// FilterPath returns a new Option where opt is only evaluated if filter f +// returns true for the current Path in the value tree. +// +// This filter is called even if a slice element or map entry is missing and +// provides an opportunity to ignore such cases. The filter function must be +// symmetric such that the filter result is identical regardless of whether the +// missing value is from x or y. +// +// The option passed in may be an Ignore, Transformer, Comparer, Options, or +// a previously filtered Option. +func FilterPath(f func(Path) bool, opt Option) Option { + if f == nil { + panic("invalid path filter function") + } + if opt := normalizeOption(opt); opt != nil { + return &pathFilter{fnc: f, opt: opt} + } + return nil +} + +type pathFilter struct { + core + fnc func(Path) bool + opt Option +} + +func (f pathFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption { + if f.fnc(s.curPath) { + return f.opt.filter(s, t, vx, vy) + } + return nil +} + +func (f pathFilter) String() string { + return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt) +} + +// FilterValues returns a new Option where opt is only evaluated if filter f, +// which is a function of the form "func(T, T) bool", returns true for the +// current pair of values being compared. If either value is invalid or +// the type of the values is not assignable to T, then this filter implicitly +// returns false. 
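+//
+// Editor's illustration, not upstream text: assuming the standard math
+// package, a filter can equate all NaN pairs:
+//
+//	opt := cmp.FilterValues(
+//		func(x, y float64) bool { return math.IsNaN(x) && math.IsNaN(y) },
+//		cmp.Comparer(func(x, y float64) bool { return true }),
+//	)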
+// +// The filter function must be +// symmetric (i.e., agnostic to the order of the inputs) and +// deterministic (i.e., produces the same result when given the same inputs). +// If T is an interface, it is possible that f is called with two values with +// different concrete types that both implement T. +// +// The option passed in may be an Ignore, Transformer, Comparer, Options, or +// a previously filtered Option. +func FilterValues(f interface{}, opt Option) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() { + panic(fmt.Sprintf("invalid values filter function: %T", f)) + } + if opt := normalizeOption(opt); opt != nil { + vf := &valuesFilter{fnc: v, opt: opt} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + vf.typ = ti + } + return vf + } + return nil +} + +type valuesFilter struct { + core + typ reflect.Type // T + fnc reflect.Value // func(T, T) bool + opt Option +} + +func (f valuesFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption { + if !vx.IsValid() || !vx.CanInterface() || !vy.IsValid() || !vy.CanInterface() { + return nil + } + if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) { + return f.opt.filter(s, t, vx, vy) + } + return nil +} + +func (f valuesFilter) String() string { + return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt) +} + +// Ignore is an Option that causes all comparisons to be ignored. +// This value is intended to be combined with FilterPath or FilterValues. +// It is an error to pass an unfiltered Ignore option to Equal. +func Ignore() Option { return ignore{} } + +type ignore struct{ core } + +func (ignore) isFiltered() bool { return false } +func (ignore) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { return ignore{} } +func (ignore) apply(s *state, _, _ reflect.Value) { s.report(true, reportByIgnore) } +func (ignore) String() string { return "Ignore()" } + +// validator is a sentinel Option type to indicate that some options could not +// be evaluated due to unexported fields, missing slice elements, or +// missing map entries. Both values are validator only for unexported fields. +type validator struct{ core } + +func (validator) filter(_ *state, _ reflect.Type, vx, vy reflect.Value) applicableOption { + if !vx.IsValid() || !vy.IsValid() { + return validator{} + } + if !vx.CanInterface() || !vy.CanInterface() { + return validator{} + } + return nil +} +func (validator) apply(s *state, vx, vy reflect.Value) { + // Implies missing slice element or map entry. + if !vx.IsValid() || !vy.IsValid() { + s.report(vx.IsValid() == vy.IsValid(), 0) + return + } + + // Unable to Interface implies unexported field without visibility access. + if !vx.CanInterface() || !vy.CanInterface() { + help := "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported" + var name string + if t := s.curPath.Index(-2).Type(); t.Name() != "" { + // Named type with unexported fields. + name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType + if _, ok := reflect.New(t).Interface().(error); ok { + help = "consider using cmpopts.EquateErrors to compare error values" + } + } else { + // Unnamed type with unexported fields. Derive PkgPath from field. 
+ var pkgPath string + for i := 0; i < t.NumField() && pkgPath == ""; i++ { + pkgPath = t.Field(i).PkgPath + } + name = fmt.Sprintf("%q.(%v)", pkgPath, t.String()) // e.g., "path/to/package".(struct { a int }) + } + panic(fmt.Sprintf("cannot handle unexported field at %#v:\n\t%v\n%s", s.curPath, name, help)) + } + + panic("not reachable") +} + +// identRx represents a valid identifier according to the Go specification. +const identRx = `[_\p{L}][_\p{L}\p{N}]*` + +var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`) + +// Transformer returns an Option that applies a transformation function that +// converts values of a certain type into that of another. +// +// The transformer f must be a function "func(T) R" that converts values of +// type T to those of type R and is implicitly filtered to input values +// assignable to T. The transformer must not mutate T in any way. +// +// To help prevent some cases of infinite recursive cycles applying the +// same transform to the output of itself (e.g., in the case where the +// input and output types are the same), an implicit filter is added such that +// a transformer is applicable only if that exact transformer is not already +// in the tail of the Path since the last non-Transform step. +// For situations where the implicit filter is still insufficient, +// consider using cmpopts.AcyclicTransformer, which adds a filter +// to prevent the transformer from being recursively applied upon itself. +// +// The name is a user provided label that is used as the Transform.Name in the +// transformation PathStep (and eventually shown in the Diff output). +// The name must be a valid identifier or qualified identifier in Go syntax. +// If empty, an arbitrary name is used. +func Transformer(name string, f interface{}) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.Transformer) || v.IsNil() { + panic(fmt.Sprintf("invalid transformer function: %T", f)) + } + if name == "" { + name = function.NameOf(v) + if !identsRx.MatchString(name) { + name = "λ" // Lambda-symbol as placeholder name + } + } else if !identsRx.MatchString(name) { + panic(fmt.Sprintf("invalid name: %q", name)) + } + tr := &transformer{name: name, fnc: reflect.ValueOf(f)} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + tr.typ = ti + } + return tr +} + +type transformer struct { + core + name string + typ reflect.Type // T + fnc reflect.Value // func(T) R +} + +func (tr *transformer) isFiltered() bool { return tr.typ != nil } + +func (tr *transformer) filter(s *state, t reflect.Type, _, _ reflect.Value) applicableOption { + for i := len(s.curPath) - 1; i >= 0; i-- { + if t, ok := s.curPath[i].(Transform); !ok { + break // Hit most recent non-Transform step + } else if tr == t.trans { + return nil // Cannot directly use same Transform + } + } + if tr.typ == nil || t.AssignableTo(tr.typ) { + return tr + } + return nil +} + +func (tr *transformer) apply(s *state, vx, vy reflect.Value) { + step := Transform{&transform{pathStep{typ: tr.fnc.Type().Out(0)}, tr}} + vvx := s.callTRFunc(tr.fnc, vx, step) + vvy := s.callTRFunc(tr.fnc, vy, step) + step.vx, step.vy = vvx, vvy + s.compareAny(step) +} + +func (tr transformer) String() string { + return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc)) +} + +// Comparer returns an Option that determines whether two values are equal +// to each other. 
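+//
+// For example (an editor's illustrative sketch), a case-insensitive
+// string comparer could be written as:
+//
+//	Comparer(func(x, y string) bool { return strings.EqualFold(x, y) })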
+// +// The comparer f must be a function "func(T, T) bool" and is implicitly +// filtered to input values assignable to T. If T is an interface, it is +// possible that f is called with two values of different concrete types that +// both implement T. +// +// The equality function must be: +// - Symmetric: equal(x, y) == equal(y, x) +// - Deterministic: equal(x, y) == equal(x, y) +// - Pure: equal(x, y) does not modify x or y +func Comparer(f interface{}) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.Equal) || v.IsNil() { + panic(fmt.Sprintf("invalid comparer function: %T", f)) + } + cm := &comparer{fnc: v} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + cm.typ = ti + } + return cm +} + +type comparer struct { + core + typ reflect.Type // T + fnc reflect.Value // func(T, T) bool +} + +func (cm *comparer) isFiltered() bool { return cm.typ != nil } + +func (cm *comparer) filter(_ *state, t reflect.Type, _, _ reflect.Value) applicableOption { + if cm.typ == nil || t.AssignableTo(cm.typ) { + return cm + } + return nil +} + +func (cm *comparer) apply(s *state, vx, vy reflect.Value) { + eq := s.callTTBFunc(cm.fnc, vx, vy) + s.report(eq, reportByFunc) +} + +func (cm comparer) String() string { + return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc)) +} + +// Exporter returns an Option that specifies whether Equal is allowed to +// introspect into the unexported fields of certain struct types. +// +// Users of this option must understand that comparing on unexported fields +// from external packages is not safe since changes in the internal +// implementation of some external package may cause the result of Equal +// to unexpectedly change. However, it may be valid to use this option on types +// defined in an internal package where the semantic meaning of an unexported +// field is in the control of the user. +// +// In many cases, a custom Comparer should be used instead that defines +// equality as a function of the public API of a type rather than the underlying +// unexported implementation. +// +// For example, the reflect.Type documentation defines equality to be determined +// by the == operator on the interface (essentially performing a shallow pointer +// comparison) and most attempts to compare *regexp.Regexp types are interested +// in only checking that the regular expression strings are equal. +// Both of these are accomplished using Comparers: +// +// Comparer(func(x, y reflect.Type) bool { return x == y }) +// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() }) +// +// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore +// all unexported fields on specified struct types. +func Exporter(f func(reflect.Type) bool) Option { + if !supportExporters { + panic("Exporter is not supported on purego builds") + } + return exporter(f) +} + +type exporter func(reflect.Type) bool + +func (exporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { + panic("not implemented") +} + +// AllowUnexported returns an Options that allows Equal to forcibly introspect +// unexported fields of the specified struct types. +// +// See Exporter for the proper use of this option. 
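+//
+// A typical call (editor's sketch; MyStruct is a hypothetical type with
+// unexported fields) looks like:
+//
+//	Equal(x, y, AllowUnexported(MyStruct{}))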
+func AllowUnexported(types ...interface{}) Option {
+	m := make(map[reflect.Type]bool)
+	for _, typ := range types {
+		t := reflect.TypeOf(typ)
+		if t.Kind() != reflect.Struct {
+			panic(fmt.Sprintf("invalid struct type: %T", typ))
+		}
+		m[t] = true
+	}
+	return exporter(func(t reflect.Type) bool { return m[t] })
+}
+
+// Result represents the comparison result for a single node and
+// is provided by cmp when calling Report (see Reporter).
+type Result struct {
+	_     [0]func() // Make Result incomparable
+	flags resultFlags
+}
+
+// Equal reports whether the node was determined to be equal or not.
+// As a special case, ignored nodes are considered equal.
+func (r Result) Equal() bool {
+	return r.flags&(reportEqual|reportByIgnore) != 0
+}
+
+// ByIgnore reports whether the node is equal because it was ignored.
+// This never reports true if Equal reports false.
+func (r Result) ByIgnore() bool {
+	return r.flags&reportByIgnore != 0
+}
+
+// ByMethod reports whether the Equal method determined equality.
+func (r Result) ByMethod() bool {
+	return r.flags&reportByMethod != 0
+}
+
+// ByFunc reports whether a Comparer function determined equality.
+func (r Result) ByFunc() bool {
+	return r.flags&reportByFunc != 0
+}
+
+// ByCycle reports whether a reference cycle was detected.
+func (r Result) ByCycle() bool {
+	return r.flags&reportByCycle != 0
+}
+
+type resultFlags uint
+
+const (
+	_ resultFlags = (1 << iota) / 2
+
+	reportEqual
+	reportUnequal
+	reportByIgnore
+	reportByMethod
+	reportByFunc
+	reportByCycle
+)
+
+// Reporter is an Option that can be passed to Equal. When Equal traverses
+// the value trees, it calls PushStep as it descends into each node in the
+// tree and PopStep as it ascends out of the node. The leaves of the tree are
+// either compared (determined to be equal or not equal) or ignored and reported
+// as such by calling the Report method.
+func Reporter(r interface {
+	// PushStep is called when a tree-traversal operation is performed.
+	// The PathStep itself is only valid until the step is popped.
+	// The PathStep.Values are valid for the duration of the entire traversal
+	// and must not be mutated.
+	//
+	// Equal always calls PushStep at the start to provide an operation-less
+	// PathStep used to report the root values.
+	//
+	// Within a slice, the exact set of inserted, removed, or modified elements
+	// is unspecified and may change in future implementations.
+	// The entries of a map are iterated through in an unspecified order.
+	PushStep(PathStep)
+
+	// Report is called exactly once on leaf nodes to report whether the
+	// comparison identified the node as equal, unequal, or ignored.
+	// A leaf node is one that is immediately preceded by and followed by
+	// a pair of PushStep and PopStep calls.
+	Report(Result)
+
+	// PopStep ascends back up the value tree.
+	// There is always a matching pop call for every push call.
+	PopStep()
+}) Option {
+	return reporter{r}
+}
+
+type reporter struct{ reporterIface }
+type reporterIface interface {
+	PushStep(PathStep)
+	Report(Result)
+	PopStep()
+}
+
+func (reporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
+	panic("not implemented")
+}
+
+// normalizeOption normalizes the input options such that all Options groups
+// are flattened and groups with a single element are reduced to that element.
+// Only coreOptions and Options containing coreOptions are allowed.
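+// For example (editor's note), Options{Options{a, b}, c} normalizes to
+// Options{a, b, c}, a single-element group Options{a} reduces to just a,
+// and an empty group normalizes to nil.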
+func normalizeOption(src Option) Option { + switch opts := flattenOptions(nil, Options{src}); len(opts) { + case 0: + return nil + case 1: + return opts[0] + default: + return opts + } +} + +// flattenOptions copies all options in src to dst as a flat list. +// Only coreOptions and Options containing coreOptions are allowed. +func flattenOptions(dst, src Options) Options { + for _, opt := range src { + switch opt := opt.(type) { + case nil: + continue + case Options: + dst = flattenOptions(dst, opt) + case coreOption: + dst = append(dst, opt) + default: + panic(fmt.Sprintf("invalid option type: %T", opt)) + } + } + return dst +} diff --git a/api/vendor/github.com/google/go-cmp/cmp/path.go b/api/vendor/github.com/google/go-cmp/cmp/path.go new file mode 100644 index 000000000..a0a588502 --- /dev/null +++ b/api/vendor/github.com/google/go-cmp/cmp/path.go @@ -0,0 +1,380 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmp + +import ( + "fmt" + "reflect" + "strings" + "unicode" + "unicode/utf8" + + "github.com/google/go-cmp/cmp/internal/value" +) + +// Path is a list of PathSteps describing the sequence of operations to get +// from some root type to the current position in the value tree. +// The first Path element is always an operation-less PathStep that exists +// simply to identify the initial type. +// +// When traversing structs with embedded structs, the embedded struct will +// always be accessed as a field before traversing the fields of the +// embedded struct themselves. That is, an exported field from the +// embedded struct will never be accessed directly from the parent struct. +type Path []PathStep + +// PathStep is a union-type for specific operations to traverse +// a value's tree structure. Users of this package never need to implement +// these types as values of this type will be returned by this package. +// +// Implementations of this interface are +// StructField, SliceIndex, MapIndex, Indirect, TypeAssertion, and Transform. +type PathStep interface { + String() string + + // Type is the resulting type after performing the path step. + Type() reflect.Type + + // Values is the resulting values after performing the path step. + // The type of each valid value is guaranteed to be identical to Type. + // + // In some cases, one or both may be invalid or have restrictions: + // - For StructField, both are not interface-able if the current field + // is unexported and the struct type is not explicitly permitted by + // an Exporter to traverse unexported fields. + // - For SliceIndex, one may be invalid if an element is missing from + // either the x or y slice. + // - For MapIndex, one may be invalid if an entry is missing from + // either the x or y map. + // + // The provided values must not be mutated. + Values() (vx, vy reflect.Value) +} + +var ( + _ PathStep = StructField{} + _ PathStep = SliceIndex{} + _ PathStep = MapIndex{} + _ PathStep = Indirect{} + _ PathStep = TypeAssertion{} + _ PathStep = Transform{} +) + +func (pa *Path) push(s PathStep) { + *pa = append(*pa, s) +} + +func (pa *Path) pop() { + *pa = (*pa)[:len(*pa)-1] +} + +// Last returns the last PathStep in the Path. +// If the path is empty, this returns a non-nil PathStep that reports a nil Type. +func (pa Path) Last() PathStep { + return pa.Index(-1) +} + +// Index returns the ith step in the Path and supports negative indexing. 
+// A negative index starts counting from the tail of the Path such that -1 +// refers to the last step, -2 refers to the second-to-last step, and so on. +// If index is invalid, this returns a non-nil PathStep that reports a nil Type. +func (pa Path) Index(i int) PathStep { + if i < 0 { + i = len(pa) + i + } + if i < 0 || i >= len(pa) { + return pathStep{} + } + return pa[i] +} + +// String returns the simplified path to a node. +// The simplified path only contains struct field accesses. +// +// For example: +// +// MyMap.MySlices.MyField +func (pa Path) String() string { + var ss []string + for _, s := range pa { + if _, ok := s.(StructField); ok { + ss = append(ss, s.String()) + } + } + return strings.TrimPrefix(strings.Join(ss, ""), ".") +} + +// GoString returns the path to a specific node using Go syntax. +// +// For example: +// +// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField +func (pa Path) GoString() string { + var ssPre, ssPost []string + var numIndirect int + for i, s := range pa { + var nextStep PathStep + if i+1 < len(pa) { + nextStep = pa[i+1] + } + switch s := s.(type) { + case Indirect: + numIndirect++ + pPre, pPost := "(", ")" + switch nextStep.(type) { + case Indirect: + continue // Next step is indirection, so let them batch up + case StructField: + numIndirect-- // Automatic indirection on struct fields + case nil: + pPre, pPost = "", "" // Last step; no need for parenthesis + } + if numIndirect > 0 { + ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect)) + ssPost = append(ssPost, pPost) + } + numIndirect = 0 + continue + case Transform: + ssPre = append(ssPre, s.trans.name+"(") + ssPost = append(ssPost, ")") + continue + } + ssPost = append(ssPost, s.String()) + } + for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 { + ssPre[i], ssPre[j] = ssPre[j], ssPre[i] + } + return strings.Join(ssPre, "") + strings.Join(ssPost, "") +} + +type pathStep struct { + typ reflect.Type + vx, vy reflect.Value +} + +func (ps pathStep) Type() reflect.Type { return ps.typ } +func (ps pathStep) Values() (vx, vy reflect.Value) { return ps.vx, ps.vy } +func (ps pathStep) String() string { + if ps.typ == nil { + return "" + } + s := value.TypeString(ps.typ, false) + if s == "" || strings.ContainsAny(s, "{}\n") { + return "root" // Type too simple or complex to print + } + return fmt.Sprintf("{%s}", s) +} + +// StructField represents a struct field access on a field called Name. +type StructField struct{ *structField } +type structField struct { + pathStep + name string + idx int + + // These fields are used for forcibly accessing an unexported field. + // pvx, pvy, and field are only valid if unexported is true. + unexported bool + mayForce bool // Forcibly allow visibility + paddr bool // Was parent addressable? + pvx, pvy reflect.Value // Parent values (always addressable) + field reflect.StructField // Field information +} + +func (sf StructField) Type() reflect.Type { return sf.typ } +func (sf StructField) Values() (vx, vy reflect.Value) { + if !sf.unexported { + return sf.vx, sf.vy // CanInterface reports true + } + + // Forcibly obtain read-write access to an unexported struct field. + if sf.mayForce { + vx = retrieveUnexportedField(sf.pvx, sf.field, sf.paddr) + vy = retrieveUnexportedField(sf.pvy, sf.field, sf.paddr) + return vx, vy // CanInterface reports true + } + return sf.vx, sf.vy // CanInterface reports false +} +func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) } + +// Name is the field name. 
+func (sf StructField) Name() string { return sf.name } + +// Index is the index of the field in the parent struct type. +// See reflect.Type.Field. +func (sf StructField) Index() int { return sf.idx } + +// SliceIndex is an index operation on a slice or array at some index Key. +type SliceIndex struct{ *sliceIndex } +type sliceIndex struct { + pathStep + xkey, ykey int + isSlice bool // False for reflect.Array +} + +func (si SliceIndex) Type() reflect.Type { return si.typ } +func (si SliceIndex) Values() (vx, vy reflect.Value) { return si.vx, si.vy } +func (si SliceIndex) String() string { + switch { + case si.xkey == si.ykey: + return fmt.Sprintf("[%d]", si.xkey) + case si.ykey == -1: + // [5->?] means "I don't know where X[5] went" + return fmt.Sprintf("[%d->?]", si.xkey) + case si.xkey == -1: + // [?->3] means "I don't know where Y[3] came from" + return fmt.Sprintf("[?->%d]", si.ykey) + default: + // [5->3] means "X[5] moved to Y[3]" + return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey) + } +} + +// Key is the index key; it may return -1 if in a split state +func (si SliceIndex) Key() int { + if si.xkey != si.ykey { + return -1 + } + return si.xkey +} + +// SplitKeys are the indexes for indexing into slices in the +// x and y values, respectively. These indexes may differ due to the +// insertion or removal of an element in one of the slices, causing +// all of the indexes to be shifted. If an index is -1, then that +// indicates that the element does not exist in the associated slice. +// +// Key is guaranteed to return -1 if and only if the indexes returned +// by SplitKeys are not the same. SplitKeys will never return -1 for +// both indexes. +func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey } + +// MapIndex is an index operation on a map at some index Key. +type MapIndex struct{ *mapIndex } +type mapIndex struct { + pathStep + key reflect.Value +} + +func (mi MapIndex) Type() reflect.Type { return mi.typ } +func (mi MapIndex) Values() (vx, vy reflect.Value) { return mi.vx, mi.vy } +func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) } + +// Key is the value of the map key. +func (mi MapIndex) Key() reflect.Value { return mi.key } + +// Indirect represents pointer indirection on the parent type. +type Indirect struct{ *indirect } +type indirect struct { + pathStep +} + +func (in Indirect) Type() reflect.Type { return in.typ } +func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy } +func (in Indirect) String() string { return "*" } + +// TypeAssertion represents a type assertion on an interface. +type TypeAssertion struct{ *typeAssertion } +type typeAssertion struct { + pathStep +} + +func (ta TypeAssertion) Type() reflect.Type { return ta.typ } +func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy } +func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", value.TypeString(ta.typ, false)) } + +// Transform is a transformation from the parent type to the current type. +type Transform struct{ *transform } +type transform struct { + pathStep + trans *transformer +} + +func (tf Transform) Type() reflect.Type { return tf.typ } +func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy } +func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) } + +// Name is the name of the Transformer. +func (tf Transform) Name() string { return tf.trans.name } + +// Func is the function pointer to the transformer function. 
+func (tf Transform) Func() reflect.Value { return tf.trans.fnc } + +// Option returns the originally constructed Transformer option. +// The == operator can be used to detect the exact option used. +func (tf Transform) Option() Option { return tf.trans } + +// pointerPath represents a dual-stack of pointers encountered when +// recursively traversing the x and y values. This data structure supports +// detection of cycles and determining whether the cycles are equal. +// In Go, cycles can occur via pointers, slices, and maps. +// +// The pointerPath uses a map to represent a stack; where descension into a +// pointer pushes the address onto the stack, and ascension from a pointer +// pops the address from the stack. Thus, when traversing into a pointer from +// reflect.Ptr, reflect.Slice element, or reflect.Map, we can detect cycles +// by checking whether the pointer has already been visited. The cycle detection +// uses a separate stack for the x and y values. +// +// If a cycle is detected we need to determine whether the two pointers +// should be considered equal. The definition of equality chosen by Equal +// requires two graphs to have the same structure. To determine this, both the +// x and y values must have a cycle where the previous pointers were also +// encountered together as a pair. +// +// Semantically, this is equivalent to augmenting Indirect, SliceIndex, and +// MapIndex with pointer information for the x and y values. +// Suppose px and py are two pointers to compare, we then search the +// Path for whether px was ever encountered in the Path history of x, and +// similarly so with py. If either side has a cycle, the comparison is only +// equal if both px and py have a cycle resulting from the same PathStep. +// +// Using a map as a stack is more performant as we can perform cycle detection +// in O(1) instead of O(N) where N is len(Path). +type pointerPath struct { + // mx is keyed by x pointers, where the value is the associated y pointer. + mx map[value.Pointer]value.Pointer + // my is keyed by y pointers, where the value is the associated x pointer. + my map[value.Pointer]value.Pointer +} + +func (p *pointerPath) Init() { + p.mx = make(map[value.Pointer]value.Pointer) + p.my = make(map[value.Pointer]value.Pointer) +} + +// Push indicates intent to descend into pointers vx and vy where +// visited reports whether either has been seen before. If visited before, +// equal reports whether both pointers were encountered together. +// Pop must be called if and only if the pointers were never visited. +// +// The pointers vx and vy must be a reflect.Ptr, reflect.Slice, or reflect.Map +// and be non-nil. +func (p pointerPath) Push(vx, vy reflect.Value) (equal, visited bool) { + px := value.PointerOf(vx) + py := value.PointerOf(vy) + _, ok1 := p.mx[px] + _, ok2 := p.my[py] + if ok1 || ok2 { + equal = p.mx[px] == py && p.my[py] == px // Pointers paired together + return equal, true + } + p.mx[px] = py + p.my[py] = px + return false, false +} + +// Pop ascends from pointers vx and vy. +func (p pointerPath) Pop(vx, vy reflect.Value) { + delete(p.mx, value.PointerOf(vx)) + delete(p.my, value.PointerOf(vy)) +} + +// isExported reports whether the identifier is exported. 
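+// For example (editor's note), isExported("Name") reports true, while
+// isExported("name") and isExported("_name") report false.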
+func isExported(id string) bool { + r, _ := utf8.DecodeRuneInString(id) + return unicode.IsUpper(r) +} diff --git a/api/vendor/github.com/google/go-cmp/cmp/report.go b/api/vendor/github.com/google/go-cmp/cmp/report.go new file mode 100644 index 000000000..f43cd12eb --- /dev/null +++ b/api/vendor/github.com/google/go-cmp/cmp/report.go @@ -0,0 +1,54 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmp + +// defaultReporter implements the reporter interface. +// +// As Equal serially calls the PushStep, Report, and PopStep methods, the +// defaultReporter constructs a tree-based representation of the compared value +// and the result of each comparison (see valueNode). +// +// When the String method is called, the FormatDiff method transforms the +// valueNode tree into a textNode tree, which is a tree-based representation +// of the textual output (see textNode). +// +// Lastly, the textNode.String method produces the final report as a string. +type defaultReporter struct { + root *valueNode + curr *valueNode +} + +func (r *defaultReporter) PushStep(ps PathStep) { + r.curr = r.curr.PushStep(ps) + if r.root == nil { + r.root = r.curr + } +} +func (r *defaultReporter) Report(rs Result) { + r.curr.Report(rs) +} +func (r *defaultReporter) PopStep() { + r.curr = r.curr.PopStep() +} + +// String provides a full report of the differences detected as a structured +// literal in pseudo-Go syntax. String may only be called after the entire tree +// has been traversed. +func (r *defaultReporter) String() string { + assert(r.root != nil && r.curr == nil) + if r.root.NumDiff == 0 { + return "" + } + ptrs := new(pointerReferences) + text := formatOptions{}.FormatDiff(r.root, ptrs) + resolveReferences(text) + return text.String() +} + +func assert(ok bool) { + if !ok { + panic("assertion failure") + } +} diff --git a/api/vendor/github.com/google/go-cmp/cmp/report_compare.go b/api/vendor/github.com/google/go-cmp/cmp/report_compare.go new file mode 100644 index 000000000..2050bf6b4 --- /dev/null +++ b/api/vendor/github.com/google/go-cmp/cmp/report_compare.go @@ -0,0 +1,433 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmp + +import ( + "fmt" + "reflect" +) + +// numContextRecords is the number of surrounding equal records to print. +const numContextRecords = 2 + +type diffMode byte + +const ( + diffUnknown diffMode = 0 + diffIdentical diffMode = ' ' + diffRemoved diffMode = '-' + diffInserted diffMode = '+' +) + +type typeMode int + +const ( + // emitType always prints the type. + emitType typeMode = iota + // elideType never prints the type. + elideType + // autoType prints the type only for composite kinds + // (i.e., structs, slices, arrays, and maps). + autoType +) + +type formatOptions struct { + // DiffMode controls the output mode of FormatDiff. + // + // If diffUnknown, then produce a diff of the x and y values. + // If diffIdentical, then emit values as if they were equal. + // If diffRemoved, then only emit x values (ignoring y values). + // If diffInserted, then only emit y values (ignoring x values). + DiffMode diffMode + + // TypeMode controls whether to print the type for the current node. 
+	//
+	// As a general rule of thumb, we always print the type of the next node
+	// after an interface, and always elide the type of the next node after
+	// a slice or map node.
+	TypeMode typeMode
+
+	// formatValueOptions are options specific to printing reflect.Values.
+	formatValueOptions
+}
+
+func (opts formatOptions) WithDiffMode(d diffMode) formatOptions {
+	opts.DiffMode = d
+	return opts
+}
+func (opts formatOptions) WithTypeMode(t typeMode) formatOptions {
+	opts.TypeMode = t
+	return opts
+}
+func (opts formatOptions) WithVerbosity(level int) formatOptions {
+	opts.VerbosityLevel = level
+	opts.LimitVerbosity = true
+	return opts
+}
+func (opts formatOptions) verbosity() uint {
+	switch {
+	case opts.VerbosityLevel < 0:
+		return 0
+	case opts.VerbosityLevel > 16:
+		return 16 // some reasonable maximum to avoid shift overflow
+	default:
+		return uint(opts.VerbosityLevel)
+	}
+}
+
+const maxVerbosityPreset = 6
+
+// verbosityPreset modifies the verbosity settings given an index
+// between 0 and maxVerbosityPreset, inclusive.
+func verbosityPreset(opts formatOptions, i int) formatOptions {
+	opts.VerbosityLevel = int(opts.verbosity()) + 2*i
+	if i > 0 {
+		opts.AvoidStringer = true
+	}
+	if i >= maxVerbosityPreset {
+		opts.PrintAddresses = true
+		opts.QualifiedNames = true
+	}
+	return opts
+}
+
+// FormatDiff converts a valueNode tree into a textNode tree, where the latter
+// is a textual representation of the differences detected in the former.
+func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out textNode) {
+	if opts.DiffMode == diffIdentical {
+		opts = opts.WithVerbosity(1)
+	} else if opts.verbosity() < 3 {
+		opts = opts.WithVerbosity(3)
+	}
+
+	// Check whether we have specialized formatting for this node.
+	// This is not necessary, but helpful for producing more readable outputs.
+	if opts.CanFormatDiffSlice(v) {
+		return opts.FormatDiffSlice(v)
+	}
+
+	var parentKind reflect.Kind
+	if v.parent != nil && v.parent.TransformerName == "" {
+		parentKind = v.parent.Type.Kind()
+	}
+
+	// For leaf nodes, format the value based on the reflect.Values alone.
+	// As a special case, treat equal []byte as leaf nodes.
+	isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == byteType
+	isEqualBytes := isBytes && v.NumDiff+v.NumIgnored+v.NumTransformed == 0
+	if v.MaxDepth == 0 || isEqualBytes {
+		switch opts.DiffMode {
+		case diffUnknown, diffIdentical:
+			// Format Equal.
+			if v.NumDiff == 0 {
+				outx := opts.FormatValue(v.ValueX, parentKind, ptrs)
+				outy := opts.FormatValue(v.ValueY, parentKind, ptrs)
+				if v.NumIgnored > 0 && v.NumSame == 0 {
+					return textEllipsis
+				} else if outx.Len() < outy.Len() {
+					return outx
+				} else {
+					return outy
+				}
+			}
+
+			// Format unequal.
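+			// (Editor's note) If both renderings are identical at the
+			// current verbosity, the loop below escalates through the
+			// verbosity presets until the -/+ lines actually differ.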
+ assert(opts.DiffMode == diffUnknown) + var list textList + outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, parentKind, ptrs) + outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, parentKind, ptrs) + for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ { + opts2 := verbosityPreset(opts, i).WithTypeMode(elideType) + outx = opts2.FormatValue(v.ValueX, parentKind, ptrs) + outy = opts2.FormatValue(v.ValueY, parentKind, ptrs) + } + if outx != nil { + list = append(list, textRecord{Diff: '-', Value: outx}) + } + if outy != nil { + list = append(list, textRecord{Diff: '+', Value: outy}) + } + return opts.WithTypeMode(emitType).FormatType(v.Type, list) + case diffRemoved: + return opts.FormatValue(v.ValueX, parentKind, ptrs) + case diffInserted: + return opts.FormatValue(v.ValueY, parentKind, ptrs) + default: + panic("invalid diff mode") + } + } + + // Register slice element to support cycle detection. + if parentKind == reflect.Slice { + ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, true) + defer ptrs.Pop() + defer func() { out = wrapTrunkReferences(ptrRefs, out) }() + } + + // Descend into the child value node. + if v.TransformerName != "" { + out := opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs) + out = &textWrap{Prefix: "Inverse(" + v.TransformerName + ", ", Value: out, Suffix: ")"} + return opts.FormatType(v.Type, out) + } else { + switch k := v.Type.Kind(); k { + case reflect.Struct, reflect.Array, reflect.Slice: + out = opts.formatDiffList(v.Records, k, ptrs) + out = opts.FormatType(v.Type, out) + case reflect.Map: + // Register map to support cycle detection. + ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false) + defer ptrs.Pop() + + out = opts.formatDiffList(v.Records, k, ptrs) + out = wrapTrunkReferences(ptrRefs, out) + out = opts.FormatType(v.Type, out) + case reflect.Ptr: + // Register pointer to support cycle detection. + ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false) + defer ptrs.Pop() + + out = opts.FormatDiff(v.Value, ptrs) + out = wrapTrunkReferences(ptrRefs, out) + out = &textWrap{Prefix: "&", Value: out} + case reflect.Interface: + out = opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs) + default: + panic(fmt.Sprintf("%v cannot have children", k)) + } + return out + } +} + +func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, ptrs *pointerReferences) textNode { + // Derive record name based on the data structure kind. + var name string + var formatKey func(reflect.Value) string + switch k { + case reflect.Struct: + name = "field" + opts = opts.WithTypeMode(autoType) + formatKey = func(v reflect.Value) string { return v.String() } + case reflect.Slice, reflect.Array: + name = "element" + opts = opts.WithTypeMode(elideType) + formatKey = func(reflect.Value) string { return "" } + case reflect.Map: + name = "entry" + opts = opts.WithTypeMode(elideType) + formatKey = func(v reflect.Value) string { return formatMapKey(v, false, ptrs) } + } + + maxLen := -1 + if opts.LimitVerbosity { + if opts.DiffMode == diffIdentical { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + } else { + maxLen = (1 << opts.verbosity()) << 1 // 2, 4, 8, 16, 32, 64, etc... + } + opts.VerbosityLevel-- + } + + // Handle unification. + switch opts.DiffMode { + case diffIdentical, diffRemoved, diffInserted: + var list textList + var deferredEllipsis bool // Add final "..." 
to indicate records were dropped + for _, r := range recs { + if len(list) == maxLen { + deferredEllipsis = true + break + } + + // Elide struct fields that are zero value. + if k == reflect.Struct { + var isZero bool + switch opts.DiffMode { + case diffIdentical: + isZero = r.Value.ValueX.IsZero() || r.Value.ValueY.IsZero() + case diffRemoved: + isZero = r.Value.ValueX.IsZero() + case diffInserted: + isZero = r.Value.ValueY.IsZero() + } + if isZero { + continue + } + } + // Elide ignored nodes. + if r.Value.NumIgnored > 0 && r.Value.NumSame+r.Value.NumDiff == 0 { + deferredEllipsis = !(k == reflect.Slice || k == reflect.Array) + if !deferredEllipsis { + list.AppendEllipsis(diffStats{}) + } + continue + } + if out := opts.FormatDiff(r.Value, ptrs); out != nil { + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + } + if deferredEllipsis { + list.AppendEllipsis(diffStats{}) + } + return &textWrap{Prefix: "{", Value: list, Suffix: "}"} + case diffUnknown: + default: + panic("invalid diff mode") + } + + // Handle differencing. + var numDiffs int + var list textList + var keys []reflect.Value // invariant: len(list) == len(keys) + groups := coalesceAdjacentRecords(name, recs) + maxGroup := diffStats{Name: name} + for i, ds := range groups { + if maxLen >= 0 && numDiffs >= maxLen { + maxGroup = maxGroup.Append(ds) + continue + } + + // Handle equal records. + if ds.NumDiff() == 0 { + // Compute the number of leading and trailing records to print. + var numLo, numHi int + numEqual := ds.NumIgnored + ds.NumIdentical + for numLo < numContextRecords && numLo+numHi < numEqual && i != 0 { + if r := recs[numLo].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 { + break + } + numLo++ + } + for numHi < numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 { + if r := recs[numEqual-numHi-1].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 { + break + } + numHi++ + } + if numEqual-(numLo+numHi) == 1 && ds.NumIgnored == 0 { + numHi++ // Avoid pointless coalescing of a single equal record + } + + // Format the equal values. + for _, r := range recs[:numLo] { + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) + } + if numEqual > numLo+numHi { + ds.NumIdentical -= numLo + numHi + list.AppendEllipsis(ds) + for len(keys) < len(list) { + keys = append(keys, reflect.Value{}) + } + } + for _, r := range recs[numEqual-numHi : numEqual] { + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) + } + recs = recs[numEqual:] + continue + } + + // Handle unequal records. 
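+		// (Editor's note) Each unequal record takes one of three paths
+		// below: a batched slice diff (CanFormatDiffSlice), a split into
+		// separate -/+ renderings, or a recursive diff of the child node.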
+ for _, r := range recs[:ds.NumDiff()] { + switch { + case opts.CanFormatDiffSlice(r.Value): + out := opts.FormatDiffSlice(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) + case r.Value.NumChildren == r.Value.MaxDepth: + outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs) + outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs) + for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ { + opts2 := verbosityPreset(opts, i) + outx = opts2.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs) + outy = opts2.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs) + } + if outx != nil { + list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx}) + keys = append(keys, r.Key) + } + if outy != nil { + list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy}) + keys = append(keys, r.Key) + } + default: + out := opts.FormatDiff(r.Value, ptrs) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) + } + } + recs = recs[ds.NumDiff():] + numDiffs += ds.NumDiff() + } + if maxGroup.IsZero() { + assert(len(recs) == 0) + } else { + list.AppendEllipsis(maxGroup) + for len(keys) < len(list) { + keys = append(keys, reflect.Value{}) + } + } + assert(len(list) == len(keys)) + + // For maps, the default formatting logic uses fmt.Stringer which may + // produce ambiguous output. Avoid calling String to disambiguate. + if k == reflect.Map { + var ambiguous bool + seenKeys := map[string]reflect.Value{} + for i, currKey := range keys { + if currKey.IsValid() { + strKey := list[i].Key + prevKey, seen := seenKeys[strKey] + if seen && prevKey.CanInterface() && currKey.CanInterface() { + ambiguous = prevKey.Interface() != currKey.Interface() + if ambiguous { + break + } + } + seenKeys[strKey] = currKey + } + } + if ambiguous { + for i, k := range keys { + if k.IsValid() { + list[i].Key = formatMapKey(k, true, ptrs) + } + } + } + } + + return &textWrap{Prefix: "{", Value: list, Suffix: "}"} +} + +// coalesceAdjacentRecords coalesces the list of records into groups of +// adjacent equal, or unequal counts. +func coalesceAdjacentRecords(name string, recs []reportRecord) (groups []diffStats) { + var prevCase int // Arbitrary index into which case last occurred + lastStats := func(i int) *diffStats { + if prevCase != i { + groups = append(groups, diffStats{Name: name}) + prevCase = i + } + return &groups[len(groups)-1] + } + for _, r := range recs { + switch rv := r.Value; { + case rv.NumIgnored > 0 && rv.NumSame+rv.NumDiff == 0: + lastStats(1).NumIgnored++ + case rv.NumDiff == 0: + lastStats(1).NumIdentical++ + case rv.NumDiff > 0 && !rv.ValueY.IsValid(): + lastStats(2).NumRemoved++ + case rv.NumDiff > 0 && !rv.ValueX.IsValid(): + lastStats(2).NumInserted++ + default: + lastStats(2).NumModified++ + } + } + return groups +} diff --git a/api/vendor/github.com/google/go-cmp/cmp/report_references.go b/api/vendor/github.com/google/go-cmp/cmp/report_references.go new file mode 100644 index 000000000..be31b33a9 --- /dev/null +++ b/api/vendor/github.com/google/go-cmp/cmp/report_references.go @@ -0,0 +1,264 @@ +// Copyright 2020, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package cmp + +import ( + "fmt" + "reflect" + "strings" + + "github.com/google/go-cmp/cmp/internal/flags" + "github.com/google/go-cmp/cmp/internal/value" +) + +const ( + pointerDelimPrefix = "⟪" + pointerDelimSuffix = "⟫" +) + +// formatPointer prints the address of the pointer. +func formatPointer(p value.Pointer, withDelims bool) string { + v := p.Uintptr() + if flags.Deterministic { + v = 0xdeadf00f // Only used for stable testing purposes + } + if withDelims { + return pointerDelimPrefix + formatHex(uint64(v)) + pointerDelimSuffix + } + return formatHex(uint64(v)) +} + +// pointerReferences is a stack of pointers visited so far. +type pointerReferences [][2]value.Pointer + +func (ps *pointerReferences) PushPair(vx, vy reflect.Value, d diffMode, deref bool) (pp [2]value.Pointer) { + if deref && vx.IsValid() { + vx = vx.Addr() + } + if deref && vy.IsValid() { + vy = vy.Addr() + } + switch d { + case diffUnknown, diffIdentical: + pp = [2]value.Pointer{value.PointerOf(vx), value.PointerOf(vy)} + case diffRemoved: + pp = [2]value.Pointer{value.PointerOf(vx), value.Pointer{}} + case diffInserted: + pp = [2]value.Pointer{value.Pointer{}, value.PointerOf(vy)} + } + *ps = append(*ps, pp) + return pp +} + +func (ps *pointerReferences) Push(v reflect.Value) (p value.Pointer, seen bool) { + p = value.PointerOf(v) + for _, pp := range *ps { + if p == pp[0] || p == pp[1] { + return p, true + } + } + *ps = append(*ps, [2]value.Pointer{p, p}) + return p, false +} + +func (ps *pointerReferences) Pop() { + *ps = (*ps)[:len(*ps)-1] +} + +// trunkReferences is metadata for a textNode indicating that the sub-tree +// represents the value for either pointer in a pair of references. +type trunkReferences struct{ pp [2]value.Pointer } + +// trunkReference is metadata for a textNode indicating that the sub-tree +// represents the value for the given pointer reference. +type trunkReference struct{ p value.Pointer } + +// leafReference is metadata for a textNode indicating that the value is +// truncated as it refers to another part of the tree (i.e., a trunk). +type leafReference struct{ p value.Pointer } + +func wrapTrunkReferences(pp [2]value.Pointer, s textNode) textNode { + switch { + case pp[0].IsNil(): + return &textWrap{Value: s, Metadata: trunkReference{pp[1]}} + case pp[1].IsNil(): + return &textWrap{Value: s, Metadata: trunkReference{pp[0]}} + case pp[0] == pp[1]: + return &textWrap{Value: s, Metadata: trunkReference{pp[0]}} + default: + return &textWrap{Value: s, Metadata: trunkReferences{pp}} + } +} +func wrapTrunkReference(p value.Pointer, printAddress bool, s textNode) textNode { + var prefix string + if printAddress { + prefix = formatPointer(p, true) + } + return &textWrap{Prefix: prefix, Value: s, Metadata: trunkReference{p}} +} +func makeLeafReference(p value.Pointer, printAddress bool) textNode { + out := &textWrap{Prefix: "(", Value: textEllipsis, Suffix: ")"} + var prefix string + if printAddress { + prefix = formatPointer(p, true) + } + return &textWrap{Prefix: prefix, Value: out, Metadata: leafReference{p}} +} + +// resolveReferences walks the textNode tree searching for any leaf reference +// metadata and resolves each against the corresponding trunk references. +// Since pointer addresses in memory are not particularly readable to the user, +// it replaces each pointer value with an arbitrary and unique reference ID. 
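+//
+// For example (editor's note), the first pointer referenced by a leaf is
+// labeled "ref#0", the next "ref#1", and so on, in trunk-visitation order.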
+func resolveReferences(s textNode) { + var walkNodes func(textNode, func(textNode)) + walkNodes = func(s textNode, f func(textNode)) { + f(s) + switch s := s.(type) { + case *textWrap: + walkNodes(s.Value, f) + case textList: + for _, r := range s { + walkNodes(r.Value, f) + } + } + } + + // Collect all trunks and leaves with reference metadata. + var trunks, leaves []*textWrap + walkNodes(s, func(s textNode) { + if s, ok := s.(*textWrap); ok { + switch s.Metadata.(type) { + case leafReference: + leaves = append(leaves, s) + case trunkReference, trunkReferences: + trunks = append(trunks, s) + } + } + }) + + // No leaf references to resolve. + if len(leaves) == 0 { + return + } + + // Collect the set of all leaf references to resolve. + leafPtrs := make(map[value.Pointer]bool) + for _, leaf := range leaves { + leafPtrs[leaf.Metadata.(leafReference).p] = true + } + + // Collect the set of trunk pointers that are always paired together. + // This allows us to assign a single ID to both pointers for brevity. + // If a pointer in a pair ever occurs by itself or as a different pair, + // then the pair is broken. + pairedTrunkPtrs := make(map[value.Pointer]value.Pointer) + unpair := func(p value.Pointer) { + if !pairedTrunkPtrs[p].IsNil() { + pairedTrunkPtrs[pairedTrunkPtrs[p]] = value.Pointer{} // invalidate other half + } + pairedTrunkPtrs[p] = value.Pointer{} // invalidate this half + } + for _, trunk := range trunks { + switch p := trunk.Metadata.(type) { + case trunkReference: + unpair(p.p) // standalone pointer cannot be part of a pair + case trunkReferences: + p0, ok0 := pairedTrunkPtrs[p.pp[0]] + p1, ok1 := pairedTrunkPtrs[p.pp[1]] + switch { + case !ok0 && !ok1: + // Register the newly seen pair. + pairedTrunkPtrs[p.pp[0]] = p.pp[1] + pairedTrunkPtrs[p.pp[1]] = p.pp[0] + case ok0 && ok1 && p0 == p.pp[1] && p1 == p.pp[0]: + // Exact pair already seen; do nothing. + default: + // Pair conflicts with some other pair; break all pairs. + unpair(p.pp[0]) + unpair(p.pp[1]) + } + } + } + + // Correlate each pointer referenced by leaves to a unique identifier, + // and print the IDs for each trunk that matches those pointers. 
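+	// (Editor's note) IDs are allocated only for pointers that some leaf
+	// actually references; unreferenced trunks keep their plain prefixes.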
+ var nextID uint + ptrIDs := make(map[value.Pointer]uint) + newID := func() uint { + id := nextID + nextID++ + return id + } + for _, trunk := range trunks { + switch p := trunk.Metadata.(type) { + case trunkReference: + if print := leafPtrs[p.p]; print { + id, ok := ptrIDs[p.p] + if !ok { + id = newID() + ptrIDs[p.p] = id + } + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id)) + } + case trunkReferences: + print0 := leafPtrs[p.pp[0]] + print1 := leafPtrs[p.pp[1]] + if print0 || print1 { + id0, ok0 := ptrIDs[p.pp[0]] + id1, ok1 := ptrIDs[p.pp[1]] + isPair := pairedTrunkPtrs[p.pp[0]] == p.pp[1] && pairedTrunkPtrs[p.pp[1]] == p.pp[0] + if isPair { + var id uint + assert(ok0 == ok1) // must be seen together or not at all + if ok0 { + assert(id0 == id1) // must have the same ID + id = id0 + } else { + id = newID() + ptrIDs[p.pp[0]] = id + ptrIDs[p.pp[1]] = id + } + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id)) + } else { + if print0 && !ok0 { + id0 = newID() + ptrIDs[p.pp[0]] = id0 + } + if print1 && !ok1 { + id1 = newID() + ptrIDs[p.pp[1]] = id1 + } + switch { + case print0 && print1: + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)+","+formatReference(id1)) + case print0: + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)) + case print1: + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id1)) + } + } + } + } + } + + // Update all leaf references with the unique identifier. + for _, leaf := range leaves { + if id, ok := ptrIDs[leaf.Metadata.(leafReference).p]; ok { + leaf.Prefix = updateReferencePrefix(leaf.Prefix, formatReference(id)) + } + } +} + +func formatReference(id uint) string { + return fmt.Sprintf("ref#%d", id) +} + +func updateReferencePrefix(prefix, ref string) string { + if prefix == "" { + return pointerDelimPrefix + ref + pointerDelimSuffix + } + suffix := strings.TrimPrefix(prefix, pointerDelimPrefix) + return pointerDelimPrefix + ref + ": " + suffix +} diff --git a/api/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/api/vendor/github.com/google/go-cmp/cmp/report_reflect.go new file mode 100644 index 000000000..2ab41fad3 --- /dev/null +++ b/api/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -0,0 +1,414 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmp + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "github.com/google/go-cmp/cmp/internal/value" +) + +var ( + anyType = reflect.TypeOf((*interface{})(nil)).Elem() + stringType = reflect.TypeOf((*string)(nil)).Elem() + bytesType = reflect.TypeOf((*[]byte)(nil)).Elem() + byteType = reflect.TypeOf((*byte)(nil)).Elem() +) + +type formatValueOptions struct { + // AvoidStringer controls whether to avoid calling custom stringer + // methods like error.Error or fmt.Stringer.String. + AvoidStringer bool + + // PrintAddresses controls whether to print the address of all pointers, + // slice elements, and maps. + PrintAddresses bool + + // QualifiedNames controls whether FormatType uses the fully qualified name + // (including the full package path as opposed to just the package name). + QualifiedNames bool + + // VerbosityLevel controls the amount of output to produce. + // A higher value produces more output. A value of zero or lower produces + // no output (represented using an ellipsis). 
+ // If LimitVerbosity is false, then the level is treated as infinite. + VerbosityLevel int + + // LimitVerbosity specifies that formatting should respect VerbosityLevel. + LimitVerbosity bool +} + +// FormatType prints the type as if it were wrapping s. +// This may return s as-is depending on the current type and TypeMode mode. +func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode { + // Check whether to emit the type or not. + switch opts.TypeMode { + case autoType: + switch t.Kind() { + case reflect.Struct, reflect.Slice, reflect.Array, reflect.Map: + if s.Equal(textNil) { + return s + } + default: + return s + } + if opts.DiffMode == diffIdentical { + return s // elide type for identical nodes + } + case elideType: + return s + } + + // Determine the type label, applying special handling for unnamed types. + typeName := value.TypeString(t, opts.QualifiedNames) + if t.Name() == "" { + // According to Go grammar, certain type literals contain symbols that + // do not strongly bind to the next lexicographical token (e.g., *T). + switch t.Kind() { + case reflect.Chan, reflect.Func, reflect.Ptr: + typeName = "(" + typeName + ")" + } + } + return &textWrap{Prefix: typeName, Value: wrapParens(s)} +} + +// wrapParens wraps s with a set of parenthesis, but avoids it if the +// wrapped node itself is already surrounded by a pair of parenthesis or braces. +// It handles unwrapping one level of pointer-reference nodes. +func wrapParens(s textNode) textNode { + var refNode *textWrap + if s2, ok := s.(*textWrap); ok { + // Unwrap a single pointer reference node. + switch s2.Metadata.(type) { + case leafReference, trunkReference, trunkReferences: + refNode = s2 + if s3, ok := refNode.Value.(*textWrap); ok { + s2 = s3 + } + } + + // Already has delimiters that make parenthesis unnecessary. + hasParens := strings.HasPrefix(s2.Prefix, "(") && strings.HasSuffix(s2.Suffix, ")") + hasBraces := strings.HasPrefix(s2.Prefix, "{") && strings.HasSuffix(s2.Suffix, "}") + if hasParens || hasBraces { + return s + } + } + if refNode != nil { + refNode.Value = &textWrap{Prefix: "(", Value: refNode.Value, Suffix: ")"} + return s + } + return &textWrap{Prefix: "(", Value: s, Suffix: ")"} +} + +// FormatValue prints the reflect.Value, taking extra care to avoid descending +// into pointers already in ptrs. As pointers are visited, ptrs is also updated. +func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, ptrs *pointerReferences) (out textNode) { + if !v.IsValid() { + return nil + } + t := v.Type() + + // Check slice element for cycles. + if parentKind == reflect.Slice { + ptrRef, visited := ptrs.Push(v.Addr()) + if visited { + return makeLeafReference(ptrRef, false) + } + defer ptrs.Pop() + defer func() { out = wrapTrunkReference(ptrRef, false, out) }() + } + + // Check whether there is an Error or String method to call. + if !opts.AvoidStringer && v.CanInterface() { + // Avoid calling Error or String methods on nil receivers since many + // implementations crash when doing so. + if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() { + var prefix, strVal string + func() { + // Swallow and ignore any panics from String or Error. 
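+				// (Editor's note) The deferred recover makes this call
+				// best-effort: if String or Error panics, prefix stays ""
+				// and formatting falls through to the reflection-based
+				// logic below.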
+ defer func() { recover() }() + switch v := v.Interface().(type) { + case error: + strVal = v.Error() + prefix = "e" + case fmt.Stringer: + strVal = v.String() + prefix = "s" + } + }() + if prefix != "" { + return opts.formatString(prefix, strVal) + } + } + } + + // Check whether to explicitly wrap the result with the type. + var skipType bool + defer func() { + if !skipType { + out = opts.FormatType(t, out) + } + }() + + switch t.Kind() { + case reflect.Bool: + return textLine(fmt.Sprint(v.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return textLine(fmt.Sprint(v.Int())) + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return textLine(fmt.Sprint(v.Uint())) + case reflect.Uint8: + if parentKind == reflect.Slice || parentKind == reflect.Array { + return textLine(formatHex(v.Uint())) + } + return textLine(fmt.Sprint(v.Uint())) + case reflect.Uintptr: + return textLine(formatHex(v.Uint())) + case reflect.Float32, reflect.Float64: + return textLine(fmt.Sprint(v.Float())) + case reflect.Complex64, reflect.Complex128: + return textLine(fmt.Sprint(v.Complex())) + case reflect.String: + return opts.formatString("", v.String()) + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + return textLine(formatPointer(value.PointerOf(v), true)) + case reflect.Struct: + var list textList + v := makeAddressable(v) // needed for retrieveUnexportedField + maxLen := v.NumField() + if opts.LimitVerbosity { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + opts.VerbosityLevel-- + } + for i := 0; i < v.NumField(); i++ { + vv := v.Field(i) + if vv.IsZero() { + continue // Elide fields with zero values + } + if len(list) == maxLen { + list.AppendEllipsis(diffStats{}) + break + } + sf := t.Field(i) + if supportExporters && !isExported(sf.Name) { + vv = retrieveUnexportedField(v, sf, true) + } + s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs) + list = append(list, textRecord{Key: sf.Name, Value: s}) + } + return &textWrap{Prefix: "{", Value: list, Suffix: "}"} + case reflect.Slice: + if v.IsNil() { + return textNil + } + + // Check whether this is a []byte of text data. + if t.Elem() == byteType { + b := v.Bytes() + isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) } + if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 { + out = opts.formatString("", string(b)) + skipType = true + return opts.FormatType(t, out) + } + } + + fallthrough + case reflect.Array: + maxLen := v.Len() + if opts.LimitVerbosity { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + opts.VerbosityLevel-- + } + var list textList + for i := 0; i < v.Len(); i++ { + if len(list) == maxLen { + list.AppendEllipsis(diffStats{}) + break + } + s := opts.WithTypeMode(elideType).FormatValue(v.Index(i), t.Kind(), ptrs) + list = append(list, textRecord{Value: s}) + } + + out = &textWrap{Prefix: "{", Value: list, Suffix: "}"} + if t.Kind() == reflect.Slice && opts.PrintAddresses { + header := fmt.Sprintf("ptr:%v, len:%d, cap:%d", formatPointer(value.PointerOf(v), false), v.Len(), v.Cap()) + out = &textWrap{Prefix: pointerDelimPrefix + header + pointerDelimSuffix, Value: out} + } + return out + case reflect.Map: + if v.IsNil() { + return textNil + } + + // Check pointer for cycles. 
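+		// (Editor's note) Maps are reference types, so pushing the map's
+		// pointer lets self-referential maps terminate with a leaf
+		// reference instead of recursing forever.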
+ ptrRef, visited := ptrs.Push(v) + if visited { + return makeLeafReference(ptrRef, opts.PrintAddresses) + } + defer ptrs.Pop() + + maxLen := v.Len() + if opts.LimitVerbosity { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + opts.VerbosityLevel-- + } + var list textList + for _, k := range value.SortKeys(v.MapKeys()) { + if len(list) == maxLen { + list.AppendEllipsis(diffStats{}) + break + } + sk := formatMapKey(k, false, ptrs) + sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), t.Kind(), ptrs) + list = append(list, textRecord{Key: sk, Value: sv}) + } + + out = &textWrap{Prefix: "{", Value: list, Suffix: "}"} + out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out) + return out + case reflect.Ptr: + if v.IsNil() { + return textNil + } + + // Check pointer for cycles. + ptrRef, visited := ptrs.Push(v) + if visited { + out = makeLeafReference(ptrRef, opts.PrintAddresses) + return &textWrap{Prefix: "&", Value: out} + } + defer ptrs.Pop() + + // Skip the name only if this is an unnamed pointer type. + // Otherwise taking the address of a value does not reproduce + // the named pointer type. + if v.Type().Name() == "" { + skipType = true // Let the underlying value print the type instead + } + out = opts.FormatValue(v.Elem(), t.Kind(), ptrs) + out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out) + out = &textWrap{Prefix: "&", Value: out} + return out + case reflect.Interface: + if v.IsNil() { + return textNil + } + // Interfaces accept different concrete types, + // so configure the underlying value to explicitly print the type. + return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs) + default: + panic(fmt.Sprintf("%v kind not handled", v.Kind())) + } +} + +func (opts formatOptions) formatString(prefix, s string) textNode { + maxLen := len(s) + maxLines := strings.Count(s, "\n") + 1 + if opts.LimitVerbosity { + maxLen = (1 << opts.verbosity()) << 5 // 32, 64, 128, 256, etc... + maxLines = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc... + } + + // For multiline strings, use the triple-quote syntax, + // but only use it when printing removed or inserted nodes since + // we only want the extra verbosity for those cases. 
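+	// (Editor's note) Triple-quoting is attempted only for four or more
+	// lines of printable text; anything else falls back to the
+	// single-line quoted form at the end of this function.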
+	lines := strings.Split(strings.TrimSuffix(s, "\n"), "\n")
+	isTripleQuoted := len(lines) >= 4 && (opts.DiffMode == '-' || opts.DiffMode == '+')
+	for i := 0; i < len(lines) && isTripleQuoted; i++ {
+		lines[i] = strings.TrimPrefix(strings.TrimSuffix(lines[i], "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support
+		isPrintable := func(r rune) bool {
+			return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable
+		}
+		line := lines[i]
+		isTripleQuoted = !strings.HasPrefix(strings.TrimPrefix(line, prefix), `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" && len(line) <= maxLen
+	}
+	if isTripleQuoted {
+		var list textList
+		list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true})
+		for i, line := range lines {
+			if numElided := len(lines) - i; i == maxLines-1 && numElided > 1 {
+				comment := commentString(fmt.Sprintf("%d elided lines", numElided))
+				list = append(list, textRecord{Diff: opts.DiffMode, Value: textEllipsis, ElideComma: true, Comment: comment})
+				break
+			}
+			list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(line), ElideComma: true})
+		}
+		list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true})
+		return &textWrap{Prefix: "(", Value: list, Suffix: ")"}
+	}
+
+	// Format the string as a single-line quoted string.
+	if len(s) > maxLen+len(textEllipsis) {
+		return textLine(prefix + formatString(s[:maxLen]) + string(textEllipsis))
+	}
+	return textLine(prefix + formatString(s))
+}
+
+// formatMapKey formats v as if it were a map key.
+// The result is guaranteed to be a single line.
+func formatMapKey(v reflect.Value, disambiguate bool, ptrs *pointerReferences) string {
+	var opts formatOptions
+	opts.DiffMode = diffIdentical
+	opts.TypeMode = elideType
+	opts.PrintAddresses = disambiguate
+	opts.AvoidStringer = disambiguate
+	opts.QualifiedNames = disambiguate
+	opts.VerbosityLevel = maxVerbosityPreset
+	opts.LimitVerbosity = true
+	s := opts.FormatValue(v, reflect.Map, ptrs).String()
+	return strings.TrimSpace(s)
+}
+
+// formatString prints s as a double-quoted or backtick-quoted string.
+func formatString(s string) string {
+	// Use quoted string if it is the same length as a raw string literal.
+	// Otherwise, attempt to use the raw string form.
+	qs := strconv.Quote(s)
+	if len(qs) == 1+len(s)+1 {
+		return qs
+	}
+
+	// Disallow newlines to ensure output is a single line.
+	// Only allow printable runes for readability purposes.
+	rawInvalid := func(r rune) bool {
+		return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t')
+	}
+	if utf8.ValidString(s) && strings.IndexFunc(s, rawInvalid) < 0 {
+		return "`" + s + "`"
+	}
+	return qs
+}
+
+// formatHex prints u as a hexadecimal integer in Go notation.
+func formatHex(u uint64) string { + var f string + switch { + case u <= 0xff: + f = "0x%02x" + case u <= 0xffff: + f = "0x%04x" + case u <= 0xffffff: + f = "0x%06x" + case u <= 0xffffffff: + f = "0x%08x" + case u <= 0xffffffffff: + f = "0x%010x" + case u <= 0xffffffffffff: + f = "0x%012x" + case u <= 0xffffffffffffff: + f = "0x%014x" + case u <= 0xffffffffffffffff: + f = "0x%016x" + } + return fmt.Sprintf(f, u) +} diff --git a/api/vendor/github.com/google/go-cmp/cmp/report_slices.go b/api/vendor/github.com/google/go-cmp/cmp/report_slices.go new file mode 100644 index 000000000..23e444f62 --- /dev/null +++ b/api/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -0,0 +1,614 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmp + +import ( + "bytes" + "fmt" + "math" + "reflect" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "github.com/google/go-cmp/cmp/internal/diff" +) + +// CanFormatDiffSlice reports whether we support custom formatting for nodes +// that are slices of primitive kinds or strings. +func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { + switch { + case opts.DiffMode != diffUnknown: + return false // Must be formatting in diff mode + case v.NumDiff == 0: + return false // No differences detected + case !v.ValueX.IsValid() || !v.ValueY.IsValid(): + return false // Both values must be valid + case v.NumIgnored > 0: + return false // Some ignore option was used + case v.NumTransformed > 0: + return false // Some transform option was used + case v.NumCompared > 1: + return false // More than one comparison was used + case v.NumCompared == 1 && v.Type.Name() != "": + // The need for cmp to check applicability of options on every element + // in a slice is a significant performance detriment for large []byte. + // The workaround is to specify Comparer(bytes.Equal), + // which enables cmp to compare []byte more efficiently. + // If they differ, we still want to provide batched diffing. + // The logic disallows named types since they tend to have their own + // String method, with nicer formatting than what this provides. + return false + } + + // Check whether this is an interface with the same concrete types. + t := v.Type + vx, vy := v.ValueX, v.ValueY + if t.Kind() == reflect.Interface && !vx.IsNil() && !vy.IsNil() && vx.Elem().Type() == vy.Elem().Type() { + vx, vy = vx.Elem(), vy.Elem() + t = vx.Type() + } + + // Check whether we provide specialized diffing for this type. + switch t.Kind() { + case reflect.String: + case reflect.Array, reflect.Slice: + // Only slices of primitive types have specialized handling. + switch t.Elem().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + default: + return false + } + + // Both slice values have to be non-empty. + if t.Kind() == reflect.Slice && (vx.Len() == 0 || vy.Len() == 0) { + return false + } + + // If a sufficient number of elements already differ, + // use specialized formatting even if length requirement is not met. + if v.NumDiff > v.NumSame { + return true + } + default: + return false + } + + // Use specialized string diffing for longer slices or strings. 
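Aside (editorial, not part of the patch): a standalone sketch of the two formatting helpers above; `quoteStyle` is a hypothetical stand-in that mirrors `formatString`'s quoted-versus-backtick decision, and the trailing `Printf` shows the even-digit padding that `formatHex` produces.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
	"unicode"
	"unicode/utf8"
)

// quoteStyle mirrors the decision in formatString above: use the
// double-quoted form when quoting adds no escape overhead, otherwise
// fall back to a raw backtick literal when the content allows it.
func quoteStyle(s string) string {
	qs := strconv.Quote(s)
	if len(qs) == 1+len(s)+1 {
		return qs // quoting added only the two quote characters
	}
	rawInvalid := func(r rune) bool {
		return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t')
	}
	if utf8.ValidString(s) && strings.IndexFunc(s, rawInvalid) < 0 {
		return "`" + s + "`"
	}
	return qs
}

func main() {
	fmt.Println(quoteStyle("plain"))    // "plain"
	fmt.Println(quoteStyle(`say "hi"`)) // `say "hi"`
	fmt.Println(quoteStyle("a\x00b"))   // "a\x00b" (raw form not possible)

	// formatHex above pads to an even number of hex digits so that
	// byte-oriented values line up: 0x1 -> 0x01, 0xabc -> 0x0abc.
	fmt.Printf("0x%02x 0x%04x\n", 0x1, 0xabc)
}
```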
+ const minLength = 32 + return vx.Len() >= minLength && vy.Len() >= minLength +} + +// FormatDiffSlice prints a diff for the slices (or strings) represented by v. +// This provides custom-tailored logic to make printing of differences in +// textual strings and slices of primitive kinds more readable. +func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { + assert(opts.DiffMode == diffUnknown) + t, vx, vy := v.Type, v.ValueX, v.ValueY + if t.Kind() == reflect.Interface { + vx, vy = vx.Elem(), vy.Elem() + t = vx.Type() + opts = opts.WithTypeMode(emitType) + } + + // Auto-detect the type of the data. + var sx, sy string + var ssx, ssy []string + var isString, isMostlyText, isPureLinedText, isBinary bool + switch { + case t.Kind() == reflect.String: + sx, sy = vx.String(), vy.String() + isString = true + case t.Kind() == reflect.Slice && t.Elem() == byteType: + sx, sy = string(vx.Bytes()), string(vy.Bytes()) + isString = true + case t.Kind() == reflect.Array: + // Arrays need to be addressable for slice operations to work. + vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem() + vx2.Set(vx) + vy2.Set(vy) + vx, vy = vx2, vy2 + } + if isString { + var numTotalRunes, numValidRunes, numLines, lastLineIdx, maxLineLen int + for i, r := range sx + sy { + numTotalRunes++ + if (unicode.IsPrint(r) || unicode.IsSpace(r)) && r != utf8.RuneError { + numValidRunes++ + } + if r == '\n' { + if maxLineLen < i-lastLineIdx { + maxLineLen = i - lastLineIdx + } + lastLineIdx = i + 1 + numLines++ + } + } + isPureText := numValidRunes == numTotalRunes + isMostlyText = float64(numValidRunes) > math.Floor(0.90*float64(numTotalRunes)) + isPureLinedText = isPureText && numLines >= 4 && maxLineLen <= 1024 + isBinary = !isMostlyText + + // Avoid diffing by lines if it produces a significantly more complex + // edit script than diffing by bytes. + if isPureLinedText { + ssx = strings.Split(sx, "\n") + ssy = strings.Split(sy, "\n") + esLines := diff.Difference(len(ssx), len(ssy), func(ix, iy int) diff.Result { + return diff.BoolResult(ssx[ix] == ssy[iy]) + }) + esBytes := diff.Difference(len(sx), len(sy), func(ix, iy int) diff.Result { + return diff.BoolResult(sx[ix] == sy[iy]) + }) + efficiencyLines := float64(esLines.Dist()) / float64(len(esLines)) + efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes)) + quotedLength := len(strconv.Quote(sx + sy)) + unquotedLength := len(sx) + len(sy) + escapeExpansionRatio := float64(quotedLength) / float64(unquotedLength) + isPureLinedText = efficiencyLines < 4*efficiencyBytes || escapeExpansionRatio > 1.1 + } + } + + // Format the string into printable records. + var list textList + var delim string + switch { + // If the text appears to be multi-lined text, + // then perform differencing across individual lines. + case isPureLinedText: + list = opts.formatDiffSlice( + reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line", + func(v reflect.Value, d diffMode) textRecord { + s := formatString(v.Index(0).String()) + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + delim = "\n" + + // If possible, use a custom triple-quote (""") syntax for printing + // differences in a string literal. This format is more readable, + // but has edge-cases where differences are visually indistinguishable. + // This format is avoided under the following conditions: + // - A line starts with `"""` + // - A line starts with "..." 
+ // - A line contains non-printable characters + // - Adjacent different lines differ only by whitespace + // + // For example: + // + // """ + // ... // 3 identical lines + // foo + // bar + // - baz + // + BAZ + // """ + isTripleQuoted := true + prevRemoveLines := map[string]bool{} + prevInsertLines := map[string]bool{} + var list2 textList + list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true}) + for _, r := range list { + if !r.Value.Equal(textEllipsis) { + line, _ := strconv.Unquote(string(r.Value.(textLine))) + line = strings.TrimPrefix(strings.TrimSuffix(line, "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support + normLine := strings.Map(func(r rune) rune { + if unicode.IsSpace(r) { + return -1 // drop whitespace to avoid visually indistinguishable output + } + return r + }, line) + isPrintable := func(r rune) bool { + return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable + } + isTripleQuoted = !strings.HasPrefix(line, `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" + switch r.Diff { + case diffRemoved: + isTripleQuoted = isTripleQuoted && !prevInsertLines[normLine] + prevRemoveLines[normLine] = true + case diffInserted: + isTripleQuoted = isTripleQuoted && !prevRemoveLines[normLine] + prevInsertLines[normLine] = true + } + if !isTripleQuoted { + break + } + r.Value = textLine(line) + r.ElideComma = true + } + if !(r.Diff == diffRemoved || r.Diff == diffInserted) { // start a new non-adjacent difference group + prevRemoveLines = map[string]bool{} + prevInsertLines = map[string]bool{} + } + list2 = append(list2, r) + } + if r := list2[len(list2)-1]; r.Diff == diffIdentical && len(r.Value.(textLine)) == 0 { + list2 = list2[:len(list2)-1] // elide single empty line at the end + } + list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true}) + if isTripleQuoted { + var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"} + switch t.Kind() { + case reflect.String: + if t != stringType { + out = opts.FormatType(t, out) + } + case reflect.Slice: + // Always emit type for slices since the triple-quote syntax + // looks like a string (not a slice). + opts = opts.WithTypeMode(emitType) + out = opts.FormatType(t, out) + } + return out + } + + // If the text appears to be single-lined text, + // then perform differencing in approximately fixed-sized chunks. + // The output is printed as quoted strings. + case isMostlyText: + list = opts.formatDiffSlice( + reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte", + func(v reflect.Value, d diffMode) textRecord { + s := formatString(v.String()) + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + + // If the text appears to be binary data, + // then perform differencing in approximately fixed-sized chunks. + // The output is inspired by hexdump. + case isBinary: + list = opts.formatDiffSlice( + reflect.ValueOf(sx), reflect.ValueOf(sy), 16, "byte", + func(v reflect.Value, d diffMode) textRecord { + var ss []string + for i := 0; i < v.Len(); i++ { + ss = append(ss, formatHex(v.Index(i).Uint())) + } + s := strings.Join(ss, ", ") + comment := commentString(fmt.Sprintf("%c|%v|", d, formatASCII(v.String()))) + return textRecord{Diff: d, Value: textLine(s), Comment: comment} + }, + ) + + // For all other slices of primitive types, + // then perform differencing in approximately fixed-sized chunks. + // The size of each chunk depends on the width of the element kind. 
+ default: + var chunkSize int + if t.Elem().Kind() == reflect.Bool { + chunkSize = 16 + } else { + switch t.Elem().Bits() { + case 8: + chunkSize = 16 + case 16: + chunkSize = 12 + case 32: + chunkSize = 8 + default: + chunkSize = 8 + } + } + list = opts.formatDiffSlice( + vx, vy, chunkSize, t.Elem().Kind().String(), + func(v reflect.Value, d diffMode) textRecord { + var ss []string + for i := 0; i < v.Len(); i++ { + switch t.Elem().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + ss = append(ss, fmt.Sprint(v.Index(i).Int())) + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + ss = append(ss, fmt.Sprint(v.Index(i).Uint())) + case reflect.Uint8, reflect.Uintptr: + ss = append(ss, formatHex(v.Index(i).Uint())) + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + ss = append(ss, fmt.Sprint(v.Index(i).Interface())) + } + } + s := strings.Join(ss, ", ") + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + } + + // Wrap the output with appropriate type information. + var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"} + if !isMostlyText { + // The "{...}" byte-sequence literal is not valid Go syntax for strings. + // Emit the type for extra clarity (e.g. "string{...}"). + if t.Kind() == reflect.String { + opts = opts.WithTypeMode(emitType) + } + return opts.FormatType(t, out) + } + switch t.Kind() { + case reflect.String: + out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} + if t != stringType { + out = opts.FormatType(t, out) + } + case reflect.Slice: + out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} + if t != bytesType { + out = opts.FormatType(t, out) + } + } + return out +} + +// formatASCII formats s as an ASCII string. +// This is useful for printing binary strings in a semi-legible way. +func formatASCII(s string) string { + b := bytes.Repeat([]byte{'.'}, len(s)) + for i := 0; i < len(s); i++ { + if ' ' <= s[i] && s[i] <= '~' { + b[i] = s[i] + } + } + return string(b) +} + +func (opts formatOptions) formatDiffSlice( + vx, vy reflect.Value, chunkSize int, name string, + makeRec func(reflect.Value, diffMode) textRecord, +) (list textList) { + eq := func(ix, iy int) bool { + return vx.Index(ix).Interface() == vy.Index(iy).Interface() + } + es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result { + return diff.BoolResult(eq(ix, iy)) + }) + + appendChunks := func(v reflect.Value, d diffMode) int { + n0 := v.Len() + for v.Len() > 0 { + n := chunkSize + if n > v.Len() { + n = v.Len() + } + list = append(list, makeRec(v.Slice(0, n), d)) + v = v.Slice(n, v.Len()) + } + return n0 - v.Len() + } + + var numDiffs int + maxLen := -1 + if opts.LimitVerbosity { + maxLen = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc... + opts.VerbosityLevel-- + } + + groups := coalesceAdjacentEdits(name, es) + groups = coalesceInterveningIdentical(groups, chunkSize/4) + groups = cleanupSurroundingIdentical(groups, eq) + maxGroup := diffStats{Name: name} + for i, ds := range groups { + if maxLen >= 0 && numDiffs >= maxLen { + maxGroup = maxGroup.Append(ds) + continue + } + + // Print equal. + if ds.NumDiff() == 0 { + // Compute the number of leading and trailing equal bytes to print. 
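Aside (editorial, not part of the patch): `formatASCII` above supplies the hexdump-style legend for binary diffs. A standalone sketch with the same logic follows; `asciiDots` is a hypothetical name.

```go
package main

import (
	"bytes"
	"fmt"
)

// asciiDots mirrors formatASCII above: bytes outside the printable
// ASCII range (0x20..0x7e) are rendered as '.', so binary diffs get a
// legible gutter next to the hex values.
func asciiDots(s string) string {
	b := bytes.Repeat([]byte{'.'}, len(s))
	for i := 0; i < len(s); i++ {
		if ' ' <= s[i] && s[i] <= '~' {
			b[i] = s[i]
		}
	}
	return string(b)
}

func main() {
	fmt.Println(asciiDots("GIF89a\x00\xff\x01")) // GIF89a...
}
```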
+ var numLo, numHi int + numEqual := ds.NumIgnored + ds.NumIdentical + for numLo < chunkSize*numContextRecords && numLo+numHi < numEqual && i != 0 { + numLo++ + } + for numHi < chunkSize*numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 { + numHi++ + } + if numEqual-(numLo+numHi) <= chunkSize && ds.NumIgnored == 0 { + numHi = numEqual - numLo // Avoid pointless coalescing of single equal row + } + + // Print the equal bytes. + appendChunks(vx.Slice(0, numLo), diffIdentical) + if numEqual > numLo+numHi { + ds.NumIdentical -= numLo + numHi + list.AppendEllipsis(ds) + } + appendChunks(vx.Slice(numEqual-numHi, numEqual), diffIdentical) + vx = vx.Slice(numEqual, vx.Len()) + vy = vy.Slice(numEqual, vy.Len()) + continue + } + + // Print unequal. + len0 := len(list) + nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved) + vx = vx.Slice(nx, vx.Len()) + ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted) + vy = vy.Slice(ny, vy.Len()) + numDiffs += len(list) - len0 + } + if maxGroup.IsZero() { + assert(vx.Len() == 0 && vy.Len() == 0) + } else { + list.AppendEllipsis(maxGroup) + } + return list +} + +// coalesceAdjacentEdits coalesces the list of edits into groups of adjacent +// equal or unequal counts. +// +// Example: +// +// Input: "..XXY...Y" +// Output: [ +// {NumIdentical: 2}, +// {NumRemoved: 2, NumInserted 1}, +// {NumIdentical: 3}, +// {NumInserted: 1}, +// ] +func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) { + var prevMode byte + lastStats := func(mode byte) *diffStats { + if prevMode != mode { + groups = append(groups, diffStats{Name: name}) + prevMode = mode + } + return &groups[len(groups)-1] + } + for _, e := range es { + switch e { + case diff.Identity: + lastStats('=').NumIdentical++ + case diff.UniqueX: + lastStats('!').NumRemoved++ + case diff.UniqueY: + lastStats('!').NumInserted++ + case diff.Modified: + lastStats('!').NumModified++ + } + } + return groups +} + +// coalesceInterveningIdentical coalesces sufficiently short (<= windowSize) +// equal groups into adjacent unequal groups that currently result in a +// dual inserted/removed printout. This acts as a high-pass filter to smooth +// out high-frequency changes within the windowSize. 
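Aside (editorial, not part of the patch): the grouping documented for `coalesceAdjacentEdits` above can be reproduced in isolation. In this sketch the `stats` type and `coalesce` function are hypothetical simplifications of `diffStats` and the real function; `.` marks an identity edit, `X` a removal, `Y` an insertion, matching the doc comment's `"..XXY...Y"` notation.

```go
package main

import "fmt"

// stats is a cut-down diffStats: counts for one group of adjacent edits.
type stats struct{ ident, removed, inserted int }

// coalesce collapses a run-encoded edit script into alternating
// "identical" and "modified" groups, as coalesceAdjacentEdits does.
func coalesce(es string) []stats {
	var groups []stats
	prev := byte(0)
	last := func(mode byte) *stats {
		if prev != mode {
			groups = append(groups, stats{})
			prev = mode
		}
		return &groups[len(groups)-1]
	}
	for i := 0; i < len(es); i++ {
		switch es[i] {
		case '.':
			last('=').ident++
		case 'X':
			last('!').removed++
		case 'Y':
			last('!').inserted++
		}
	}
	return groups
}

func main() {
	fmt.Printf("%+v\n", coalesce("..XXY...Y"))
	// [{ident:2 removed:0 inserted:0} {ident:0 removed:2 inserted:1}
	//  {ident:3 removed:0 inserted:0} {ident:0 removed:0 inserted:1}]
}
```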
+//
+// Example:
+//
+//	WindowSize: 16,
+//	Input: [
+//		{NumIdentical: 61},              // group 0
+//		{NumRemoved: 3, NumInserted: 1}, // group 1
+//		{NumIdentical: 6},               // ├── coalesce
+//		{NumInserted: 2},                // ├── coalesce
+//		{NumIdentical: 1},               // ├── coalesce
+//		{NumRemoved: 9},                 // └── coalesce
+//		{NumIdentical: 64},              // group 2
+//		{NumRemoved: 3, NumInserted: 1}, // group 3
+//		{NumIdentical: 6},               // ├── coalesce
+//		{NumInserted: 2},                // ├── coalesce
+//		{NumIdentical: 1},               // ├── coalesce
+//		{NumRemoved: 7},                 // ├── coalesce
+//		{NumIdentical: 1},               // ├── coalesce
+//		{NumRemoved: 2},                 // └── coalesce
+//		{NumIdentical: 63},              // group 4
+//	]
+//	Output: [
+//		{NumIdentical: 61},
+//		{NumIdentical: 7, NumRemoved: 12, NumInserted: 3},
+//		{NumIdentical: 64},
+//		{NumIdentical: 8, NumRemoved: 12, NumInserted: 3},
+//		{NumIdentical: 63},
+//	]
+func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats {
+	groups, groupsOrig := groups[:0], groups
+	for i, ds := range groupsOrig {
+		if len(groups) >= 2 && ds.NumDiff() > 0 {
+			prev := &groups[len(groups)-2] // Unequal group
+			curr := &groups[len(groups)-1] // Equal group
+			next := &groupsOrig[i]         // Unequal group
+			hadX, hadY := prev.NumRemoved > 0, prev.NumInserted > 0
+			hasX, hasY := next.NumRemoved > 0, next.NumInserted > 0
+			if ((hadX || hasX) && (hadY || hasY)) && curr.NumIdentical <= windowSize {
+				*prev = prev.Append(*curr).Append(*next)
+				groups = groups[:len(groups)-1] // Truncate off equal group
+				continue
+			}
+		}
+		groups = append(groups, ds)
+	}
+	return groups
+}
+
+// cleanupSurroundingIdentical scans through all unequal groups, and
+// moves any leading sequence of equal elements to the preceding equal group and
+// moves any trailing sequence of equal elements to the succeeding equal group.
+//
+// This is necessary since coalesceInterveningIdentical may coalesce edit groups
+// together such that leading/trailing spans of equal elements become possible.
+// Note that this can occur even with an optimal diffing algorithm.
+//
+// Example:
+//
+//	Input: [
+//		{NumIdentical: 61},
+//		{NumIdentical: 1, NumRemoved: 11, NumInserted: 2}, // assume 3 leading identical elements
+//		{NumIdentical: 67},
+//		{NumIdentical: 7, NumRemoved: 12, NumInserted: 3}, // assume 10 trailing identical elements
+//		{NumIdentical: 54},
+//	]
+//	Output: [
+//		{NumIdentical: 64}, // incremented by 3
+//		{NumRemoved: 9},
+//		{NumIdentical: 67},
+//		{NumRemoved: 9},
+//		{NumIdentical: 64}, // incremented by 10
+//	]
+func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats {
+	var ix, iy int // indexes into sequence x and y
+	for i, ds := range groups {
+		// Handle equal group.
+		if ds.NumDiff() == 0 {
+			ix += ds.NumIdentical
+			iy += ds.NumIdentical
+			continue
+		}
+
+		// Handle unequal group.
+		nx := ds.NumIdentical + ds.NumRemoved + ds.NumModified
+		ny := ds.NumIdentical + ds.NumInserted + ds.NumModified
+		var numLeadingIdentical, numTrailingIdentical int
+		for j := 0; j < nx && j < ny && eq(ix+j, iy+j); j++ {
+			numLeadingIdentical++
+		}
+		for j := 0; j < nx && j < ny && eq(ix+nx-1-j, iy+ny-1-j); j++ {
+			numTrailingIdentical++
+		}
+		if numIdentical := numLeadingIdentical + numTrailingIdentical; numIdentical > 0 {
+			if numLeadingIdentical > 0 {
+				// Remove leading identical span from this group and
+				// insert it into the preceding group.
+ if i-1 >= 0 { + groups[i-1].NumIdentical += numLeadingIdentical + } else { + // No preceding group exists, so prepend a new group, + // but do so after we finish iterating over all groups. + defer func() { + groups = append([]diffStats{{Name: groups[0].Name, NumIdentical: numLeadingIdentical}}, groups...) + }() + } + // Increment indexes since the preceding group would have handled this. + ix += numLeadingIdentical + iy += numLeadingIdentical + } + if numTrailingIdentical > 0 { + // Remove trailing identical span from this group and + // insert it into the succeeding group. + if i+1 < len(groups) { + groups[i+1].NumIdentical += numTrailingIdentical + } else { + // No succeeding group exists, so append a new group, + // but do so after we finish iterating over all groups. + defer func() { + groups = append(groups, diffStats{Name: groups[len(groups)-1].Name, NumIdentical: numTrailingIdentical}) + }() + } + // Do not increment indexes since the succeeding group will handle this. + } + + // Update this group since some identical elements were removed. + nx -= numIdentical + ny -= numIdentical + groups[i] = diffStats{Name: ds.Name, NumRemoved: nx, NumInserted: ny} + } + ix += nx + iy += ny + } + return groups +} diff --git a/api/vendor/github.com/google/go-cmp/cmp/report_text.go b/api/vendor/github.com/google/go-cmp/cmp/report_text.go new file mode 100644 index 000000000..388fcf571 --- /dev/null +++ b/api/vendor/github.com/google/go-cmp/cmp/report_text.go @@ -0,0 +1,432 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmp + +import ( + "bytes" + "fmt" + "math/rand" + "strings" + "time" + "unicode/utf8" + + "github.com/google/go-cmp/cmp/internal/flags" +) + +var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 + +const maxColumnLength = 80 + +type indentMode int + +func (n indentMode) appendIndent(b []byte, d diffMode) []byte { + // The output of Diff is documented as being unstable to provide future + // flexibility in changing the output for more humanly readable reports. + // This logic intentionally introduces instability to the exact output + // so that users can detect accidental reliance on stability early on, + // rather than much later when an actual change to the format occurs. + if flags.Deterministic || randBool { + // Use regular spaces (U+0020). + switch d { + case diffUnknown, diffIdentical: + b = append(b, " "...) + case diffRemoved: + b = append(b, "- "...) + case diffInserted: + b = append(b, "+ "...) + } + } else { + // Use non-breaking spaces (U+00a0). + switch d { + case diffUnknown, diffIdentical: + b = append(b, "  "...) + case diffRemoved: + b = append(b, "- "...) + case diffInserted: + b = append(b, "+ "...) + } + } + return repeatCount(n).appendChar(b, '\t') +} + +type repeatCount int + +func (n repeatCount) appendChar(b []byte, c byte) []byte { + for ; n > 0; n-- { + b = append(b, c) + } + return b +} + +// textNode is a simplified tree-based representation of structured text. +// Possible node types are textWrap, textList, or textLine. +type textNode interface { + // Len reports the length in bytes of a single-line version of the tree. + // Nested textRecord.Diff and textRecord.Comment fields are ignored. + Len() int + // Equal reports whether the two trees are structurally identical. + // Nested textRecord.Diff and textRecord.Comment fields are compared. 
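Aside (editorial, not part of the patch): the deliberate output instability introduced by `appendIndent` above is subtle; regular and non-breaking indent spaces render identically but compare unequal, as this standalone snippet demonstrates.

```go
package main

import "fmt"

func main() {
	// appendIndent above randomly swaps regular spaces (U+0020) for
	// non-breaking spaces (U+00A0). The two render the same, so a test
	// that string-matches Diff output fails intermittently, surfacing
	// accidental reliance on the (unstable) report format early.
	regular := "  foo"
	nonBreaking := "\u00a0\u00a0foo"
	fmt.Println(regular == nonBreaking)          // false
	fmt.Printf("%q\n%q\n", regular, nonBreaking) // "  foo" vs "\u00a0\u00a0foo"
}
```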
+	Equal(textNode) bool
+	// String returns the string representation of the text tree.
+	// It is not guaranteed that len(x.String()) == x.Len(),
+	// nor that x.String() == y.String() implies that x.Equal(y).
+	String() string
+
+	// formatCompactTo formats the contents of the tree as a single-line string
+	// to the provided buffer. Any nested textRecord.Diff and textRecord.Comment
+	// fields are ignored.
+	//
+	// However, not all nodes in the tree should be collapsed as a single-line.
+	// If a node can be collapsed as a single-line, it is replaced by a textLine
+	// node. Since the top-level node cannot replace itself, this also returns
+	// the current node itself.
+	//
+	// This does not mutate the receiver.
+	formatCompactTo([]byte, diffMode) ([]byte, textNode)
+	// formatExpandedTo formats the contents of the tree as a multi-line string
+	// to the provided buffer. In order for column alignment to operate well,
+	// formatCompactTo must be called before calling formatExpandedTo.
+	formatExpandedTo([]byte, diffMode, indentMode) []byte
+}
+
+// textWrap is a wrapper that concatenates a prefix and/or a suffix
+// to the underlying node.
+type textWrap struct {
+	Prefix   string      // e.g., "bytes.Buffer{"
+	Value    textNode    // textWrap | textList | textLine
+	Suffix   string      // e.g., "}"
+	Metadata interface{} // arbitrary metadata; has no effect on formatting
+}
+
+func (s *textWrap) Len() int {
+	return len(s.Prefix) + s.Value.Len() + len(s.Suffix)
+}
+func (s1 *textWrap) Equal(s2 textNode) bool {
+	if s2, ok := s2.(*textWrap); ok {
+		return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix
+	}
+	return false
+}
+func (s *textWrap) String() string {
+	var d diffMode
+	var n indentMode
+	_, s2 := s.formatCompactTo(nil, d)
+	b := n.appendIndent(nil, d)      // Leading indent
+	b = s2.formatExpandedTo(b, d, n) // Main body
+	b = append(b, '\n')              // Trailing newline
+	return string(b)
+}
+func (s *textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
+	n0 := len(b) // Original buffer length
+	b = append(b, s.Prefix...)
+	b, s.Value = s.Value.formatCompactTo(b, d)
+	b = append(b, s.Suffix...)
+	if _, ok := s.Value.(textLine); ok {
+		return b, textLine(b[n0:])
+	}
+	return b, s
+}
+func (s *textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
+	b = append(b, s.Prefix...)
+	b = s.Value.formatExpandedTo(b, d, n)
+	b = append(b, s.Suffix...)
+	return b
+}
+
+// textList is a comma-separated list of textWrap or textLine nodes.
+// The list may be formatted as multi-lines or single-line at the discretion
+// of the textList.formatCompactTo method.
+type textList []textRecord
+type textRecord struct {
+	Diff       diffMode     // e.g., 0 or '-' or '+'
+	Key        string       // e.g., "MyField"
+	Value      textNode     // textWrap | textLine
+	ElideComma bool         // avoid trailing comma
+	Comment    fmt.Stringer // e.g., "6 identical fields"
+}
+
+// AppendEllipsis appends a new ellipsis node to the list if none already
+// exists at the end. If ds is non-zero it coalesces the statistics with the
+// previous diffStats.
+func (s *textList) AppendEllipsis(ds diffStats) { + hasStats := !ds.IsZero() + if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) { + if hasStats { + *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true, Comment: ds}) + } else { + *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true}) + } + return + } + if hasStats { + (*s)[len(*s)-1].Comment = (*s)[len(*s)-1].Comment.(diffStats).Append(ds) + } +} + +func (s textList) Len() (n int) { + for i, r := range s { + n += len(r.Key) + if r.Key != "" { + n += len(": ") + } + n += r.Value.Len() + if i < len(s)-1 { + n += len(", ") + } + } + return n +} + +func (s1 textList) Equal(s2 textNode) bool { + if s2, ok := s2.(textList); ok { + if len(s1) != len(s2) { + return false + } + for i := range s1 { + r1, r2 := s1[i], s2[i] + if !(r1.Diff == r2.Diff && r1.Key == r2.Key && r1.Value.Equal(r2.Value) && r1.Comment == r2.Comment) { + return false + } + } + return true + } + return false +} + +func (s textList) String() string { + return (&textWrap{Prefix: "{", Value: s, Suffix: "}"}).String() +} + +func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + s = append(textList(nil), s...) // Avoid mutating original + + // Determine whether we can collapse this list as a single line. + n0 := len(b) // Original buffer length + var multiLine bool + for i, r := range s { + if r.Diff == diffInserted || r.Diff == diffRemoved { + multiLine = true + } + b = append(b, r.Key...) + if r.Key != "" { + b = append(b, ": "...) + } + b, s[i].Value = r.Value.formatCompactTo(b, d|r.Diff) + if _, ok := s[i].Value.(textLine); !ok { + multiLine = true + } + if r.Comment != nil { + multiLine = true + } + if i < len(s)-1 { + b = append(b, ", "...) + } + } + // Force multi-lined output when printing a removed/inserted node that + // is sufficiently long. + if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > maxColumnLength { + multiLine = true + } + if !multiLine { + return b, textLine(b[n0:]) + } + return b, s +} + +func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { + alignKeyLens := s.alignLens( + func(r textRecord) bool { + _, isLine := r.Value.(textLine) + return r.Key == "" || !isLine + }, + func(r textRecord) int { return utf8.RuneCountInString(r.Key) }, + ) + alignValueLens := s.alignLens( + func(r textRecord) bool { + _, isLine := r.Value.(textLine) + return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil + }, + func(r textRecord) int { return utf8.RuneCount(r.Value.(textLine)) }, + ) + + // Format lists of simple lists in a batched form. + // If the list is sequence of only textLine values, + // then batch multiple values on a single line. + var isSimple bool + for _, r := range s { + _, isLine := r.Value.(textLine) + isSimple = r.Diff == 0 && r.Key == "" && isLine && r.Comment == nil + if !isSimple { + break + } + } + if isSimple { + n++ + var batch []byte + emitBatch := func() { + if len(batch) > 0 { + b = n.appendIndent(append(b, '\n'), d) + b = append(b, bytes.TrimRight(batch, " ")...) + batch = batch[:0] + } + } + for _, r := range s { + line := r.Value.(textLine) + if len(batch)+len(line)+len(", ") > maxColumnLength { + emitBatch() + } + batch = append(batch, line...) + batch = append(batch, ", "...) + } + emitBatch() + n-- + return n.appendIndent(append(b, '\n'), d) + } + + // Format the list as a multi-lined output. + n++ + for i, r := range s { + b = n.appendIndent(append(b, '\n'), d|r.Diff) + if r.Key != "" { + b = append(b, r.Key+": "...) 
+ } + b = alignKeyLens[i].appendChar(b, ' ') + + b = r.Value.formatExpandedTo(b, d|r.Diff, n) + if !r.ElideComma { + b = append(b, ',') + } + b = alignValueLens[i].appendChar(b, ' ') + + if r.Comment != nil { + b = append(b, " // "+r.Comment.String()...) + } + } + n-- + + return n.appendIndent(append(b, '\n'), d) +} + +func (s textList) alignLens( + skipFunc func(textRecord) bool, + lenFunc func(textRecord) int, +) []repeatCount { + var startIdx, endIdx, maxLen int + lens := make([]repeatCount, len(s)) + for i, r := range s { + if skipFunc(r) { + for j := startIdx; j < endIdx && j < len(s); j++ { + lens[j] = repeatCount(maxLen - lenFunc(s[j])) + } + startIdx, endIdx, maxLen = i+1, i+1, 0 + } else { + if maxLen < lenFunc(r) { + maxLen = lenFunc(r) + } + endIdx = i + 1 + } + } + for j := startIdx; j < endIdx && j < len(s); j++ { + lens[j] = repeatCount(maxLen - lenFunc(s[j])) + } + return lens +} + +// textLine is a single-line segment of text and is always a leaf node +// in the textNode tree. +type textLine []byte + +var ( + textNil = textLine("nil") + textEllipsis = textLine("...") +) + +func (s textLine) Len() int { + return len(s) +} +func (s1 textLine) Equal(s2 textNode) bool { + if s2, ok := s2.(textLine); ok { + return bytes.Equal([]byte(s1), []byte(s2)) + } + return false +} +func (s textLine) String() string { + return string(s) +} +func (s textLine) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + return append(b, s...), s +} +func (s textLine) formatExpandedTo(b []byte, _ diffMode, _ indentMode) []byte { + return append(b, s...) +} + +type diffStats struct { + Name string + NumIgnored int + NumIdentical int + NumRemoved int + NumInserted int + NumModified int +} + +func (s diffStats) IsZero() bool { + s.Name = "" + return s == diffStats{} +} + +func (s diffStats) NumDiff() int { + return s.NumRemoved + s.NumInserted + s.NumModified +} + +func (s diffStats) Append(ds diffStats) diffStats { + assert(s.Name == ds.Name) + s.NumIgnored += ds.NumIgnored + s.NumIdentical += ds.NumIdentical + s.NumRemoved += ds.NumRemoved + s.NumInserted += ds.NumInserted + s.NumModified += ds.NumModified + return s +} + +// String prints a humanly-readable summary of coalesced records. +// +// Example: +// +// diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields" +func (s diffStats) String() string { + var ss []string + var sum int + labels := [...]string{"ignored", "identical", "removed", "inserted", "modified"} + counts := [...]int{s.NumIgnored, s.NumIdentical, s.NumRemoved, s.NumInserted, s.NumModified} + for i, n := range counts { + if n > 0 { + ss = append(ss, fmt.Sprintf("%d %v", n, labels[i])) + } + sum += n + } + + // Pluralize the name (adjusting for some obscure English grammar rules). + name := s.Name + if sum > 1 { + name += "s" + if strings.HasSuffix(name, "ys") { + name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries" + } + } + + // Format the list according to English grammar (with Oxford comma). 
+	switch n := len(ss); n {
+	case 0:
+		return ""
+	case 1, 2:
+		return strings.Join(ss, " and ") + " " + name
+	default:
+		return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name
+	}
+}
+
+type commentString string
+
+func (s commentString) String() string { return string(s) }
diff --git a/api/vendor/github.com/google/go-cmp/cmp/report_value.go b/api/vendor/github.com/google/go-cmp/cmp/report_value.go
new file mode 100644
index 000000000..668d470fd
--- /dev/null
+++ b/api/vendor/github.com/google/go-cmp/cmp/report_value.go
@@ -0,0 +1,121 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import "reflect"
+
+// valueNode represents a single node within a report, which is a
+// structured representation of the value tree, containing information
+// regarding which nodes are equal or not.
+type valueNode struct {
+	parent *valueNode
+
+	Type   reflect.Type
+	ValueX reflect.Value
+	ValueY reflect.Value
+
+	// NumSame is the number of leaf nodes that are equal.
+	// All descendants are equal only if NumDiff is 0.
+	NumSame int
+	// NumDiff is the number of leaf nodes that are not equal.
+	NumDiff int
+	// NumIgnored is the number of leaf nodes that are ignored.
+	NumIgnored int
+	// NumCompared is the number of leaf nodes that were compared
+	// using an Equal method or Comparer function.
+	NumCompared int
+	// NumTransformed is the number of non-leaf nodes that were transformed.
+	NumTransformed int
+	// NumChildren is the number of transitive descendants of this node.
+	// This counts from zero; thus, leaf nodes have no descendants.
+	NumChildren int
+	// MaxDepth is the maximum depth of the tree. This counts from zero;
+	// thus, leaf nodes have a depth of zero.
+	MaxDepth int
+
+	// Records is a list of struct fields, slice elements, or map entries.
+	Records []reportRecord // If populated, implies Value is not populated
+
+	// Value is the result of a transformation, pointer indirection, or
+	// type assertion.
+	Value *valueNode // If populated, implies Records is not populated
+
+	// TransformerName is the name of the transformer.
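Aside (editorial, not part of the patch): the pluralization and Oxford-comma rules of `diffStats.String` above compose as follows; `summary` is a hypothetical stand-in with the same branching.

```go
package main

import (
	"fmt"
	"strings"
)

// summary applies diffStats.String's rules: fixed label order, "y" -> "ies"
// pluralization, "and" for one or two clauses, Oxford comma for three or more.
func summary(name string, counts map[string]int) string {
	var ss []string
	sum := 0
	for _, label := range []string{"ignored", "identical", "removed", "inserted", "modified"} {
		if n := counts[label]; n > 0 {
			ss = append(ss, fmt.Sprintf("%d %s", n, label))
			sum += n
		}
	}
	if sum > 1 {
		name += "s"
		if strings.HasSuffix(name, "ys") {
			name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries"
		}
	}
	switch n := len(ss); n {
	case 0:
		return ""
	case 1, 2:
		return strings.Join(ss, " and ") + " " + name
	default:
		return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name
	}
}

func main() {
	fmt.Println(summary("field", map[string]int{"ignored": 5}))                // 5 ignored fields
	fmt.Println(summary("entry", map[string]int{"removed": 1, "inserted": 2})) // 1 removed and 2 inserted entries
	fmt.Println(summary("byte", map[string]int{"identical": 4, "removed": 1, "inserted": 1}))
	// 4 identical, 1 removed, and 1 inserted bytes
}
```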
+	TransformerName string // If non-empty, implies Value is populated
+}
+type reportRecord struct {
+	Key   reflect.Value // Invalid for slice element
+	Value *valueNode
+}
+
+func (parent *valueNode) PushStep(ps PathStep) (child *valueNode) {
+	vx, vy := ps.Values()
+	child = &valueNode{parent: parent, Type: ps.Type(), ValueX: vx, ValueY: vy}
+	switch s := ps.(type) {
+	case StructField:
+		assert(parent.Value == nil)
+		parent.Records = append(parent.Records, reportRecord{Key: reflect.ValueOf(s.Name()), Value: child})
+	case SliceIndex:
+		assert(parent.Value == nil)
+		parent.Records = append(parent.Records, reportRecord{Value: child})
+	case MapIndex:
+		assert(parent.Value == nil)
+		parent.Records = append(parent.Records, reportRecord{Key: s.Key(), Value: child})
+	case Indirect:
+		assert(parent.Value == nil && parent.Records == nil)
+		parent.Value = child
+	case TypeAssertion:
+		assert(parent.Value == nil && parent.Records == nil)
+		parent.Value = child
+	case Transform:
+		assert(parent.Value == nil && parent.Records == nil)
+		parent.Value = child
+		parent.TransformerName = s.Name()
+		parent.NumTransformed++
+	default:
+		assert(parent == nil) // Must be the root step
+	}
+	return child
+}
+
+func (r *valueNode) Report(rs Result) {
+	assert(r.MaxDepth == 0) // May only be called on leaf nodes
+
+	if rs.ByIgnore() {
+		r.NumIgnored++
+	} else {
+		if rs.Equal() {
+			r.NumSame++
+		} else {
+			r.NumDiff++
+		}
+	}
+	assert(r.NumSame+r.NumDiff+r.NumIgnored == 1)
+
+	if rs.ByMethod() {
+		r.NumCompared++
+	}
+	if rs.ByFunc() {
+		r.NumCompared++
+	}
+	assert(r.NumCompared <= 1)
+}
+
+func (child *valueNode) PopStep() (parent *valueNode) {
+	if child.parent == nil {
+		return nil
+	}
+	parent = child.parent
+	parent.NumSame += child.NumSame
+	parent.NumDiff += child.NumDiff
+	parent.NumIgnored += child.NumIgnored
+	parent.NumCompared += child.NumCompared
+	parent.NumTransformed += child.NumTransformed
+	parent.NumChildren += child.NumChildren + 1
+	if parent.MaxDepth < child.MaxDepth+1 {
+		parent.MaxDepth = child.MaxDepth + 1
+	}
+	return parent
+}
diff --git a/api/vendor/github.com/google/pprof/AUTHORS b/api/vendor/github.com/google/pprof/AUTHORS
new file mode 100644
index 000000000..fd736cb1c
--- /dev/null
+++ b/api/vendor/github.com/google/pprof/AUTHORS
@@ -0,0 +1,7 @@
+# This is the official list of pprof authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+# Names should be added to this file as:
+# Name or Organization <email address>
+# The email address is not required for organizations.
+Google Inc.
\ No newline at end of file
diff --git a/api/vendor/github.com/google/pprof/CONTRIBUTORS b/api/vendor/github.com/google/pprof/CONTRIBUTORS
new file mode 100644
index 000000000..8c8c37d2c
--- /dev/null
+++ b/api/vendor/github.com/google/pprof/CONTRIBUTORS
@@ -0,0 +1,16 @@
+# People who have agreed to one of the CLAs and can contribute patches.
+# The AUTHORS file lists the copyright holders; this file
+# lists people. For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+# +# https://developers.google.com/open-source/cla/individual +# https://developers.google.com/open-source/cla/corporate +# +# Names should be added to this file as: +# Name +Raul Silvera +Tipp Moseley +Hyoun Kyu Cho +Martin Spier +Taco de Wolff +Andrew Hunter diff --git a/api/vendor/github.com/google/pprof/LICENSE b/api/vendor/github.com/google/pprof/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/api/vendor/github.com/google/pprof/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/api/vendor/github.com/google/pprof/profile/encode.go b/api/vendor/github.com/google/pprof/profile/encode.go new file mode 100644 index 000000000..ab7f03ae2 --- /dev/null +++ b/api/vendor/github.com/google/pprof/profile/encode.go @@ -0,0 +1,567 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "errors" + "sort" +) + +func (p *Profile) decoder() []decoder { + return profileDecoder +} + +// preEncode populates the unexported fields to be used by encode +// (with suffix X) from the corresponding exported fields. The +// exported fields are cleared up to facilitate testing. 
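Aside (editorial, not part of the patch): `preEncode` below leans on a string-interning table. The `addString` helper it calls is defined elsewhere in this file; the sketch below assumes it behaves as its call sites suggest (index 0 reserved for the empty string, repeated strings deduplicated into one index).

```go
package main

import "fmt"

// addString interns s into the table and returns its index; repeated
// strings map to the same index, and the table is later flattened
// into the profile's string_table.
func addString(table map[string]int, s string) int64 {
	i, ok := table[s]
	if !ok {
		i = len(table)
		table[s] = i
	}
	return int64(i)
}

func main() {
	table := make(map[string]int)
	addString(table, "") // index 0 must be the empty string
	fmt.Println(addString(table, "cpu"))         // 1
	fmt.Println(addString(table, "nanoseconds")) // 2
	fmt.Println(addString(table, "cpu"))         // 1 again: interned

	// Invert the map into the flat table that gets encoded.
	flat := make([]string, len(table))
	for s, i := range table {
		flat[i] = s
	}
	fmt.Printf("%q\n", flat) // ["" "cpu" "nanoseconds"]
}
```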
+func (p *Profile) preEncode() { + strings := make(map[string]int) + addString(strings, "") + + for _, st := range p.SampleType { + st.typeX = addString(strings, st.Type) + st.unitX = addString(strings, st.Unit) + } + + for _, s := range p.Sample { + s.labelX = nil + var keys []string + for k := range s.Label { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + vs := s.Label[k] + for _, v := range vs { + s.labelX = append(s.labelX, + label{ + keyX: addString(strings, k), + strX: addString(strings, v), + }, + ) + } + } + var numKeys []string + for k := range s.NumLabel { + numKeys = append(numKeys, k) + } + sort.Strings(numKeys) + for _, k := range numKeys { + keyX := addString(strings, k) + vs := s.NumLabel[k] + units := s.NumUnit[k] + for i, v := range vs { + var unitX int64 + if len(units) != 0 { + unitX = addString(strings, units[i]) + } + s.labelX = append(s.labelX, + label{ + keyX: keyX, + numX: v, + unitX: unitX, + }, + ) + } + } + s.locationIDX = make([]uint64, len(s.Location)) + for i, loc := range s.Location { + s.locationIDX[i] = loc.ID + } + } + + for _, m := range p.Mapping { + m.fileX = addString(strings, m.File) + m.buildIDX = addString(strings, m.BuildID) + } + + for _, l := range p.Location { + for i, ln := range l.Line { + if ln.Function != nil { + l.Line[i].functionIDX = ln.Function.ID + } else { + l.Line[i].functionIDX = 0 + } + } + if l.Mapping != nil { + l.mappingIDX = l.Mapping.ID + } else { + l.mappingIDX = 0 + } + } + for _, f := range p.Function { + f.nameX = addString(strings, f.Name) + f.systemNameX = addString(strings, f.SystemName) + f.filenameX = addString(strings, f.Filename) + } + + p.dropFramesX = addString(strings, p.DropFrames) + p.keepFramesX = addString(strings, p.KeepFrames) + + if pt := p.PeriodType; pt != nil { + pt.typeX = addString(strings, pt.Type) + pt.unitX = addString(strings, pt.Unit) + } + + p.commentX = nil + for _, c := range p.Comments { + p.commentX = append(p.commentX, addString(strings, c)) + } + + p.defaultSampleTypeX = addString(strings, p.DefaultSampleType) + + p.stringTable = make([]string, len(strings)) + for s, i := range strings { + p.stringTable[i] = s + } +} + +func (p *Profile) encode(b *buffer) { + for _, x := range p.SampleType { + encodeMessage(b, 1, x) + } + for _, x := range p.Sample { + encodeMessage(b, 2, x) + } + for _, x := range p.Mapping { + encodeMessage(b, 3, x) + } + for _, x := range p.Location { + encodeMessage(b, 4, x) + } + for _, x := range p.Function { + encodeMessage(b, 5, x) + } + encodeStrings(b, 6, p.stringTable) + encodeInt64Opt(b, 7, p.dropFramesX) + encodeInt64Opt(b, 8, p.keepFramesX) + encodeInt64Opt(b, 9, p.TimeNanos) + encodeInt64Opt(b, 10, p.DurationNanos) + if pt := p.PeriodType; pt != nil && (pt.typeX != 0 || pt.unitX != 0) { + encodeMessage(b, 11, p.PeriodType) + } + encodeInt64Opt(b, 12, p.Period) + encodeInt64s(b, 13, p.commentX) + encodeInt64(b, 14, p.defaultSampleTypeX) +} + +var profileDecoder = []decoder{ + nil, // 0 + // repeated ValueType sample_type = 1 + func(b *buffer, m message) error { + x := new(ValueType) + pp := m.(*Profile) + pp.SampleType = append(pp.SampleType, x) + return decodeMessage(b, x) + }, + // repeated Sample sample = 2 + func(b *buffer, m message) error { + x := new(Sample) + pp := m.(*Profile) + pp.Sample = append(pp.Sample, x) + return decodeMessage(b, x) + }, + // repeated Mapping mapping = 3 + func(b *buffer, m message) error { + x := new(Mapping) + pp := m.(*Profile) + pp.Mapping = append(pp.Mapping, x) + return decodeMessage(b, x) 
+ }, + // repeated Location location = 4 + func(b *buffer, m message) error { + x := new(Location) + x.Line = make([]Line, 0, 8) // Pre-allocate Line buffer + pp := m.(*Profile) + pp.Location = append(pp.Location, x) + err := decodeMessage(b, x) + var tmp []Line + x.Line = append(tmp, x.Line...) // Shrink to allocated size + return err + }, + // repeated Function function = 5 + func(b *buffer, m message) error { + x := new(Function) + pp := m.(*Profile) + pp.Function = append(pp.Function, x) + return decodeMessage(b, x) + }, + // repeated string string_table = 6 + func(b *buffer, m message) error { + err := decodeStrings(b, &m.(*Profile).stringTable) + if err != nil { + return err + } + if m.(*Profile).stringTable[0] != "" { + return errors.New("string_table[0] must be ''") + } + return nil + }, + // int64 drop_frames = 7 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).dropFramesX) }, + // int64 keep_frames = 8 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).keepFramesX) }, + // int64 time_nanos = 9 + func(b *buffer, m message) error { + if m.(*Profile).TimeNanos != 0 { + return errConcatProfile + } + return decodeInt64(b, &m.(*Profile).TimeNanos) + }, + // int64 duration_nanos = 10 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).DurationNanos) }, + // ValueType period_type = 11 + func(b *buffer, m message) error { + x := new(ValueType) + pp := m.(*Profile) + pp.PeriodType = x + return decodeMessage(b, x) + }, + // int64 period = 12 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).Period) }, + // repeated int64 comment = 13 + func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) }, + // int64 defaultSampleType = 14 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) }, +} + +// postDecode takes the unexported fields populated by decode (with +// suffix X) and populates the corresponding exported fields. +// The unexported fields are cleared up to facilitate testing. 
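Aside (editorial, not part of the patch): the Location decoder above first over-allocates `Line` and then shrinks it; the pattern in isolation (illustrative values, exact capacities may vary by Go version):

```go
package main

import "fmt"

func main() {
	// Pre-allocate with room to grow, as the Location decoder above does
	// with Line, so decoding avoids repeated reallocation...
	line := make([]int, 0, 8)
	line = append(line, 1, 2)
	fmt.Println(len(line), cap(line)) // 2 8

	// ...then copy into a right-sized slice so a profile with millions
	// of locations does not pin the unused capacity.
	var tmp []int
	line = append(tmp, line...)
	fmt.Println(len(line), cap(line)) // 2 2 on current Go versions
}
```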
+func (p *Profile) postDecode() error { + var err error + mappings := make(map[uint64]*Mapping, len(p.Mapping)) + mappingIds := make([]*Mapping, len(p.Mapping)+1) + for _, m := range p.Mapping { + m.File, err = getString(p.stringTable, &m.fileX, err) + m.BuildID, err = getString(p.stringTable, &m.buildIDX, err) + if m.ID < uint64(len(mappingIds)) { + mappingIds[m.ID] = m + } else { + mappings[m.ID] = m + } + } + + functions := make(map[uint64]*Function, len(p.Function)) + functionIds := make([]*Function, len(p.Function)+1) + for _, f := range p.Function { + f.Name, err = getString(p.stringTable, &f.nameX, err) + f.SystemName, err = getString(p.stringTable, &f.systemNameX, err) + f.Filename, err = getString(p.stringTable, &f.filenameX, err) + if f.ID < uint64(len(functionIds)) { + functionIds[f.ID] = f + } else { + functions[f.ID] = f + } + } + + locations := make(map[uint64]*Location, len(p.Location)) + locationIds := make([]*Location, len(p.Location)+1) + for _, l := range p.Location { + if id := l.mappingIDX; id < uint64(len(mappingIds)) { + l.Mapping = mappingIds[id] + } else { + l.Mapping = mappings[id] + } + l.mappingIDX = 0 + for i, ln := range l.Line { + if id := ln.functionIDX; id != 0 { + l.Line[i].functionIDX = 0 + if id < uint64(len(functionIds)) { + l.Line[i].Function = functionIds[id] + } else { + l.Line[i].Function = functions[id] + } + } + } + if l.ID < uint64(len(locationIds)) { + locationIds[l.ID] = l + } else { + locations[l.ID] = l + } + } + + for _, st := range p.SampleType { + st.Type, err = getString(p.stringTable, &st.typeX, err) + st.Unit, err = getString(p.stringTable, &st.unitX, err) + } + + for _, s := range p.Sample { + labels := make(map[string][]string, len(s.labelX)) + numLabels := make(map[string][]int64, len(s.labelX)) + numUnits := make(map[string][]string, len(s.labelX)) + for _, l := range s.labelX { + var key, value string + key, err = getString(p.stringTable, &l.keyX, err) + if l.strX != 0 { + value, err = getString(p.stringTable, &l.strX, err) + labels[key] = append(labels[key], value) + } else if l.numX != 0 || l.unitX != 0 { + numValues := numLabels[key] + units := numUnits[key] + if l.unitX != 0 { + var unit string + unit, err = getString(p.stringTable, &l.unitX, err) + units = padStringArray(units, len(numValues)) + numUnits[key] = append(units, unit) + } + numLabels[key] = append(numLabels[key], l.numX) + } + } + if len(labels) > 0 { + s.Label = labels + } + if len(numLabels) > 0 { + s.NumLabel = numLabels + for key, units := range numUnits { + if len(units) > 0 { + numUnits[key] = padStringArray(units, len(numLabels[key])) + } + } + s.NumUnit = numUnits + } + s.Location = make([]*Location, len(s.locationIDX)) + for i, lid := range s.locationIDX { + if lid < uint64(len(locationIds)) { + s.Location[i] = locationIds[lid] + } else { + s.Location[i] = locations[lid] + } + } + s.locationIDX = nil + } + + p.DropFrames, err = getString(p.stringTable, &p.dropFramesX, err) + p.KeepFrames, err = getString(p.stringTable, &p.keepFramesX, err) + + if pt := p.PeriodType; pt == nil { + p.PeriodType = &ValueType{} + } + + if pt := p.PeriodType; pt != nil { + pt.Type, err = getString(p.stringTable, &pt.typeX, err) + pt.Unit, err = getString(p.stringTable, &pt.unitX, err) + } + + for _, i := range p.commentX { + var c string + c, err = getString(p.stringTable, &i, err) + p.Comments = append(p.Comments, c) + } + + p.commentX = nil + p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err) + p.stringTable = nil + return err +} + +// 
padStringArray pads arr with enough empty strings to make arr
+// length l when arr's length is less than l.
+func padStringArray(arr []string, l int) []string {
+	if l <= len(arr) {
+		return arr
+	}
+	return append(arr, make([]string, l-len(arr))...)
+}
+
+func (p *ValueType) decoder() []decoder {
+	return valueTypeDecoder
+}
+
+func (p *ValueType) encode(b *buffer) {
+	encodeInt64Opt(b, 1, p.typeX)
+	encodeInt64Opt(b, 2, p.unitX)
+}
+
+var valueTypeDecoder = []decoder{
+	nil, // 0
+	// optional int64 type = 1
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).typeX) },
+	// optional int64 unit = 2
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).unitX) },
+}
+
+func (p *Sample) decoder() []decoder {
+	return sampleDecoder
+}
+
+func (p *Sample) encode(b *buffer) {
+	encodeUint64s(b, 1, p.locationIDX)
+	encodeInt64s(b, 2, p.Value)
+	for _, x := range p.labelX {
+		encodeMessage(b, 3, x)
+	}
+}
+
+var sampleDecoder = []decoder{
+	nil, // 0
+	// repeated uint64 location = 1
+	func(b *buffer, m message) error { return decodeUint64s(b, &m.(*Sample).locationIDX) },
+	// repeated int64 value = 2
+	func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Sample).Value) },
+	// repeated Label label = 3
+	func(b *buffer, m message) error {
+		s := m.(*Sample)
+		n := len(s.labelX)
+		s.labelX = append(s.labelX, label{})
+		return decodeMessage(b, &s.labelX[n])
+	},
+}
+
+func (p label) decoder() []decoder {
+	return labelDecoder
+}
+
+func (p label) encode(b *buffer) {
+	encodeInt64Opt(b, 1, p.keyX)
+	encodeInt64Opt(b, 2, p.strX)
+	encodeInt64Opt(b, 3, p.numX)
+	encodeInt64Opt(b, 4, p.unitX)
+}
+
+var labelDecoder = []decoder{
+	nil, // 0
+	// optional int64 key = 1
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).keyX) },
+	// optional int64 str = 2
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).strX) },
+	// optional int64 num = 3
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).numX) },
+	// optional int64 num_unit = 4
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).unitX) },
+}
+
+func (p *Mapping) decoder() []decoder {
+	return mappingDecoder
+}
+
+func (p *Mapping) encode(b *buffer) {
+	encodeUint64Opt(b, 1, p.ID)
+	encodeUint64Opt(b, 2, p.Start)
+	encodeUint64Opt(b, 3, p.Limit)
+	encodeUint64Opt(b, 4, p.Offset)
+	encodeInt64Opt(b, 5, p.fileX)
+	encodeInt64Opt(b, 6, p.buildIDX)
+	encodeBoolOpt(b, 7, p.HasFunctions)
+	encodeBoolOpt(b, 8, p.HasFilenames)
+	encodeBoolOpt(b, 9, p.HasLineNumbers)
+	encodeBoolOpt(b, 10, p.HasInlineFrames)
+}
+
+var mappingDecoder = []decoder{
+	nil, // 0
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).ID) },            // optional uint64 id = 1
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Start) },         // optional uint64 memory_start = 2
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Limit) },         // optional uint64 memory_limit = 3
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Offset) },        // optional uint64 file_offset = 4
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).fileX) },          // optional int64 filename = 5
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).buildIDX) },       // optional int64 build_id = 6
+	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFunctions) },    // optional bool has_functions = 7
+	func(b *buffer, m message) error { return decodeBool(b, 
&m.(*Mapping).HasFilenames) },   // optional bool has_filenames = 8
+	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasLineNumbers) },  // optional bool has_line_numbers = 9
+	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasInlineFrames) }, // optional bool has_inline_frames = 10
+}
+
+func (p *Location) decoder() []decoder {
+	return locationDecoder
+}
+
+func (p *Location) encode(b *buffer) {
+	encodeUint64Opt(b, 1, p.ID)
+	encodeUint64Opt(b, 2, p.mappingIDX)
+	encodeUint64Opt(b, 3, p.Address)
+	for i := range p.Line {
+		encodeMessage(b, 4, &p.Line[i])
+	}
+	encodeBoolOpt(b, 5, p.IsFolded)
+}
+
+var locationDecoder = []decoder{
+	nil, // 0
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).ID) },         // optional uint64 id = 1;
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).mappingIDX) }, // optional uint64 mapping_id = 2;
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).Address) },    // optional uint64 address = 3;
+	func(b *buffer, m message) error { // repeated Line line = 4
+		pp := m.(*Location)
+		n := len(pp.Line)
+		pp.Line = append(pp.Line, Line{})
+		return decodeMessage(b, &pp.Line[n])
+	},
+	func(b *buffer, m message) error { return decodeBool(b, &m.(*Location).IsFolded) }, // optional bool is_folded = 5;
+}
+
+func (p *Line) decoder() []decoder {
+	return lineDecoder
+}
+
+func (p *Line) encode(b *buffer) {
+	encodeUint64Opt(b, 1, p.functionIDX)
+	encodeInt64Opt(b, 2, p.Line)
+}
+
+var lineDecoder = []decoder{
+	nil, // 0
+	// optional uint64 function_id = 1
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) },
+	// optional int64 line = 2
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) },
+}
+
+func (p *Function) decoder() []decoder {
+	return functionDecoder
+}
+
+func (p *Function) encode(b *buffer) {
+	encodeUint64Opt(b, 1, p.ID)
+	encodeInt64Opt(b, 2, p.nameX)
+	encodeInt64Opt(b, 3, p.systemNameX)
+	encodeInt64Opt(b, 4, p.filenameX)
+	encodeInt64Opt(b, 5, p.StartLine)
+}
+
+var functionDecoder = []decoder{
+	nil, // 0
+	// optional uint64 id = 1
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Function).ID) },
+	// optional int64 function_name = 2
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).nameX) },
+	// optional int64 function_system_name = 3
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).systemNameX) },
+	// optional int64 filename = 4
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).filenameX) },
+	// optional int64 start_line = 5
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).StartLine) },
+}
+
+func addString(strings map[string]int, s string) int64 {
+	i, ok := strings[s]
+	if !ok {
+		i = len(strings)
+		strings[s] = i
+	}
+	return int64(i)
+}
+
+func getString(strings []string, strng *int64, err error) (string, error) {
+	if err != nil {
+		return "", err
+	}
+	s := int(*strng)
+	if s < 0 || s >= len(strings) {
+		return "", errMalformed
+	}
+	*strng = 0
+	return strings[s], nil
+}
diff --git a/api/vendor/github.com/google/pprof/profile/filter.go b/api/vendor/github.com/google/pprof/profile/filter.go
new file mode 100644
index 000000000..ea8e66c68
--- /dev/null
+++ b/api/vendor/github.com/google/pprof/profile/filter.go
@@ -0,0 +1,270 @@
+// Copyright 2014 Google Inc. All Rights Reserved. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package profile
+
+// Implements methods to filter samples from profiles.
+
+import "regexp"
+
+// FilterSamplesByName filters the samples in a profile and only keeps
+// samples where at least one frame matches focus but none match ignore.
+// Returns true if the corresponding regexp matched at least one sample.
+func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) {
+	focusOrIgnore := make(map[uint64]bool)
+	hidden := make(map[uint64]bool)
+	for _, l := range p.Location {
+		if ignore != nil && l.matchesName(ignore) {
+			im = true
+			focusOrIgnore[l.ID] = false
+		} else if focus == nil || l.matchesName(focus) {
+			fm = true
+			focusOrIgnore[l.ID] = true
+		}
+
+		if hide != nil && l.matchesName(hide) {
+			hm = true
+			l.Line = l.unmatchedLines(hide)
+			if len(l.Line) == 0 {
+				hidden[l.ID] = true
+			}
+		}
+		if show != nil {
+			l.Line = l.matchedLines(show)
+			if len(l.Line) == 0 {
+				hidden[l.ID] = true
+			} else {
+				hnm = true
+			}
+		}
+	}
+
+	s := make([]*Sample, 0, len(p.Sample))
+	for _, sample := range p.Sample {
+		if focusedAndNotIgnored(sample.Location, focusOrIgnore) {
+			if len(hidden) > 0 {
+				var locs []*Location
+				for _, loc := range sample.Location {
+					if !hidden[loc.ID] {
+						locs = append(locs, loc)
+					}
+				}
+				if len(locs) == 0 {
+					// Remove sample with no locations (by not adding it to s).
+					continue
+				}
+				sample.Location = locs
+			}
+			s = append(s, sample)
+		}
+	}
+	p.Sample = s
+
+	return
+}
+
+// ShowFrom drops all stack frames above the highest matching frame and returns
+// whether a match was found. If showFrom is nil it returns false and does not
+// modify the profile.
+//
+// Example: consider a sample with frames [A, B, C, B], where A is the root.
+// ShowFrom(nil) returns false and has frames [A, B, C, B].
+// ShowFrom(A) returns true and has frames [A, B, C, B].
+// ShowFrom(B) returns true and has frames [B, C, B].
+// ShowFrom(C) returns true and has frames [C, B].
+// ShowFrom(D) returns false and drops the sample because no frames remain.
+func (p *Profile) ShowFrom(showFrom *regexp.Regexp) (matched bool) {
+	if showFrom == nil {
+		return false
+	}
+	// showFromLocs stores location IDs that matched ShowFrom.
+	showFromLocs := make(map[uint64]bool)
+	// Apply to locations.
+	for _, loc := range p.Location {
+		if filterShowFromLocation(loc, showFrom) {
+			showFromLocs[loc.ID] = true
+			matched = true
+		}
+	}
+	// For all samples, strip locations after the highest matching one.
+	s := make([]*Sample, 0, len(p.Sample))
+	for _, sample := range p.Sample {
+		for i := len(sample.Location) - 1; i >= 0; i-- {
+			if showFromLocs[sample.Location[i].ID] {
+				sample.Location = sample.Location[:i+1]
+				s = append(s, sample)
+				break
+			}
+		}
+	}
+	p.Sample = s
+	return matched
+}
+
+// filterShowFromLocation tests a showFrom regex against a location, removes
+// lines after the last match and returns whether a match was found. If the
+// mapping is matched, then all lines are kept. 
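+//
+// For instance (an illustrative sketch): if a location's lines resolve
+// to functions [f, g, h] and showFrom matches only g, the lines are
+// truncated to [f, g]; if the location's mapping file matches instead,
+// the lines are left untouched.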
+func filterShowFromLocation(loc *Location, showFrom *regexp.Regexp) bool { + if m := loc.Mapping; m != nil && showFrom.MatchString(m.File) { + return true + } + if i := loc.lastMatchedLineIndex(showFrom); i >= 0 { + loc.Line = loc.Line[:i+1] + return true + } + return false +} + +// lastMatchedLineIndex returns the index of the last line that matches a regex, +// or -1 if no match is found. +func (loc *Location) lastMatchedLineIndex(re *regexp.Regexp) int { + for i := len(loc.Line) - 1; i >= 0; i-- { + if fn := loc.Line[i].Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + return i + } + } + } + return -1 +} + +// FilterTagsByName filters the tags in a profile and only keeps +// tags that match show and not hide. +func (p *Profile) FilterTagsByName(show, hide *regexp.Regexp) (sm, hm bool) { + matchRemove := func(name string) bool { + matchShow := show == nil || show.MatchString(name) + matchHide := hide != nil && hide.MatchString(name) + + if matchShow { + sm = true + } + if matchHide { + hm = true + } + return !matchShow || matchHide + } + for _, s := range p.Sample { + for lab := range s.Label { + if matchRemove(lab) { + delete(s.Label, lab) + } + } + for lab := range s.NumLabel { + if matchRemove(lab) { + delete(s.NumLabel, lab) + } + } + } + return +} + +// matchesName returns whether the location matches the regular +// expression. It checks any available function names, file names, and +// mapping object filename. +func (loc *Location) matchesName(re *regexp.Regexp) bool { + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + return true + } + } + } + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return true + } + return false +} + +// unmatchedLines returns the lines in the location that do not match +// the regular expression. +func (loc *Location) unmatchedLines(re *regexp.Regexp) []Line { + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return nil + } + var lines []Line + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + continue + } + } + lines = append(lines, ln) + } + return lines +} + +// matchedLines returns the lines in the location that match +// the regular expression. +func (loc *Location) matchedLines(re *regexp.Regexp) []Line { + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return loc.Line + } + var lines []Line + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if !re.MatchString(fn.Name) && !re.MatchString(fn.Filename) { + continue + } + } + lines = append(lines, ln) + } + return lines +} + +// focusedAndNotIgnored looks up a slice of ids against a map of +// focused/ignored locations. The map only contains locations that are +// explicitly focused or ignored. Returns whether there is at least +// one focused location but no ignored locations. +func focusedAndNotIgnored(locs []*Location, m map[uint64]bool) bool { + var f bool + for _, loc := range locs { + if focus, focusOrIgnore := m[loc.ID]; focusOrIgnore { + if focus { + // Found focused location. Must keep searching in case there + // is an ignored one as well. + f = true + } else { + // Found ignored location. Can return false right away. 
+				return false
+			}
+		}
+	}
+	return f
+}
+
+// TagMatch selects tags for filtering.
+type TagMatch func(s *Sample) bool
+
+// FilterSamplesByTag removes all samples from the profile, except
+// those that match focus and do not match ignore.
+func (p *Profile) FilterSamplesByTag(focus, ignore TagMatch) (fm, im bool) {
+	samples := make([]*Sample, 0, len(p.Sample))
+	for _, s := range p.Sample {
+		focused, ignored := true, false
+		if focus != nil {
+			focused = focus(s)
+		}
+		if ignore != nil {
+			ignored = ignore(s)
+		}
+		fm = fm || focused
+		im = im || ignored
+		if focused && !ignored {
+			samples = append(samples, s)
+		}
+	}
+	p.Sample = samples
+	return
+}
diff --git a/api/vendor/github.com/google/pprof/profile/index.go b/api/vendor/github.com/google/pprof/profile/index.go
new file mode 100644
index 000000000..bef1d6046
--- /dev/null
+++ b/api/vendor/github.com/google/pprof/profile/index.go
@@ -0,0 +1,64 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package profile
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// SampleIndexByName returns the appropriate index for a value of sample index.
+// If numeric, it returns the number, otherwise it looks up the text in the
+// profile sample types.
+func (p *Profile) SampleIndexByName(sampleIndex string) (int, error) {
+	if sampleIndex == "" {
+		if dst := p.DefaultSampleType; dst != "" {
+			for i, t := range sampleTypes(p) {
+				if t == dst {
+					return i, nil
+				}
+			}
+		}
+		// By default select the last sample value
+		return len(p.SampleType) - 1, nil
+	}
+	if i, err := strconv.Atoi(sampleIndex); err == nil {
+		if i < 0 || i >= len(p.SampleType) {
+			return 0, fmt.Errorf("sample_index %s is outside the range [0..%d]", sampleIndex, len(p.SampleType)-1)
+		}
+		return i, nil
+	}
+
+	// Remove the inuse_ prefix to support legacy pprof options
+	// "inuse_space" and "inuse_objects" for profiles containing types
+	// "space" and "objects".
+	noInuse := strings.TrimPrefix(sampleIndex, "inuse_")
+	for i, t := range p.SampleType {
+		if t.Type == sampleIndex || t.Type == noInuse {
+			return i, nil
+		}
+	}
+
+	return 0, fmt.Errorf("sample_index %q must be one of: %v", sampleIndex, sampleTypes(p))
+}
+
+func sampleTypes(p *Profile) []string {
+	types := make([]string, len(p.SampleType))
+	for i, t := range p.SampleType {
+		types[i] = t.Type
+	}
+	return types
+}
diff --git a/api/vendor/github.com/google/pprof/profile/legacy_java_profile.go b/api/vendor/github.com/google/pprof/profile/legacy_java_profile.go
new file mode 100644
index 000000000..91f45e53c
--- /dev/null
+++ b/api/vendor/github.com/google/pprof/profile/legacy_java_profile.go
@@ -0,0 +1,315 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file implements parsers to convert java legacy profiles into +// the profile.proto format. + +package profile + +import ( + "bytes" + "fmt" + "io" + "path/filepath" + "regexp" + "strconv" + "strings" +) + +var ( + attributeRx = regexp.MustCompile(`([\w ]+)=([\w ]+)`) + javaSampleRx = regexp.MustCompile(` *(\d+) +(\d+) +@ +([ x0-9a-f]*)`) + javaLocationRx = regexp.MustCompile(`^\s*0x([[:xdigit:]]+)\s+(.*)\s*$`) + javaLocationFileLineRx = regexp.MustCompile(`^(.*)\s+\((.+):(-?[[:digit:]]+)\)$`) + javaLocationPathRx = regexp.MustCompile(`^(.*)\s+\((.*)\)$`) +) + +// javaCPUProfile returns a new Profile from profilez data. +// b is the profile bytes after the header, period is the profiling +// period, and parse is a function to parse 8-byte chunks from the +// profile in its native endianness. +func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) { + p := &Profile{ + Period: period * 1000, + PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"}, + SampleType: []*ValueType{{Type: "samples", Unit: "count"}, {Type: "cpu", Unit: "nanoseconds"}}, + } + var err error + var locs map[uint64]*Location + if b, locs, err = parseCPUSamples(b, parse, false, p); err != nil { + return nil, err + } + + if err = parseJavaLocations(b, locs, p); err != nil { + return nil, err + } + + // Strip out addresses for better merge. + if err = p.Aggregate(true, true, true, true, false); err != nil { + return nil, err + } + + return p, nil +} + +// parseJavaProfile returns a new profile from heapz or contentionz +// data. b is the profile bytes after the header. +func parseJavaProfile(b []byte) (*Profile, error) { + h := bytes.SplitAfterN(b, []byte("\n"), 2) + if len(h) < 2 { + return nil, errUnrecognized + } + + p := &Profile{ + PeriodType: &ValueType{}, + } + header := string(bytes.TrimSpace(h[0])) + + var err error + var pType string + switch header { + case "--- heapz 1 ---": + pType = "heap" + case "--- contentionz 1 ---": + pType = "contention" + default: + return nil, errUnrecognized + } + + if b, err = parseJavaHeader(pType, h[1], p); err != nil { + return nil, err + } + var locs map[uint64]*Location + if b, locs, err = parseJavaSamples(pType, b, p); err != nil { + return nil, err + } + if err = parseJavaLocations(b, locs, p); err != nil { + return nil, err + } + + // Strip out addresses for better merge. + if err = p.Aggregate(true, true, true, true, false); err != nil { + return nil, err + } + + return p, nil +} + +// parseJavaHeader parses the attribute section on a java profile and +// populates a profile. Returns the remainder of the buffer after all +// attributes. +func parseJavaHeader(pType string, b []byte, p *Profile) ([]byte, error) { + nextNewLine := bytes.IndexByte(b, byte('\n')) + for nextNewLine != -1 { + line := string(bytes.TrimSpace(b[0:nextNewLine])) + if line != "" { + h := attributeRx.FindStringSubmatch(line) + if h == nil { + // Not a valid attribute, exit. 
+				return b, nil
+			}
+
+			attribute, value := strings.TrimSpace(h[1]), strings.TrimSpace(h[2])
+			var err error
+			switch pType + "/" + attribute {
+			case "heap/format", "cpu/format", "contention/format":
+				if value != "java" {
+					return nil, errUnrecognized
+				}
+			case "heap/resolution":
+				p.SampleType = []*ValueType{
+					{Type: "inuse_objects", Unit: "count"},
+					{Type: "inuse_space", Unit: value},
+				}
+			case "contention/resolution":
+				p.SampleType = []*ValueType{
+					{Type: "contentions", Unit: "count"},
+					{Type: "delay", Unit: value},
+				}
+			case "contention/sampling period":
+				p.PeriodType = &ValueType{
+					Type: "contentions", Unit: "count",
+				}
+				if p.Period, err = strconv.ParseInt(value, 0, 64); err != nil {
+					return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err)
+				}
+			case "contention/ms since reset":
+				millis, err := strconv.ParseInt(value, 0, 64)
+				if err != nil {
+					return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err)
+				}
+				p.DurationNanos = millis * 1000 * 1000
+			default:
+				return nil, errUnrecognized
+			}
+		}
+		// Grab next line.
+		b = b[nextNewLine+1:]
+		nextNewLine = bytes.IndexByte(b, byte('\n'))
+	}
+	return b, nil
+}
+
+// parseJavaSamples parses the samples from a java profile and
+// populates the Samples in a profile. Returns the remainder of the
+// buffer after the samples.
+func parseJavaSamples(pType string, b []byte, p *Profile) ([]byte, map[uint64]*Location, error) {
+	nextNewLine := bytes.IndexByte(b, byte('\n'))
+	locs := make(map[uint64]*Location)
+	for nextNewLine != -1 {
+		line := string(bytes.TrimSpace(b[0:nextNewLine]))
+		if line != "" {
+			sample := javaSampleRx.FindStringSubmatch(line)
+			if sample == nil {
+				// Not a valid sample, exit.
+				return b, locs, nil
+			}
+
+			// Java profiles have data/fields inverted compared to other
+			// profile types.
+			var err error
+			value1, value2, value3 := sample[2], sample[1], sample[3]
+			addrs, err := parseHexAddresses(value3)
+			if err != nil {
+				return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
+			}
+
+			var sloc []*Location
+			for _, addr := range addrs {
+				loc := locs[addr]
+				if locs[addr] == nil {
+					loc = &Location{
+						Address: addr,
+					}
+					p.Location = append(p.Location, loc)
+					locs[addr] = loc
+				}
+				sloc = append(sloc, loc)
+			}
+			s := &Sample{
+				Value:    make([]int64, 2),
+				Location: sloc,
+			}
+
+			if s.Value[0], err = strconv.ParseInt(value1, 0, 64); err != nil {
+				return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err)
+			}
+			if s.Value[1], err = strconv.ParseInt(value2, 0, 64); err != nil {
+				return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err)
+			}
+
+			switch pType {
+			case "heap":
+				const javaHeapzSamplingRate = 524288 // 512K
+				if s.Value[0] == 0 {
+					return nil, nil, fmt.Errorf("parsing sample %s: second value must be non-zero", line)
+				}
+				s.NumLabel = map[string][]int64{"bytes": {s.Value[1] / s.Value[0]}}
+				s.Value[0], s.Value[1] = scaleHeapSample(s.Value[0], s.Value[1], javaHeapzSamplingRate)
+			case "contention":
+				if period := p.Period; period != 0 {
+					s.Value[0] = s.Value[0] * p.Period
+					s.Value[1] = s.Value[1] * p.Period
+				}
+			}
+			p.Sample = append(p.Sample, s)
+		}
+		// Grab next line.
+		b = b[nextNewLine+1:]
+		nextNewLine = bytes.IndexByte(b, byte('\n'))
+	}
+	return b, locs, nil
+}
+
+// parseJavaLocations parses the location information in a java
+// profile and populates the Locations in a profile. It uses the
+// location addresses from the profile as the ID of each location. 
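+//
+// A typical location line has the form (illustrative example):
+//
+//	0x00000001	Foo.bar (Foo.java:123)
+//
+// where the leading hex value keys the location and the remainder is
+// interpreted as "function (file:line)", a shared-library path, or a
+// raw state name such as "GC".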
+func parseJavaLocations(b []byte, locs map[uint64]*Location, p *Profile) error { + r := bytes.NewBuffer(b) + fns := make(map[string]*Function) + for { + line, err := r.ReadString('\n') + if err != nil { + if err != io.EOF { + return err + } + if line == "" { + break + } + } + + if line = strings.TrimSpace(line); line == "" { + continue + } + + jloc := javaLocationRx.FindStringSubmatch(line) + if len(jloc) != 3 { + continue + } + addr, err := strconv.ParseUint(jloc[1], 16, 64) + if err != nil { + return fmt.Errorf("parsing sample %s: %v", line, err) + } + loc := locs[addr] + if loc == nil { + // Unused/unseen + continue + } + var lineFunc, lineFile string + var lineNo int64 + + if fileLine := javaLocationFileLineRx.FindStringSubmatch(jloc[2]); len(fileLine) == 4 { + // Found a line of the form: "function (file:line)" + lineFunc, lineFile = fileLine[1], fileLine[2] + if n, err := strconv.ParseInt(fileLine[3], 10, 64); err == nil && n > 0 { + lineNo = n + } + } else if filePath := javaLocationPathRx.FindStringSubmatch(jloc[2]); len(filePath) == 3 { + // If there's not a file:line, it's a shared library path. + // The path isn't interesting, so just give the .so. + lineFunc, lineFile = filePath[1], filepath.Base(filePath[2]) + } else if strings.Contains(jloc[2], "generated stub/JIT") { + lineFunc = "STUB" + } else { + // Treat whole line as the function name. This is used by the + // java agent for internal states such as "GC" or "VM". + lineFunc = jloc[2] + } + fn := fns[lineFunc] + + if fn == nil { + fn = &Function{ + Name: lineFunc, + SystemName: lineFunc, + Filename: lineFile, + } + fns[lineFunc] = fn + p.Function = append(p.Function, fn) + } + loc.Line = []Line{ + { + Function: fn, + Line: lineNo, + }, + } + loc.Address = 0 + } + + p.remapLocationIDs() + p.remapFunctionIDs() + p.remapMappingIDs() + + return nil +} diff --git a/api/vendor/github.com/google/pprof/profile/legacy_profile.go b/api/vendor/github.com/google/pprof/profile/legacy_profile.go new file mode 100644 index 000000000..0c8f3bb5b --- /dev/null +++ b/api/vendor/github.com/google/pprof/profile/legacy_profile.go @@ -0,0 +1,1225 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file implements parsers to convert legacy profiles into the +// profile.proto format. 
+ +package profile + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "regexp" + "strconv" + "strings" +) + +var ( + countStartRE = regexp.MustCompile(`\A(\S+) profile: total \d+\z`) + countRE = regexp.MustCompile(`\A(\d+) @(( 0x[0-9a-f]+)+)\z`) + + heapHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] *@ *(heap[_a-z0-9]*)/?(\d*)`) + heapSampleRE = regexp.MustCompile(`(-?\d+): *(-?\d+) *\[ *(\d+): *(\d+) *] @([ x0-9a-f]*)`) + + contentionSampleRE = regexp.MustCompile(`(\d+) *(\d+) @([ x0-9a-f]*)`) + + hexNumberRE = regexp.MustCompile(`0x[0-9a-f]+`) + + growthHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ growthz?`) + + fragmentationHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ fragmentationz?`) + + threadzStartRE = regexp.MustCompile(`--- threadz \d+ ---`) + threadStartRE = regexp.MustCompile(`--- Thread ([[:xdigit:]]+) \(name: (.*)/(\d+)\) stack: ---`) + + // Regular expressions to parse process mappings. Support the format used by Linux /proc/.../maps and other tools. + // Recommended format: + // Start End object file name offset(optional) linker build id + // 0x40000-0x80000 /path/to/binary (@FF00) abc123456 + spaceDigits = `\s+[[:digit:]]+` + hexPair = `\s+[[:xdigit:]]+:[[:xdigit:]]+` + oSpace = `\s*` + // Capturing expressions. + cHex = `(?:0x)?([[:xdigit:]]+)` + cHexRange = `\s*` + cHex + `[\s-]?` + oSpace + cHex + `:?` + cSpaceString = `(?:\s+(\S+))?` + cSpaceHex = `(?:\s+([[:xdigit:]]+))?` + cSpaceAtOffset = `(?:\s+\(@([[:xdigit:]]+)\))?` + cPerm = `(?:\s+([-rwxp]+))?` + + procMapsRE = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceHex + hexPair + spaceDigits + cSpaceString) + briefMapsRE = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceString + cSpaceAtOffset + cSpaceHex) + + // Regular expression to parse log data, of the form: + // ... file:line] msg... + logInfoRE = regexp.MustCompile(`^[^\[\]]+:[0-9]+]\s`) +) + +func isSpaceOrComment(line string) bool { + trimmed := strings.TrimSpace(line) + return len(trimmed) == 0 || trimmed[0] == '#' +} + +// parseGoCount parses a Go count profile (e.g., threadcreate or +// goroutine) and returns a new Profile. +func parseGoCount(b []byte) (*Profile, error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + // Skip comments at the beginning of the file. + for s.Scan() && isSpaceOrComment(s.Text()) { + } + if err := s.Err(); err != nil { + return nil, err + } + m := countStartRE.FindStringSubmatch(s.Text()) + if m == nil { + return nil, errUnrecognized + } + profileType := m[1] + p := &Profile{ + PeriodType: &ValueType{Type: profileType, Unit: "count"}, + Period: 1, + SampleType: []*ValueType{{Type: profileType, Unit: "count"}}, + } + locations := make(map[uint64]*Location) + for s.Scan() { + line := s.Text() + if isSpaceOrComment(line) { + continue + } + if strings.HasPrefix(line, "---") { + break + } + m := countRE.FindStringSubmatch(line) + if m == nil { + return nil, errMalformed + } + n, err := strconv.ParseInt(m[1], 0, 64) + if err != nil { + return nil, errMalformed + } + fields := strings.Fields(m[2]) + locs := make([]*Location, 0, len(fields)) + for _, stk := range fields { + addr, err := strconv.ParseUint(stk, 0, 64) + if err != nil { + return nil, errMalformed + } + // Adjust all frames by -1 to land on top of the call instruction. 
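+			// E.g. (illustrative): a frame recorded as the return address
+			// 0x4010a5 becomes 0x4010a4, which falls within the call
+			// instruction itself and so symbolizes to the calling line.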
+ addr-- + loc := locations[addr] + if loc == nil { + loc = &Location{ + Address: addr, + } + locations[addr] = loc + p.Location = append(p.Location, loc) + } + locs = append(locs, loc) + } + p.Sample = append(p.Sample, &Sample{ + Location: locs, + Value: []int64{n}, + }) + } + if err := s.Err(); err != nil { + return nil, err + } + + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + return p, nil +} + +// remapLocationIDs ensures there is a location for each address +// referenced by a sample, and remaps the samples to point to the new +// location ids. +func (p *Profile) remapLocationIDs() { + seen := make(map[*Location]bool, len(p.Location)) + var locs []*Location + + for _, s := range p.Sample { + for _, l := range s.Location { + if seen[l] { + continue + } + l.ID = uint64(len(locs) + 1) + locs = append(locs, l) + seen[l] = true + } + } + p.Location = locs +} + +func (p *Profile) remapFunctionIDs() { + seen := make(map[*Function]bool, len(p.Function)) + var fns []*Function + + for _, l := range p.Location { + for _, ln := range l.Line { + fn := ln.Function + if fn == nil || seen[fn] { + continue + } + fn.ID = uint64(len(fns) + 1) + fns = append(fns, fn) + seen[fn] = true + } + } + p.Function = fns +} + +// remapMappingIDs matches location addresses with existing mappings +// and updates them appropriately. This is O(N*M), if this ever shows +// up as a bottleneck, evaluate sorting the mappings and doing a +// binary search, which would make it O(N*log(M)). +func (p *Profile) remapMappingIDs() { + // Some profile handlers will incorrectly set regions for the main + // executable if its section is remapped. Fix them through heuristics. + + if len(p.Mapping) > 0 { + // Remove the initial mapping if named '/anon_hugepage' and has a + // consecutive adjacent mapping. + if m := p.Mapping[0]; strings.HasPrefix(m.File, "/anon_hugepage") { + if len(p.Mapping) > 1 && m.Limit == p.Mapping[1].Start { + p.Mapping = p.Mapping[1:] + } + } + } + + // Subtract the offset from the start of the main mapping if it + // ends up at a recognizable start address. + if len(p.Mapping) > 0 { + const expectedStart = 0x400000 + if m := p.Mapping[0]; m.Start-m.Offset == expectedStart { + m.Start = expectedStart + m.Offset = 0 + } + } + + // Associate each location with an address to the corresponding + // mapping. Create fake mapping if a suitable one isn't found. + var fake *Mapping +nextLocation: + for _, l := range p.Location { + a := l.Address + if l.Mapping != nil || a == 0 { + continue + } + for _, m := range p.Mapping { + if m.Start <= a && a < m.Limit { + l.Mapping = m + continue nextLocation + } + } + // Work around legacy handlers failing to encode the first + // part of mappings split into adjacent ranges. + for _, m := range p.Mapping { + if m.Offset != 0 && m.Start-m.Offset <= a && a < m.Start { + m.Start -= m.Offset + m.Offset = 0 + l.Mapping = m + continue nextLocation + } + } + // If there is still no mapping, create a fake one. + // This is important for the Go legacy handler, which produced + // no mappings. + if fake == nil { + fake = &Mapping{ + ID: 1, + Limit: ^uint64(0), + } + p.Mapping = append(p.Mapping, fake) + } + l.Mapping = fake + } + + // Reset all mapping IDs. 
+ for i, m := range p.Mapping { + m.ID = uint64(i + 1) + } +} + +var cpuInts = []func([]byte) (uint64, []byte){ + get32l, + get32b, + get64l, + get64b, +} + +func get32l(b []byte) (uint64, []byte) { + if len(b) < 4 { + return 0, nil + } + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24, b[4:] +} + +func get32b(b []byte) (uint64, []byte) { + if len(b) < 4 { + return 0, nil + } + return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24, b[4:] +} + +func get64l(b []byte) (uint64, []byte) { + if len(b) < 8 { + return 0, nil + } + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56, b[8:] +} + +func get64b(b []byte) (uint64, []byte) { + if len(b) < 8 { + return 0, nil + } + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56, b[8:] +} + +// parseCPU parses a profilez legacy profile and returns a newly +// populated Profile. +// +// The general format for profilez samples is a sequence of words in +// binary format. The first words are a header with the following data: +// 1st word -- 0 +// 2nd word -- 3 +// 3rd word -- 0 if a c++ application, 1 if a java application. +// 4th word -- Sampling period (in microseconds). +// 5th word -- Padding. +func parseCPU(b []byte) (*Profile, error) { + var parse func([]byte) (uint64, []byte) + var n1, n2, n3, n4, n5 uint64 + for _, parse = range cpuInts { + var tmp []byte + n1, tmp = parse(b) + n2, tmp = parse(tmp) + n3, tmp = parse(tmp) + n4, tmp = parse(tmp) + n5, tmp = parse(tmp) + + if tmp != nil && n1 == 0 && n2 == 3 && n3 == 0 && n4 > 0 && n5 == 0 { + b = tmp + return cpuProfile(b, int64(n4), parse) + } + if tmp != nil && n1 == 0 && n2 == 3 && n3 == 1 && n4 > 0 && n5 == 0 { + b = tmp + return javaCPUProfile(b, int64(n4), parse) + } + } + return nil, errUnrecognized +} + +// cpuProfile returns a new Profile from C++ profilez data. +// b is the profile bytes after the header, period is the profiling +// period, and parse is a function to parse 8-byte chunks from the +// profile in its native endianness. +func cpuProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) { + p := &Profile{ + Period: period * 1000, + PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"}, + SampleType: []*ValueType{ + {Type: "samples", Unit: "count"}, + {Type: "cpu", Unit: "nanoseconds"}, + }, + } + var err error + if b, _, err = parseCPUSamples(b, parse, true, p); err != nil { + return nil, err + } + + // If *most* samples have the same second-to-the-bottom frame, it + // strongly suggests that it is an uninteresting artifact of + // measurement -- a stack frame pushed by the signal handler. The + // bottom frame is always correct as it is picked up from the signal + // structure, not the stack. Check if this is the case and if so, + // remove. + + // Remove up to two frames. + maxiter := 2 + // Allow one different sample for this many samples with the same + // second-to-last frame. 
+ similarSamples := 32 + margin := len(p.Sample) / similarSamples + + for iter := 0; iter < maxiter; iter++ { + addr1 := make(map[uint64]int) + for _, s := range p.Sample { + if len(s.Location) > 1 { + a := s.Location[1].Address + addr1[a] = addr1[a] + 1 + } + } + + for id1, count := range addr1 { + if count >= len(p.Sample)-margin { + // Found uninteresting frame, strip it out from all samples + for _, s := range p.Sample { + if len(s.Location) > 1 && s.Location[1].Address == id1 { + s.Location = append(s.Location[:1], s.Location[2:]...) + } + } + break + } + } + } + + if err := p.ParseMemoryMap(bytes.NewBuffer(b)); err != nil { + return nil, err + } + + cleanupDuplicateLocations(p) + return p, nil +} + +func cleanupDuplicateLocations(p *Profile) { + // The profile handler may duplicate the leaf frame, because it gets + // its address both from stack unwinding and from the signal + // context. Detect this and delete the duplicate, which has been + // adjusted by -1. The leaf address should not be adjusted as it is + // not a call. + for _, s := range p.Sample { + if len(s.Location) > 1 && s.Location[0].Address == s.Location[1].Address+1 { + s.Location = append(s.Location[:1], s.Location[2:]...) + } + } +} + +// parseCPUSamples parses a collection of profilez samples from a +// profile. +// +// profilez samples are a repeated sequence of stack frames of the +// form: +// 1st word -- The number of times this stack was encountered. +// 2nd word -- The size of the stack (StackSize). +// 3rd word -- The first address on the stack. +// ... +// StackSize + 2 -- The last address on the stack +// The last stack trace is of the form: +// 1st word -- 0 +// 2nd word -- 1 +// 3rd word -- 0 +// +// Addresses from stack traces may point to the next instruction after +// each call. Optionally adjust by -1 to land somewhere on the actual +// call (except for the leaf, which is not a call). +func parseCPUSamples(b []byte, parse func(b []byte) (uint64, []byte), adjust bool, p *Profile) ([]byte, map[uint64]*Location, error) { + locs := make(map[uint64]*Location) + for len(b) > 0 { + var count, nstk uint64 + count, b = parse(b) + nstk, b = parse(b) + if b == nil || nstk > uint64(len(b)/4) { + return nil, nil, errUnrecognized + } + var sloc []*Location + addrs := make([]uint64, nstk) + for i := 0; i < int(nstk); i++ { + addrs[i], b = parse(b) + } + + if count == 0 && nstk == 1 && addrs[0] == 0 { + // End of data marker + break + } + for i, addr := range addrs { + if adjust && i > 0 { + addr-- + } + loc := locs[addr] + if loc == nil { + loc = &Location{ + Address: addr, + } + locs[addr] = loc + p.Location = append(p.Location, loc) + } + sloc = append(sloc, loc) + } + p.Sample = append(p.Sample, + &Sample{ + Value: []int64{int64(count), int64(count) * p.Period}, + Location: sloc, + }) + } + // Reached the end without finding the EOD marker. + return b, locs, nil +} + +// parseHeap parses a heapz legacy or a growthz profile and +// returns a newly populated Profile. 
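+//
+// A v2 heapz header and sample line look like (illustrative):
+//
+//	heap profile: 1: 8 [ 2: 16 ] @ heap_v2/524288
+//	1: 8 [2: 16] @ 0x401000 0x402000
+//
+// i.e. in-use count and bytes, allocated count and bytes in brackets,
+// then the profile kind and sampling rate; each sample row repeats the
+// counts and ends with the stack addresses.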
+func parseHeap(b []byte) (p *Profile, err error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + if !s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + return nil, errUnrecognized + } + p = &Profile{} + + sampling := "" + hasAlloc := false + + line := s.Text() + p.PeriodType = &ValueType{Type: "space", Unit: "bytes"} + if header := heapHeaderRE.FindStringSubmatch(line); header != nil { + sampling, p.Period, hasAlloc, err = parseHeapHeader(line) + if err != nil { + return nil, err + } + } else if header = growthHeaderRE.FindStringSubmatch(line); header != nil { + p.Period = 1 + } else if header = fragmentationHeaderRE.FindStringSubmatch(line); header != nil { + p.Period = 1 + } else { + return nil, errUnrecognized + } + + if hasAlloc { + // Put alloc before inuse so that default pprof selection + // will prefer inuse_space. + p.SampleType = []*ValueType{ + {Type: "alloc_objects", Unit: "count"}, + {Type: "alloc_space", Unit: "bytes"}, + {Type: "inuse_objects", Unit: "count"}, + {Type: "inuse_space", Unit: "bytes"}, + } + } else { + p.SampleType = []*ValueType{ + {Type: "objects", Unit: "count"}, + {Type: "space", Unit: "bytes"}, + } + } + + locs := make(map[uint64]*Location) + for s.Scan() { + line := strings.TrimSpace(s.Text()) + + if isSpaceOrComment(line) { + continue + } + + if isMemoryMapSentinel(line) { + break + } + + value, blocksize, addrs, err := parseHeapSample(line, p.Period, sampling, hasAlloc) + if err != nil { + return nil, err + } + + var sloc []*Location + for _, addr := range addrs { + // Addresses from stack traces point to the next instruction after + // each call. Adjust by -1 to land somewhere on the actual call. + addr-- + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + + p.Sample = append(p.Sample, &Sample{ + Value: value, + Location: sloc, + NumLabel: map[string][]int64{"bytes": {blocksize}}, + }) + } + if err := s.Err(); err != nil { + return nil, err + } + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + return p, nil +} + +func parseHeapHeader(line string) (sampling string, period int64, hasAlloc bool, err error) { + header := heapHeaderRE.FindStringSubmatch(line) + if header == nil { + return "", 0, false, errUnrecognized + } + + if len(header[6]) > 0 { + if period, err = strconv.ParseInt(header[6], 10, 64); err != nil { + return "", 0, false, errUnrecognized + } + } + + if (header[3] != header[1] && header[3] != "0") || (header[4] != header[2] && header[4] != "0") { + hasAlloc = true + } + + switch header[5] { + case "heapz_v2", "heap_v2": + return "v2", period, hasAlloc, nil + case "heapprofile": + return "", 1, hasAlloc, nil + case "heap": + return "v2", period / 2, hasAlloc, nil + default: + return "", 0, false, errUnrecognized + } +} + +// parseHeapSample parses a single row from a heap profile into a new Sample. +func parseHeapSample(line string, rate int64, sampling string, includeAlloc bool) (value []int64, blocksize int64, addrs []uint64, err error) { + sampleData := heapSampleRE.FindStringSubmatch(line) + if len(sampleData) != 6 { + return nil, 0, nil, fmt.Errorf("unexpected number of sample values: got %d, want 6", len(sampleData)) + } + + // This is a local-scoped helper function to avoid needing to pass + // around rate, sampling and many return parameters. 
+	addValues := func(countString, sizeString string, label string) error {
+		count, err := strconv.ParseInt(countString, 10, 64)
+		if err != nil {
+			return fmt.Errorf("malformed sample: %s: %v", line, err)
+		}
+		size, err := strconv.ParseInt(sizeString, 10, 64)
+		if err != nil {
+			return fmt.Errorf("malformed sample: %s: %v", line, err)
+		}
+		if count == 0 && size != 0 {
+			return fmt.Errorf("%s count was 0 but %s bytes was %d", label, label, size)
+		}
+		if count != 0 {
+			blocksize = size / count
+			if sampling == "v2" {
+				count, size = scaleHeapSample(count, size, rate)
+			}
+		}
+		value = append(value, count, size)
+		return nil
+	}
+
+	if includeAlloc {
+		if err := addValues(sampleData[3], sampleData[4], "allocation"); err != nil {
+			return nil, 0, nil, err
+		}
+	}
+
+	if err := addValues(sampleData[1], sampleData[2], "inuse"); err != nil {
+		return nil, 0, nil, err
+	}
+
+	addrs, err = parseHexAddresses(sampleData[5])
+	if err != nil {
+		return nil, 0, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
+	}
+
+	return value, blocksize, addrs, nil
+}
+
+// parseHexAddresses extracts hex numbers from a string, attempts to convert
+// each to an unsigned 64-bit number and returns the resulting numbers as a
+// slice, or an error if the string contains hex numbers which are too large to
+// handle (which means a malformed profile).
+func parseHexAddresses(s string) ([]uint64, error) {
+	hexStrings := hexNumberRE.FindAllString(s, -1)
+	var addrs []uint64
+	for _, s := range hexStrings {
+		if addr, err := strconv.ParseUint(s, 0, 64); err == nil {
+			addrs = append(addrs, addr)
+		} else {
+			return nil, fmt.Errorf("failed to parse as hex 64-bit number: %s", s)
+		}
+	}
+	return addrs, nil
+}
+
+// scaleHeapSample adjusts the data from a heapz Sample to
+// account for its probability of appearing in the collected
+// data. heapz profiles are a sampling of the memory allocation
+// requests in a program. We estimate the unsampled value by dividing
+// each collected sample by its probability of appearing in the
+// profile. heapz v2 profiles rely on a Poisson process to determine
+// which samples to collect, based on the desired average collection
+// rate R. The probability of a sample of size S to appear in that
+// profile is 1-exp(-S/R).
+func scaleHeapSample(count, size, rate int64) (int64, int64) {
+	if count == 0 || size == 0 {
+		return 0, 0
+	}
+
+	if rate <= 1 {
+		// if rate==1 all samples were collected so no adjustment is needed.
+		// if rate<1 treat as unknown and skip scaling.
+		return count, size
+	}
+
+	avgSize := float64(size) / float64(count)
+	scale := 1 / (1 - math.Exp(-avgSize/float64(rate)))
+
+	return int64(float64(count) * scale), int64(float64(size) * scale)
+}
+
+// parseContention parses a mutex or contention profile. There are 2 cases:
+// "--- contentionz " for legacy C++ profiles (and backwards compatibility)
+// "--- mutex:" or "--- contention:" for profiles generated by the Go runtime. 
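+//
+// After the header, "attribute = value" pairs (e.g. "cycles/second = 2000000000")
+// are read until the first sample row, which looks like (illustrative):
+//
+//	72 126 @ 0x401000 0x402000
+//
+// i.e. the sampled delay in cycles, the contention count, and the stack
+// addresses.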
+func parseContention(b []byte) (*Profile, error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + if !s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + return nil, errUnrecognized + } + + switch l := s.Text(); { + case strings.HasPrefix(l, "--- contentionz "): + case strings.HasPrefix(l, "--- mutex:"): + case strings.HasPrefix(l, "--- contention:"): + default: + return nil, errUnrecognized + } + + p := &Profile{ + PeriodType: &ValueType{Type: "contentions", Unit: "count"}, + Period: 1, + SampleType: []*ValueType{ + {Type: "contentions", Unit: "count"}, + {Type: "delay", Unit: "nanoseconds"}, + }, + } + + var cpuHz int64 + // Parse text of the form "attribute = value" before the samples. + const delimiter = "=" + for s.Scan() { + line := s.Text() + if line = strings.TrimSpace(line); isSpaceOrComment(line) { + continue + } + if strings.HasPrefix(line, "---") { + break + } + attr := strings.SplitN(line, delimiter, 2) + if len(attr) != 2 { + break + } + key, val := strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1]) + var err error + switch key { + case "cycles/second": + if cpuHz, err = strconv.ParseInt(val, 0, 64); err != nil { + return nil, errUnrecognized + } + case "sampling period": + if p.Period, err = strconv.ParseInt(val, 0, 64); err != nil { + return nil, errUnrecognized + } + case "ms since reset": + ms, err := strconv.ParseInt(val, 0, 64) + if err != nil { + return nil, errUnrecognized + } + p.DurationNanos = ms * 1000 * 1000 + case "format": + // CPP contentionz profiles don't have format. + return nil, errUnrecognized + case "resolution": + // CPP contentionz profiles don't have resolution. + return nil, errUnrecognized + case "discarded samples": + default: + return nil, errUnrecognized + } + } + if err := s.Err(); err != nil { + return nil, err + } + + locs := make(map[uint64]*Location) + for { + line := strings.TrimSpace(s.Text()) + if strings.HasPrefix(line, "---") { + break + } + if !isSpaceOrComment(line) { + value, addrs, err := parseContentionSample(line, p.Period, cpuHz) + if err != nil { + return nil, err + } + var sloc []*Location + for _, addr := range addrs { + // Addresses from stack traces point to the next instruction after + // each call. Adjust by -1 to land somewhere on the actual call. + addr-- + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + p.Sample = append(p.Sample, &Sample{ + Value: value, + Location: sloc, + }) + } + if !s.Scan() { + break + } + } + if err := s.Err(); err != nil { + return nil, err + } + + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + + return p, nil +} + +// parseContentionSample parses a single row from a contention profile +// into a new Sample. +func parseContentionSample(line string, period, cpuHz int64) (value []int64, addrs []uint64, err error) { + sampleData := contentionSampleRE.FindStringSubmatch(line) + if sampleData == nil { + return nil, nil, errUnrecognized + } + + v1, err := strconv.ParseInt(sampleData[1], 10, 64) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + v2, err := strconv.ParseInt(sampleData[2], 10, 64) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + // Unsample values if period and cpuHz are available. + // - Delays are scaled to cycles and then to nanoseconds. + // - Contentions are scaled to cycles. 
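+	//
+	// Worked example (illustrative): with period = 100 and cpuHz = 2e9
+	// (so cpuGHz = 2), a sampled delay v1 = 1000 cycles becomes
+	// 1000 * 100 / 2 = 50000 ns, and a sampled count v2 = 4 becomes
+	// 400 contentions.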
+ if period > 0 { + if cpuHz > 0 { + cpuGHz := float64(cpuHz) / 1e9 + v1 = int64(float64(v1) * float64(period) / cpuGHz) + } + v2 = v2 * period + } + + value = []int64{v2, v1} + addrs, err = parseHexAddresses(sampleData[3]) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + return value, addrs, nil +} + +// parseThread parses a Threadz profile and returns a new Profile. +func parseThread(b []byte) (*Profile, error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + // Skip past comments and empty lines seeking a real header. + for s.Scan() && isSpaceOrComment(s.Text()) { + } + + line := s.Text() + if m := threadzStartRE.FindStringSubmatch(line); m != nil { + // Advance over initial comments until first stack trace. + for s.Scan() { + if line = s.Text(); isMemoryMapSentinel(line) || strings.HasPrefix(line, "-") { + break + } + } + } else if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 { + return nil, errUnrecognized + } + + p := &Profile{ + SampleType: []*ValueType{{Type: "thread", Unit: "count"}}, + PeriodType: &ValueType{Type: "thread", Unit: "count"}, + Period: 1, + } + + locs := make(map[uint64]*Location) + // Recognize each thread and populate profile samples. + for !isMemoryMapSentinel(line) { + if strings.HasPrefix(line, "---- no stack trace for") { + line = "" + break + } + if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 { + return nil, errUnrecognized + } + + var addrs []uint64 + var err error + line, addrs, err = parseThreadSample(s) + if err != nil { + return nil, err + } + if len(addrs) == 0 { + // We got a --same as previous threads--. Bump counters. + if len(p.Sample) > 0 { + s := p.Sample[len(p.Sample)-1] + s.Value[0]++ + } + continue + } + + var sloc []*Location + for i, addr := range addrs { + // Addresses from stack traces point to the next instruction after + // each call. Adjust by -1 to land somewhere on the actual call + // (except for the leaf, which is not a call). + if i > 0 { + addr-- + } + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + + p.Sample = append(p.Sample, &Sample{ + Value: []int64{1}, + Location: sloc, + }) + } + + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + + cleanupDuplicateLocations(p) + return p, nil +} + +// parseThreadSample parses a symbolized or unsymbolized stack trace. +// Returns the first line after the traceback, the sample (or nil if +// it hits a 'same-as-previous' marker) and an error. +func parseThreadSample(s *bufio.Scanner) (nextl string, addrs []uint64, err error) { + var line string + sameAsPrevious := false + for s.Scan() { + line = strings.TrimSpace(s.Text()) + if line == "" { + continue + } + + if strings.HasPrefix(line, "---") { + break + } + if strings.Contains(line, "same as previous thread") { + sameAsPrevious = true + continue + } + + curAddrs, err := parseHexAddresses(line) + if err != nil { + return "", nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + addrs = append(addrs, curAddrs...) + } + if err := s.Err(); err != nil { + return "", nil, err + } + if sameAsPrevious { + return line, nil, nil + } + return line, addrs, nil +} + +// parseAdditionalSections parses any additional sections in the +// profile, ignoring any unrecognized sections. 
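+//
+// The scanner is advanced until one of the known memory-map sentinels
+// (e.g. "--- Memory map: ---") is seen; the remaining lines are then
+// handed to ParseMemoryMapFromScanner.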
+func parseAdditionalSections(s *bufio.Scanner, p *Profile) error {
+	for !isMemoryMapSentinel(s.Text()) && s.Scan() {
+	}
+	if err := s.Err(); err != nil {
+		return err
+	}
+	return p.ParseMemoryMapFromScanner(s)
+}
+
+// ParseProcMaps parses a memory map in the format of /proc/self/maps.
+// ParseMemoryMap should be called instead when setting the mappings on
+// a profile, so that locations are associated with the corresponding
+// mapping based on their addresses.
+func ParseProcMaps(rd io.Reader) ([]*Mapping, error) {
+	s := bufio.NewScanner(rd)
+	return parseProcMapsFromScanner(s)
+}
+
+func parseProcMapsFromScanner(s *bufio.Scanner) ([]*Mapping, error) {
+	var mapping []*Mapping
+
+	var attrs []string
+	const delimiter = "="
+	r := strings.NewReplacer()
+	for s.Scan() {
+		line := r.Replace(removeLoggingInfo(s.Text()))
+		m, err := parseMappingEntry(line)
+		if err != nil {
+			if err == errUnrecognized {
+				// Recognize assignments of the form: attr=value, and replace
+				// $attr with value on subsequent mappings.
+				if attr := strings.SplitN(line, delimiter, 2); len(attr) == 2 {
+					attrs = append(attrs, "$"+strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1]))
+					r = strings.NewReplacer(attrs...)
+				}
+				// Ignore any unrecognized entries
+				continue
+			}
+			return nil, err
+		}
+		if m == nil {
+			continue
+		}
+		mapping = append(mapping, m)
+	}
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
+	return mapping, nil
+}
+
+// removeLoggingInfo detects and removes log prefix entries generated
+// by the glog package. If no logging prefix is detected, the string
+// is returned unmodified.
+func removeLoggingInfo(line string) string {
+	if match := logInfoRE.FindStringIndex(line); match != nil {
+		return line[match[1]:]
+	}
+	return line
+}
+
+// ParseMemoryMap parses a memory map in the format of
+// /proc/self/maps, and overrides the mappings in the current profile.
+// It renumbers the samples and locations in the profile correspondingly.
+func (p *Profile) ParseMemoryMap(rd io.Reader) error {
+	return p.ParseMemoryMapFromScanner(bufio.NewScanner(rd))
+}
+
+// ParseMemoryMapFromScanner parses a memory map in the format of
+// /proc/self/maps or a variety of legacy formats, and overrides the
+// mappings in the current profile. It renumbers the samples and
+// locations in the profile correspondingly.
+func (p *Profile) ParseMemoryMapFromScanner(s *bufio.Scanner) error {
+	mapping, err := parseProcMapsFromScanner(s)
+	if err != nil {
+		return err
+	}
+	p.Mapping = append(p.Mapping, mapping...)
+	p.massageMappings()
+	p.remapLocationIDs()
+	p.remapFunctionIDs()
+	p.remapMappingIDs()
+	return nil
+}
+
+func parseMappingEntry(l string) (*Mapping, error) {
+	var start, end, perm, file, offset, buildID string
+	if me := procMapsRE.FindStringSubmatch(l); len(me) == 6 {
+		start, end, perm, offset, file = me[1], me[2], me[3], me[4], me[5]
+	} else if me := briefMapsRE.FindStringSubmatch(l); len(me) == 7 {
+		start, end, perm, file, offset, buildID = me[1], me[2], me[3], me[4], me[5], me[6]
+	} else {
+		return nil, errUnrecognized
+	}
+
+	var err error
+	mapping := &Mapping{
+		File:    file,
+		BuildID: buildID,
+	}
+	if perm != "" && !strings.Contains(perm, "x") {
+		// Skip non-executable entries. 
+ return nil, nil + } + if mapping.Start, err = strconv.ParseUint(start, 16, 64); err != nil { + return nil, errUnrecognized + } + if mapping.Limit, err = strconv.ParseUint(end, 16, 64); err != nil { + return nil, errUnrecognized + } + if offset != "" { + if mapping.Offset, err = strconv.ParseUint(offset, 16, 64); err != nil { + return nil, errUnrecognized + } + } + return mapping, nil +} + +var memoryMapSentinels = []string{ + "--- Memory map: ---", + "MAPPED_LIBRARIES:", +} + +// isMemoryMapSentinel returns true if the string contains one of the +// known sentinels for memory map information. +func isMemoryMapSentinel(line string) bool { + for _, s := range memoryMapSentinels { + if strings.Contains(line, s) { + return true + } + } + return false +} + +func (p *Profile) addLegacyFrameInfo() { + switch { + case isProfileType(p, heapzSampleTypes): + p.DropFrames, p.KeepFrames = allocRxStr, allocSkipRxStr + case isProfileType(p, contentionzSampleTypes): + p.DropFrames, p.KeepFrames = lockRxStr, "" + default: + p.DropFrames, p.KeepFrames = cpuProfilerRxStr, "" + } +} + +var heapzSampleTypes = [][]string{ + {"allocations", "size"}, // early Go pprof profiles + {"objects", "space"}, + {"inuse_objects", "inuse_space"}, + {"alloc_objects", "alloc_space"}, + {"alloc_objects", "alloc_space", "inuse_objects", "inuse_space"}, // Go pprof legacy profiles +} +var contentionzSampleTypes = [][]string{ + {"contentions", "delay"}, +} + +func isProfileType(p *Profile, types [][]string) bool { + st := p.SampleType +nextType: + for _, t := range types { + if len(st) != len(t) { + continue + } + + for i := range st { + if st[i].Type != t[i] { + continue nextType + } + } + return true + } + return false +} + +var allocRxStr = strings.Join([]string{ + // POSIX entry points. + `calloc`, + `cfree`, + `malloc`, + `free`, + `memalign`, + `do_memalign`, + `(__)?posix_memalign`, + `pvalloc`, + `valloc`, + `realloc`, + + // TC malloc. + `tcmalloc::.*`, + `tc_calloc`, + `tc_cfree`, + `tc_malloc`, + `tc_free`, + `tc_memalign`, + `tc_posix_memalign`, + `tc_pvalloc`, + `tc_valloc`, + `tc_realloc`, + `tc_new`, + `tc_delete`, + `tc_newarray`, + `tc_deletearray`, + `tc_new_nothrow`, + `tc_newarray_nothrow`, + + // Memory-allocation routines on OS X. + `malloc_zone_malloc`, + `malloc_zone_calloc`, + `malloc_zone_valloc`, + `malloc_zone_realloc`, + `malloc_zone_memalign`, + `malloc_zone_free`, + + // Go runtime + `runtime\..*`, + + // Other misc. memory allocation routines + `BaseArena::.*`, + `(::)?do_malloc_no_errno`, + `(::)?do_malloc_pages`, + `(::)?do_malloc`, + `DoSampledAllocation`, + `MallocedMemBlock::MallocedMemBlock`, + `_M_allocate`, + `__builtin_(vec_)?delete`, + `__builtin_(vec_)?new`, + `__gnu_cxx::new_allocator::allocate`, + `__libc_malloc`, + `__malloc_alloc_template::allocate`, + `allocate`, + `cpp_alloc`, + `operator new(\[\])?`, + `simple_alloc::allocate`, +}, `|`) + +var allocSkipRxStr = strings.Join([]string{ + // Preserve Go runtime frames that appear in the middle/bottom of + // the stack. 
+ `runtime\.panic`, + `runtime\.reflectcall`, + `runtime\.call[0-9]*`, +}, `|`) + +var cpuProfilerRxStr = strings.Join([]string{ + `ProfileData::Add`, + `ProfileData::prof_handler`, + `CpuProfiler::prof_handler`, + `__pthread_sighandler`, + `__restore`, +}, `|`) + +var lockRxStr = strings.Join([]string{ + `RecordLockProfileData`, + `(base::)?RecordLockProfileData.*`, + `(base::)?SubmitMutexProfileData.*`, + `(base::)?SubmitSpinLockProfileData.*`, + `(base::Mutex::)?AwaitCommon.*`, + `(base::Mutex::)?Unlock.*`, + `(base::Mutex::)?UnlockSlow.*`, + `(base::Mutex::)?ReaderUnlock.*`, + `(base::MutexLock::)?~MutexLock.*`, + `(Mutex::)?AwaitCommon.*`, + `(Mutex::)?Unlock.*`, + `(Mutex::)?UnlockSlow.*`, + `(Mutex::)?ReaderUnlock.*`, + `(MutexLock::)?~MutexLock.*`, + `(SpinLock::)?Unlock.*`, + `(SpinLock::)?SlowUnlock.*`, + `(SpinLockHolder::)?~SpinLockHolder.*`, +}, `|`) diff --git a/api/vendor/github.com/google/pprof/profile/merge.go b/api/vendor/github.com/google/pprof/profile/merge.go new file mode 100644 index 000000000..9978e7330 --- /dev/null +++ b/api/vendor/github.com/google/pprof/profile/merge.go @@ -0,0 +1,481 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "fmt" + "sort" + "strconv" + "strings" +) + +// Compact performs garbage collection on a profile to remove any +// unreferenced fields. This is useful to reduce the size of a profile +// after samples or locations have been removed. +func (p *Profile) Compact() *Profile { + p, _ = Merge([]*Profile{p}) + return p +} + +// Merge merges all the profiles in profs into a single Profile. +// Returns a new profile independent of the input profiles. The merged +// profile is compacted to eliminate unused samples, locations, +// functions and mappings. Profiles must have identical profile sample +// and period types or the merge will fail. profile.Period of the +// resulting profile will be the maximum of all profiles, and +// profile.TimeNanos will be the earliest nonzero one. Merges are +// associative with the caveat of the first profile having some +// specialization in how headers are combined. There may be other +// subtleties now or in the future regarding associativity. 
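+//
+// A minimal usage sketch (p1 and p2 are hypothetical profiles assumed to
+// share sample and period types):
+//
+//	merged, err := Merge([]*Profile{p1, p2})
+//	if err != nil {
+//		// the inputs were incompatible
+//	}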
+func Merge(srcs []*Profile) (*Profile, error) { + if len(srcs) == 0 { + return nil, fmt.Errorf("no profiles to merge") + } + p, err := combineHeaders(srcs) + if err != nil { + return nil, err + } + + pm := &profileMerger{ + p: p, + samples: make(map[sampleKey]*Sample, len(srcs[0].Sample)), + locations: make(map[locationKey]*Location, len(srcs[0].Location)), + functions: make(map[functionKey]*Function, len(srcs[0].Function)), + mappings: make(map[mappingKey]*Mapping, len(srcs[0].Mapping)), + } + + for _, src := range srcs { + // Clear the profile-specific hash tables + pm.locationsByID = make(map[uint64]*Location, len(src.Location)) + pm.functionsByID = make(map[uint64]*Function, len(src.Function)) + pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping)) + + if len(pm.mappings) == 0 && len(src.Mapping) > 0 { + // The Mapping list has the property that the first mapping + // represents the main binary. Take the first Mapping we see, + // otherwise the operations below will add mappings in an + // arbitrary order. + pm.mapMapping(src.Mapping[0]) + } + + for _, s := range src.Sample { + if !isZeroSample(s) { + pm.mapSample(s) + } + } + } + + for _, s := range p.Sample { + if isZeroSample(s) { + // If there are any zero samples, re-merge the profile to GC + // them. + return Merge([]*Profile{p}) + } + } + + return p, nil +} + +// Normalize normalizes the source profile by multiplying each value in profile by the +// ratio of the sum of the base profile's values of that sample type to the sum of the +// source profile's value of that sample type. +func (p *Profile) Normalize(pb *Profile) error { + + if err := p.compatible(pb); err != nil { + return err + } + + baseVals := make([]int64, len(p.SampleType)) + for _, s := range pb.Sample { + for i, v := range s.Value { + baseVals[i] += v + } + } + + srcVals := make([]int64, len(p.SampleType)) + for _, s := range p.Sample { + for i, v := range s.Value { + srcVals[i] += v + } + } + + normScale := make([]float64, len(baseVals)) + for i := range baseVals { + if srcVals[i] == 0 { + normScale[i] = 0.0 + } else { + normScale[i] = float64(baseVals[i]) / float64(srcVals[i]) + } + } + p.ScaleN(normScale) + return nil +} + +func isZeroSample(s *Sample) bool { + for _, v := range s.Value { + if v != 0 { + return false + } + } + return true +} + +type profileMerger struct { + p *Profile + + // Memoization tables within a profile. + locationsByID map[uint64]*Location + functionsByID map[uint64]*Function + mappingsByID map[uint64]mapInfo + + // Memoization tables for profile entities. + samples map[sampleKey]*Sample + locations map[locationKey]*Location + functions map[functionKey]*Function + mappings map[mappingKey]*Mapping +} + +type mapInfo struct { + m *Mapping + offset int64 +} + +func (pm *profileMerger) mapSample(src *Sample) *Sample { + s := &Sample{ + Location: make([]*Location, len(src.Location)), + Value: make([]int64, len(src.Value)), + Label: make(map[string][]string, len(src.Label)), + NumLabel: make(map[string][]int64, len(src.NumLabel)), + NumUnit: make(map[string][]string, len(src.NumLabel)), + } + for i, l := range src.Location { + s.Location[i] = pm.mapLocation(l) + } + for k, v := range src.Label { + vv := make([]string, len(v)) + copy(vv, v) + s.Label[k] = vv + } + for k, v := range src.NumLabel { + u := src.NumUnit[k] + vv := make([]int64, len(v)) + uu := make([]string, len(u)) + copy(vv, v) + copy(uu, u) + s.NumLabel[k] = vv + s.NumUnit[k] = uu + } + // Check memoization table. 
Must be done on the remapped location to + // account for the remapped mapping. Add current values to the + // existing sample. + k := s.key() + if ss, ok := pm.samples[k]; ok { + for i, v := range src.Value { + ss.Value[i] += v + } + return ss + } + copy(s.Value, src.Value) + pm.samples[k] = s + pm.p.Sample = append(pm.p.Sample, s) + return s +} + +// key generates sampleKey to be used as a key for maps. +func (sample *Sample) key() sampleKey { + ids := make([]string, len(sample.Location)) + for i, l := range sample.Location { + ids[i] = strconv.FormatUint(l.ID, 16) + } + + labels := make([]string, 0, len(sample.Label)) + for k, v := range sample.Label { + labels = append(labels, fmt.Sprintf("%q%q", k, v)) + } + sort.Strings(labels) + + numlabels := make([]string, 0, len(sample.NumLabel)) + for k, v := range sample.NumLabel { + numlabels = append(numlabels, fmt.Sprintf("%q%x%x", k, v, sample.NumUnit[k])) + } + sort.Strings(numlabels) + + return sampleKey{ + strings.Join(ids, "|"), + strings.Join(labels, ""), + strings.Join(numlabels, ""), + } +} + +type sampleKey struct { + locations string + labels string + numlabels string +} + +func (pm *profileMerger) mapLocation(src *Location) *Location { + if src == nil { + return nil + } + + if l, ok := pm.locationsByID[src.ID]; ok { + return l + } + + mi := pm.mapMapping(src.Mapping) + l := &Location{ + ID: uint64(len(pm.p.Location) + 1), + Mapping: mi.m, + Address: uint64(int64(src.Address) + mi.offset), + Line: make([]Line, len(src.Line)), + IsFolded: src.IsFolded, + } + for i, ln := range src.Line { + l.Line[i] = pm.mapLine(ln) + } + // Check memoization table. Must be done on the remapped location to + // account for the remapped mapping ID. + k := l.key() + if ll, ok := pm.locations[k]; ok { + pm.locationsByID[src.ID] = ll + return ll + } + pm.locationsByID[src.ID] = l + pm.locations[k] = l + pm.p.Location = append(pm.p.Location, l) + return l +} + +// key generates locationKey to be used as a key for maps. +func (l *Location) key() locationKey { + key := locationKey{ + addr: l.Address, + isFolded: l.IsFolded, + } + if l.Mapping != nil { + // Normalizes address to handle address space randomization. + key.addr -= l.Mapping.Start + key.mappingID = l.Mapping.ID + } + lines := make([]string, len(l.Line)*2) + for i, line := range l.Line { + if line.Function != nil { + lines[i*2] = strconv.FormatUint(line.Function.ID, 16) + } + lines[i*2+1] = strconv.FormatInt(line.Line, 16) + } + key.lines = strings.Join(lines, "|") + return key +} + +type locationKey struct { + addr, mappingID uint64 + lines string + isFolded bool +} + +func (pm *profileMerger) mapMapping(src *Mapping) mapInfo { + if src == nil { + return mapInfo{} + } + + if mi, ok := pm.mappingsByID[src.ID]; ok { + return mi + } + + // Check memoization tables. + mk := src.key() + if m, ok := pm.mappings[mk]; ok { + mi := mapInfo{m, int64(m.Start) - int64(src.Start)} + pm.mappingsByID[src.ID] = mi + return mi + } + m := &Mapping{ + ID: uint64(len(pm.p.Mapping) + 1), + Start: src.Start, + Limit: src.Limit, + Offset: src.Offset, + File: src.File, + BuildID: src.BuildID, + HasFunctions: src.HasFunctions, + HasFilenames: src.HasFilenames, + HasLineNumbers: src.HasLineNumbers, + HasInlineFrames: src.HasInlineFrames, + } + pm.p.Mapping = append(pm.p.Mapping, m) + + // Update memoization tables. + pm.mappings[mk] = m + mi := mapInfo{m, 0} + pm.mappingsByID[src.ID] = mi + return mi +} + +// key generates encoded strings of Mapping to be used as a key for +// maps. 
+func (m *Mapping) key() mappingKey { + // Normalize addresses to handle address space randomization. + // Round up to next 4K boundary to avoid minor discrepancies. + const mapsizeRounding = 0x1000 + + size := m.Limit - m.Start + size = size + mapsizeRounding - 1 + size = size - (size % mapsizeRounding) + key := mappingKey{ + size: size, + offset: m.Offset, + } + + switch { + case m.BuildID != "": + key.buildIDOrFile = m.BuildID + case m.File != "": + key.buildIDOrFile = m.File + default: + // A mapping containing neither build ID nor file name is a fake mapping. A + // key with empty buildIDOrFile is used for fake mappings so that they are + // treated as the same mapping during merging. + } + return key +} + +type mappingKey struct { + size, offset uint64 + buildIDOrFile string +} + +func (pm *profileMerger) mapLine(src Line) Line { + ln := Line{ + Function: pm.mapFunction(src.Function), + Line: src.Line, + } + return ln +} + +func (pm *profileMerger) mapFunction(src *Function) *Function { + if src == nil { + return nil + } + if f, ok := pm.functionsByID[src.ID]; ok { + return f + } + k := src.key() + if f, ok := pm.functions[k]; ok { + pm.functionsByID[src.ID] = f + return f + } + f := &Function{ + ID: uint64(len(pm.p.Function) + 1), + Name: src.Name, + SystemName: src.SystemName, + Filename: src.Filename, + StartLine: src.StartLine, + } + pm.functions[k] = f + pm.functionsByID[src.ID] = f + pm.p.Function = append(pm.p.Function, f) + return f +} + +// key generates a struct to be used as a key for maps. +func (f *Function) key() functionKey { + return functionKey{ + f.StartLine, + f.Name, + f.SystemName, + f.Filename, + } +} + +type functionKey struct { + startLine int64 + name, systemName, fileName string +} + +// combineHeaders checks that all profiles can be merged and returns +// their combined profile. +func combineHeaders(srcs []*Profile) (*Profile, error) { + for _, s := range srcs[1:] { + if err := srcs[0].compatible(s); err != nil { + return nil, err + } + } + + var timeNanos, durationNanos, period int64 + var comments []string + seenComments := map[string]bool{} + var defaultSampleType string + for _, s := range srcs { + if timeNanos == 0 || s.TimeNanos < timeNanos { + timeNanos = s.TimeNanos + } + durationNanos += s.DurationNanos + if period == 0 || period < s.Period { + period = s.Period + } + for _, c := range s.Comments { + if seen := seenComments[c]; !seen { + comments = append(comments, c) + seenComments[c] = true + } + } + if defaultSampleType == "" { + defaultSampleType = s.DefaultSampleType + } + } + + p := &Profile{ + SampleType: make([]*ValueType, len(srcs[0].SampleType)), + + DropFrames: srcs[0].DropFrames, + KeepFrames: srcs[0].KeepFrames, + + TimeNanos: timeNanos, + DurationNanos: durationNanos, + PeriodType: srcs[0].PeriodType, + Period: period, + + Comments: comments, + DefaultSampleType: defaultSampleType, + } + copy(p.SampleType, srcs[0].SampleType) + return p, nil +} + +// compatible determines if two profiles can be compared/merged. +// returns nil if the profiles are compatible; otherwise an error with +// details on the incompatibility. 
+func (p *Profile) compatible(pb *Profile) error { + if !equalValueType(p.PeriodType, pb.PeriodType) { + return fmt.Errorf("incompatible period types %v and %v", p.PeriodType, pb.PeriodType) + } + + if len(p.SampleType) != len(pb.SampleType) { + return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType) + } + + for i := range p.SampleType { + if !equalValueType(p.SampleType[i], pb.SampleType[i]) { + return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType) + } + } + return nil +} + +// equalValueType returns true if the two value types are semantically +// equal. It ignores the internal fields used during encode/decode. +func equalValueType(st1, st2 *ValueType) bool { + return st1.Type == st2.Type && st1.Unit == st2.Unit +} diff --git a/api/vendor/github.com/google/pprof/profile/profile.go b/api/vendor/github.com/google/pprof/profile/profile.go new file mode 100644 index 000000000..2590c8ddb --- /dev/null +++ b/api/vendor/github.com/google/pprof/profile/profile.go @@ -0,0 +1,805 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package profile provides a representation of profile.proto and +// methods to encode/decode profiles in this format. +package profile + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "math" + "path/filepath" + "regexp" + "sort" + "strings" + "sync" + "time" +) + +// Profile is an in-memory representation of profile.proto. +type Profile struct { + SampleType []*ValueType + DefaultSampleType string + Sample []*Sample + Mapping []*Mapping + Location []*Location + Function []*Function + Comments []string + + DropFrames string + KeepFrames string + + TimeNanos int64 + DurationNanos int64 + PeriodType *ValueType + Period int64 + + // The following fields are modified during encoding and copying, + // so are protected by a Mutex. 
+ encodeMu sync.Mutex + + commentX []int64 + dropFramesX int64 + keepFramesX int64 + stringTable []string + defaultSampleTypeX int64 +} + +// ValueType corresponds to Profile.ValueType +type ValueType struct { + Type string // cpu, wall, inuse_space, etc + Unit string // seconds, nanoseconds, bytes, etc + + typeX int64 + unitX int64 +} + +// Sample corresponds to Profile.Sample +type Sample struct { + Location []*Location + Value []int64 + Label map[string][]string + NumLabel map[string][]int64 + NumUnit map[string][]string + + locationIDX []uint64 + labelX []label +} + +// label corresponds to Profile.Label +type label struct { + keyX int64 + // Exactly one of the two following values must be set + strX int64 + numX int64 // Integer value for this label + // can be set if numX has value + unitX int64 +} + +// Mapping corresponds to Profile.Mapping +type Mapping struct { + ID uint64 + Start uint64 + Limit uint64 + Offset uint64 + File string + BuildID string + HasFunctions bool + HasFilenames bool + HasLineNumbers bool + HasInlineFrames bool + + fileX int64 + buildIDX int64 +} + +// Location corresponds to Profile.Location +type Location struct { + ID uint64 + Mapping *Mapping + Address uint64 + Line []Line + IsFolded bool + + mappingIDX uint64 +} + +// Line corresponds to Profile.Line +type Line struct { + Function *Function + Line int64 + + functionIDX uint64 +} + +// Function corresponds to Profile.Function +type Function struct { + ID uint64 + Name string + SystemName string + Filename string + StartLine int64 + + nameX int64 + systemNameX int64 + filenameX int64 +} + +// Parse parses a profile and checks for its validity. The input +// may be a gzip-compressed encoded protobuf or one of many legacy +// profile formats which may be unsupported in the future. +func Parse(r io.Reader) (*Profile, error) { + data, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + return ParseData(data) +} + +// ParseData parses a profile from a buffer and checks for its +// validity. +func ParseData(data []byte) (*Profile, error) { + var p *Profile + var err error + if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err == nil { + data, err = ioutil.ReadAll(gz) + } + if err != nil { + return nil, fmt.Errorf("decompressing profile: %v", err) + } + } + if p, err = ParseUncompressed(data); err != nil && err != errNoData && err != errConcatProfile { + p, err = parseLegacy(data) + } + + if err != nil { + return nil, fmt.Errorf("parsing profile: %v", err) + } + + if err := p.CheckValid(); err != nil { + return nil, fmt.Errorf("malformed profile: %v", err) + } + return p, nil +} + +var errUnrecognized = fmt.Errorf("unrecognized profile format") +var errMalformed = fmt.Errorf("malformed profile format") +var errNoData = fmt.Errorf("empty input file") +var errConcatProfile = fmt.Errorf("concatenated profiles detected") + +func parseLegacy(data []byte) (*Profile, error) { + parsers := []func([]byte) (*Profile, error){ + parseCPU, + parseHeap, + parseGoCount, // goroutine, threadcreate + parseThread, + parseContention, + parseJavaProfile, + } + + for _, parser := range parsers { + p, err := parser(data) + if err == nil { + p.addLegacyFrameInfo() + return p, nil + } + if err != errUnrecognized { + return nil, err + } + } + return nil, errUnrecognized +} + +// ParseUncompressed parses an uncompressed protobuf into a profile. 
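+//
+// A usage sketch (data is assumed to hold raw profile.proto bytes, e.g.
+// after gzip decompression):
+//
+//	p, err := ParseUncompressed(data)
+//	if err != nil {
+//		// handle errNoData or a malformed payload
+//	}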
+func ParseUncompressed(data []byte) (*Profile, error) { + if len(data) == 0 { + return nil, errNoData + } + p := &Profile{} + if err := unmarshal(data, p); err != nil { + return nil, err + } + + if err := p.postDecode(); err != nil { + return nil, err + } + + return p, nil +} + +var libRx = regexp.MustCompile(`([.]so$|[.]so[._][0-9]+)`) + +// massageMappings applies heuristic-based changes to the profile +// mappings to account for quirks of some environments. +func (p *Profile) massageMappings() { + // Merge adjacent regions with matching names, checking that the offsets match + if len(p.Mapping) > 1 { + mappings := []*Mapping{p.Mapping[0]} + for _, m := range p.Mapping[1:] { + lm := mappings[len(mappings)-1] + if adjacent(lm, m) { + lm.Limit = m.Limit + if m.File != "" { + lm.File = m.File + } + if m.BuildID != "" { + lm.BuildID = m.BuildID + } + p.updateLocationMapping(m, lm) + continue + } + mappings = append(mappings, m) + } + p.Mapping = mappings + } + + // Use heuristics to identify main binary and move it to the top of the list of mappings + for i, m := range p.Mapping { + file := strings.TrimSpace(strings.Replace(m.File, "(deleted)", "", -1)) + if len(file) == 0 { + continue + } + if len(libRx.FindStringSubmatch(file)) > 0 { + continue + } + if file[0] == '[' { + continue + } + // Swap what we guess is main to position 0. + p.Mapping[0], p.Mapping[i] = p.Mapping[i], p.Mapping[0] + break + } + + // Keep the mapping IDs neatly sorted + for i, m := range p.Mapping { + m.ID = uint64(i + 1) + } +} + +// adjacent returns whether two mapping entries represent the same +// mapping that has been split into two. Check that their addresses are adjacent, +// and if the offsets match, if they are available. +func adjacent(m1, m2 *Mapping) bool { + if m1.File != "" && m2.File != "" { + if m1.File != m2.File { + return false + } + } + if m1.BuildID != "" && m2.BuildID != "" { + if m1.BuildID != m2.BuildID { + return false + } + } + if m1.Limit != m2.Start { + return false + } + if m1.Offset != 0 && m2.Offset != 0 { + offset := m1.Offset + (m1.Limit - m1.Start) + if offset != m2.Offset { + return false + } + } + return true +} + +func (p *Profile) updateLocationMapping(from, to *Mapping) { + for _, l := range p.Location { + if l.Mapping == from { + l.Mapping = to + } + } +} + +func serialize(p *Profile) []byte { + p.encodeMu.Lock() + p.preEncode() + b := marshal(p) + p.encodeMu.Unlock() + return b +} + +// Write writes the profile as a gzip-compressed marshaled protobuf. +func (p *Profile) Write(w io.Writer) error { + zw := gzip.NewWriter(w) + defer zw.Close() + _, err := zw.Write(serialize(p)) + return err +} + +// WriteUncompressed writes the profile as a marshaled protobuf. +func (p *Profile) WriteUncompressed(w io.Writer) error { + _, err := w.Write(serialize(p)) + return err +} + +// CheckValid tests whether the profile is valid. Checks include, but are +// not limited to: +// - len(Profile.Sample[n].value) == len(Profile.value_unit) +// - Sample.id has a corresponding Profile.Location +func (p *Profile) CheckValid() error { + // Check that sample values are consistent + sampleLen := len(p.SampleType) + if sampleLen == 0 && len(p.Sample) != 0 { + return fmt.Errorf("missing sample type information") + } + for _, s := range p.Sample { + if s == nil { + return fmt.Errorf("profile has nil sample") + } + if len(s.Value) != sampleLen { + return fmt.Errorf("mismatch: sample has %d values vs. 
%d types", len(s.Value), len(p.SampleType)) + } + for _, l := range s.Location { + if l == nil { + return fmt.Errorf("sample has nil location") + } + } + } + + // Check that all mappings/locations/functions are in the tables + // Check that there are no duplicate ids + mappings := make(map[uint64]*Mapping, len(p.Mapping)) + for _, m := range p.Mapping { + if m == nil { + return fmt.Errorf("profile has nil mapping") + } + if m.ID == 0 { + return fmt.Errorf("found mapping with reserved ID=0") + } + if mappings[m.ID] != nil { + return fmt.Errorf("multiple mappings with same id: %d", m.ID) + } + mappings[m.ID] = m + } + functions := make(map[uint64]*Function, len(p.Function)) + for _, f := range p.Function { + if f == nil { + return fmt.Errorf("profile has nil function") + } + if f.ID == 0 { + return fmt.Errorf("found function with reserved ID=0") + } + if functions[f.ID] != nil { + return fmt.Errorf("multiple functions with same id: %d", f.ID) + } + functions[f.ID] = f + } + locations := make(map[uint64]*Location, len(p.Location)) + for _, l := range p.Location { + if l == nil { + return fmt.Errorf("profile has nil location") + } + if l.ID == 0 { + return fmt.Errorf("found location with reserved id=0") + } + if locations[l.ID] != nil { + return fmt.Errorf("multiple locations with same id: %d", l.ID) + } + locations[l.ID] = l + if m := l.Mapping; m != nil { + if m.ID == 0 || mappings[m.ID] != m { + return fmt.Errorf("inconsistent mapping %p: %d", m, m.ID) + } + } + for _, ln := range l.Line { + f := ln.Function + if f == nil { + return fmt.Errorf("location id: %d has a line with nil function", l.ID) + } + if f.ID == 0 || functions[f.ID] != f { + return fmt.Errorf("inconsistent function %p: %d", f, f.ID) + } + } + } + return nil +} + +// Aggregate merges the locations in the profile into equivalence +// classes preserving the request attributes. It also updates the +// samples to point to the merged locations. +func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address bool) error { + for _, m := range p.Mapping { + m.HasInlineFrames = m.HasInlineFrames && inlineFrame + m.HasFunctions = m.HasFunctions && function + m.HasFilenames = m.HasFilenames && filename + m.HasLineNumbers = m.HasLineNumbers && linenumber + } + + // Aggregate functions + if !function || !filename { + for _, f := range p.Function { + if !function { + f.Name = "" + f.SystemName = "" + } + if !filename { + f.Filename = "" + } + } + } + + // Aggregate locations + if !inlineFrame || !address || !linenumber { + for _, l := range p.Location { + if !inlineFrame && len(l.Line) > 1 { + l.Line = l.Line[len(l.Line)-1:] + } + if !linenumber { + for i := range l.Line { + l.Line[i].Line = 0 + } + } + if !address { + l.Address = 0 + } + } + } + + return p.CheckValid() +} + +// NumLabelUnits returns a map of numeric label keys to the units +// associated with those keys and a map of those keys to any units +// that were encountered but not used. +// Unit for a given key is the first encountered unit for that key. If multiple +// units are encountered for values paired with a particular key, then the first +// unit encountered is used and all other units are returned in sorted order +// in map of ignored units. +// If no units are encountered for a particular key, the unit is then inferred +// based on the key. 
+func (p *Profile) NumLabelUnits() (map[string]string, map[string][]string) { + numLabelUnits := map[string]string{} + ignoredUnits := map[string]map[string]bool{} + encounteredKeys := map[string]bool{} + + // Determine units based on numeric tags for each sample. + for _, s := range p.Sample { + for k := range s.NumLabel { + encounteredKeys[k] = true + for _, unit := range s.NumUnit[k] { + if unit == "" { + continue + } + if wantUnit, ok := numLabelUnits[k]; !ok { + numLabelUnits[k] = unit + } else if wantUnit != unit { + if v, ok := ignoredUnits[k]; ok { + v[unit] = true + } else { + ignoredUnits[k] = map[string]bool{unit: true} + } + } + } + } + } + // Infer units for keys without any units associated with + // numeric tag values. + for key := range encounteredKeys { + unit := numLabelUnits[key] + if unit == "" { + switch key { + case "alignment", "request": + numLabelUnits[key] = "bytes" + default: + numLabelUnits[key] = key + } + } + } + + // Copy ignored units into more readable format + unitsIgnored := make(map[string][]string, len(ignoredUnits)) + for key, values := range ignoredUnits { + units := make([]string, len(values)) + i := 0 + for unit := range values { + units[i] = unit + i++ + } + sort.Strings(units) + unitsIgnored[key] = units + } + + return numLabelUnits, unitsIgnored +} + +// String dumps a text representation of a profile. Intended mainly +// for debugging purposes. +func (p *Profile) String() string { + ss := make([]string, 0, len(p.Comments)+len(p.Sample)+len(p.Mapping)+len(p.Location)) + for _, c := range p.Comments { + ss = append(ss, "Comment: "+c) + } + if pt := p.PeriodType; pt != nil { + ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit)) + } + ss = append(ss, fmt.Sprintf("Period: %d", p.Period)) + if p.TimeNanos != 0 { + ss = append(ss, fmt.Sprintf("Time: %v", time.Unix(0, p.TimeNanos))) + } + if p.DurationNanos != 0 { + ss = append(ss, fmt.Sprintf("Duration: %.4v", time.Duration(p.DurationNanos))) + } + + ss = append(ss, "Samples:") + var sh1 string + for _, s := range p.SampleType { + dflt := "" + if s.Type == p.DefaultSampleType { + dflt = "[dflt]" + } + sh1 = sh1 + fmt.Sprintf("%s/%s%s ", s.Type, s.Unit, dflt) + } + ss = append(ss, strings.TrimSpace(sh1)) + for _, s := range p.Sample { + ss = append(ss, s.string()) + } + + ss = append(ss, "Locations") + for _, l := range p.Location { + ss = append(ss, l.string()) + } + + ss = append(ss, "Mappings") + for _, m := range p.Mapping { + ss = append(ss, m.string()) + } + + return strings.Join(ss, "\n") + "\n" +} + +// string dumps a text representation of a mapping. Intended mainly +// for debugging purposes. +func (m *Mapping) string() string { + bits := "" + if m.HasFunctions { + bits = bits + "[FN]" + } + if m.HasFilenames { + bits = bits + "[FL]" + } + if m.HasLineNumbers { + bits = bits + "[LN]" + } + if m.HasInlineFrames { + bits = bits + "[IN]" + } + return fmt.Sprintf("%d: %#x/%#x/%#x %s %s %s", + m.ID, + m.Start, m.Limit, m.Offset, + m.File, + m.BuildID, + bits) +} + +// string dumps a text representation of a location. Intended mainly +// for debugging purposes. +func (l *Location) string() string { + ss := []string{} + locStr := fmt.Sprintf("%6d: %#x ", l.ID, l.Address) + if m := l.Mapping; m != nil { + locStr = locStr + fmt.Sprintf("M=%d ", m.ID) + } + if l.IsFolded { + locStr = locStr + "[F] " + } + if len(l.Line) == 0 { + ss = append(ss, locStr) + } + for li := range l.Line { + lnStr := "??" 
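+ // lnStr stays "??" when the line carries no function information.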
+ if fn := l.Line[li].Function; fn != nil { + lnStr = fmt.Sprintf("%s %s:%d s=%d", + fn.Name, + fn.Filename, + l.Line[li].Line, + fn.StartLine) + if fn.Name != fn.SystemName { + lnStr = lnStr + "(" + fn.SystemName + ")" + } + } + ss = append(ss, locStr+lnStr) + // Do not print location details past the first line + locStr = " " + } + return strings.Join(ss, "\n") +} + +// string dumps a text representation of a sample. Intended mainly +// for debugging purposes. +func (s *Sample) string() string { + ss := []string{} + var sv string + for _, v := range s.Value { + sv = fmt.Sprintf("%s %10d", sv, v) + } + sv = sv + ": " + for _, l := range s.Location { + sv = sv + fmt.Sprintf("%d ", l.ID) + } + ss = append(ss, sv) + const labelHeader = " " + if len(s.Label) > 0 { + ss = append(ss, labelHeader+labelsToString(s.Label)) + } + if len(s.NumLabel) > 0 { + ss = append(ss, labelHeader+numLabelsToString(s.NumLabel, s.NumUnit)) + } + return strings.Join(ss, "\n") +} + +// labelsToString returns a string representation of a +// map representing labels. +func labelsToString(labels map[string][]string) string { + ls := []string{} + for k, v := range labels { + ls = append(ls, fmt.Sprintf("%s:%v", k, v)) + } + sort.Strings(ls) + return strings.Join(ls, " ") +} + +// numLabelsToString returns a string representation of a map +// representing numeric labels. +func numLabelsToString(numLabels map[string][]int64, numUnits map[string][]string) string { + ls := []string{} + for k, v := range numLabels { + units := numUnits[k] + var labelString string + if len(units) == len(v) { + values := make([]string, len(v)) + for i, vv := range v { + values[i] = fmt.Sprintf("%d %s", vv, units[i]) + } + labelString = fmt.Sprintf("%s:%v", k, values) + } else { + labelString = fmt.Sprintf("%s:%v", k, v) + } + ls = append(ls, labelString) + } + sort.Strings(ls) + return strings.Join(ls, " ") +} + +// SetLabel sets the specified key to the specified value for all samples in the +// profile. +func (p *Profile) SetLabel(key string, value []string) { + for _, sample := range p.Sample { + if sample.Label == nil { + sample.Label = map[string][]string{key: value} + } else { + sample.Label[key] = value + } + } +} + +// RemoveLabel removes all labels associated with the specified key for all +// samples in the profile. +func (p *Profile) RemoveLabel(key string) { + for _, sample := range p.Sample { + delete(sample.Label, key) + } +} + +// HasLabel returns true if a sample has a label with indicated key and value. +func (s *Sample) HasLabel(key, value string) bool { + for _, v := range s.Label[key] { + if v == value { + return true + } + } + return false +} + +// DiffBaseSample returns true if a sample belongs to the diff base and false +// otherwise. +func (s *Sample) DiffBaseSample() bool { + return s.HasLabel("pprof::base", "true") +} + +// Scale multiplies all sample values in a profile by a constant and keeps +// only samples that have at least one non-zero value. +func (p *Profile) Scale(ratio float64) { + if ratio == 1 { + return + } + ratios := make([]float64, len(p.SampleType)) + for i := range p.SampleType { + ratios[i] = ratio + } + p.ScaleN(ratios) +} + +// ScaleN multiplies each sample values in a sample by a different amount +// and keeps only samples that have at least one non-zero value. 
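+//
+// For example, to scale a two-valued profile by an assumed inverse
+// sampling rate r:
+//
+//	err := p.ScaleN([]float64{r, r})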
+func (p *Profile) ScaleN(ratios []float64) error { + if len(p.SampleType) != len(ratios) { + return fmt.Errorf("mismatched scale ratios, got %d, want %d", len(ratios), len(p.SampleType)) + } + allOnes := true + for _, r := range ratios { + if r != 1 { + allOnes = false + break + } + } + if allOnes { + return nil + } + fillIdx := 0 + for _, s := range p.Sample { + keepSample := false + for i, v := range s.Value { + if ratios[i] != 1 { + val := int64(math.Round(float64(v) * ratios[i])) + s.Value[i] = val + keepSample = keepSample || val != 0 + } + } + if keepSample { + p.Sample[fillIdx] = s + fillIdx++ + } + } + p.Sample = p.Sample[:fillIdx] + return nil +} + +// HasFunctions determines if all locations in this profile have +// symbolized function information. +func (p *Profile) HasFunctions() bool { + for _, l := range p.Location { + if l.Mapping != nil && !l.Mapping.HasFunctions { + return false + } + } + return true +} + +// HasFileLines determines if all locations in this profile have +// symbolized file and line number information. +func (p *Profile) HasFileLines() bool { + for _, l := range p.Location { + if l.Mapping != nil && (!l.Mapping.HasFilenames || !l.Mapping.HasLineNumbers) { + return false + } + } + return true +} + +// Unsymbolizable returns true if a mapping points to a binary for which +// locations can't be symbolized in principle, at least now. Examples are +// "[vdso]", [vsyscall]" and some others, see the code. +func (m *Mapping) Unsymbolizable() bool { + name := filepath.Base(m.File) + return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") +} + +// Copy makes a fully independent copy of a profile. +func (p *Profile) Copy() *Profile { + pp := &Profile{} + if err := unmarshal(serialize(p), pp); err != nil { + panic(err) + } + if err := pp.postDecode(); err != nil { + panic(err) + } + + return pp +} diff --git a/api/vendor/github.com/google/pprof/profile/proto.go b/api/vendor/github.com/google/pprof/profile/proto.go new file mode 100644 index 000000000..539ad3ab3 --- /dev/null +++ b/api/vendor/github.com/google/pprof/profile/proto.go @@ -0,0 +1,370 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file is a simple protocol buffer encoder and decoder. +// The format is described at +// https://developers.google.com/protocol-buffers/docs/encoding +// +// A protocol message must implement the message interface: +// decoder() []decoder +// encode(*buffer) +// +// The decode method returns a slice indexed by field number that gives the +// function to decode that field. +// The encode method encodes its receiver into the given buffer. +// +// The two methods are simple enough to be implemented by hand rather than +// by using a protocol compiler. +// +// See profile.go for examples of messages implementing this interface. +// +// There is no support for groups, message sets, or "has" bits. 
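+//
+// As a worked example of the varint encoding used below: the value 300 is
+// split into 7-bit groups from the least-significant end and written as
+// 0xAC 0x02, where 0xAC carries the low seven bits (0x2C) with the
+// continuation bit set and 0x02 carries the remaining bits.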
+ +package profile + +import ( + "errors" + "fmt" +) + +type buffer struct { + field int // field tag + typ int // proto wire type code for field + u64 uint64 + data []byte + tmp [16]byte +} + +type decoder func(*buffer, message) error + +type message interface { + decoder() []decoder + encode(*buffer) +} + +func marshal(m message) []byte { + var b buffer + m.encode(&b) + return b.data +} + +func encodeVarint(b *buffer, x uint64) { + for x >= 128 { + b.data = append(b.data, byte(x)|0x80) + x >>= 7 + } + b.data = append(b.data, byte(x)) +} + +func encodeLength(b *buffer, tag int, len int) { + encodeVarint(b, uint64(tag)<<3|2) + encodeVarint(b, uint64(len)) +} + +func encodeUint64(b *buffer, tag int, x uint64) { + // append varint to b.data + encodeVarint(b, uint64(tag)<<3) + encodeVarint(b, x) +} + +func encodeUint64s(b *buffer, tag int, x []uint64) { + if len(x) > 2 { + // Use packed encoding + n1 := len(b.data) + for _, u := range x { + encodeVarint(b, u) + } + n2 := len(b.data) + encodeLength(b, tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) + return + } + for _, u := range x { + encodeUint64(b, tag, u) + } +} + +func encodeUint64Opt(b *buffer, tag int, x uint64) { + if x == 0 { + return + } + encodeUint64(b, tag, x) +} + +func encodeInt64(b *buffer, tag int, x int64) { + u := uint64(x) + encodeUint64(b, tag, u) +} + +func encodeInt64s(b *buffer, tag int, x []int64) { + if len(x) > 2 { + // Use packed encoding + n1 := len(b.data) + for _, u := range x { + encodeVarint(b, uint64(u)) + } + n2 := len(b.data) + encodeLength(b, tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) + return + } + for _, u := range x { + encodeInt64(b, tag, u) + } +} + +func encodeInt64Opt(b *buffer, tag int, x int64) { + if x == 0 { + return + } + encodeInt64(b, tag, x) +} + +func encodeString(b *buffer, tag int, x string) { + encodeLength(b, tag, len(x)) + b.data = append(b.data, x...) 
+} + +func encodeStrings(b *buffer, tag int, x []string) { + for _, s := range x { + encodeString(b, tag, s) + } +} + +func encodeBool(b *buffer, tag int, x bool) { + if x { + encodeUint64(b, tag, 1) + } else { + encodeUint64(b, tag, 0) + } +} + +func encodeBoolOpt(b *buffer, tag int, x bool) { + if x { + encodeBool(b, tag, x) + } +} + +func encodeMessage(b *buffer, tag int, m message) { + n1 := len(b.data) + m.encode(b) + n2 := len(b.data) + encodeLength(b, tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) +} + +func unmarshal(data []byte, m message) (err error) { + b := buffer{data: data, typ: 2} + return decodeMessage(&b, m) +} + +func le64(p []byte) uint64 { + return uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 | uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56 +} + +func le32(p []byte) uint32 { + return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 +} + +func decodeVarint(data []byte) (uint64, []byte, error) { + var u uint64 + for i := 0; ; i++ { + if i >= 10 || i >= len(data) { + return 0, nil, errors.New("bad varint") + } + u |= uint64(data[i]&0x7F) << uint(7*i) + if data[i]&0x80 == 0 { + return u, data[i+1:], nil + } + } +} + +func decodeField(b *buffer, data []byte) ([]byte, error) { + x, data, err := decodeVarint(data) + if err != nil { + return nil, err + } + b.field = int(x >> 3) + b.typ = int(x & 7) + b.data = nil + b.u64 = 0 + switch b.typ { + case 0: + b.u64, data, err = decodeVarint(data) + if err != nil { + return nil, err + } + case 1: + if len(data) < 8 { + return nil, errors.New("not enough data") + } + b.u64 = le64(data[:8]) + data = data[8:] + case 2: + var n uint64 + n, data, err = decodeVarint(data) + if err != nil { + return nil, err + } + if n > uint64(len(data)) { + return nil, errors.New("too much data") + } + b.data = data[:n] + data = data[n:] + case 5: + if len(data) < 4 { + return nil, errors.New("not enough data") + } + b.u64 = uint64(le32(data[:4])) + data = data[4:] + default: + return nil, fmt.Errorf("unknown wire type: %d", b.typ) + } + + return data, nil +} + +func checkType(b *buffer, typ int) error { + if b.typ != typ { + return errors.New("type mismatch") + } + return nil +} + +func decodeMessage(b *buffer, m message) error { + if err := checkType(b, 2); err != nil { + return err + } + dec := m.decoder() + data := b.data + for len(data) > 0 { + // pull varint field# + type + var err error + data, err = decodeField(b, data) + if err != nil { + return err + } + if b.field >= len(dec) || dec[b.field] == nil { + continue + } + if err := dec[b.field](b, m); err != nil { + return err + } + } + return nil +} + +func decodeInt64(b *buffer, x *int64) error { + if err := checkType(b, 0); err != nil { + return err + } + *x = int64(b.u64) + return nil +} + +func decodeInt64s(b *buffer, x *[]int64) error { + if b.typ == 2 { + // Packed encoding + data := b.data + tmp := make([]int64, 0, len(data)) // Maximally sized + for len(data) > 0 { + var u uint64 + var err error + + if u, data, err = decodeVarint(data); err != nil { + return err + } + tmp = append(tmp, int64(u)) + } + *x = append(*x, tmp...) 
+ return nil + } + var i int64 + if err := decodeInt64(b, &i); err != nil { + return err + } + *x = append(*x, i) + return nil +} + +func decodeUint64(b *buffer, x *uint64) error { + if err := checkType(b, 0); err != nil { + return err + } + *x = b.u64 + return nil +} + +func decodeUint64s(b *buffer, x *[]uint64) error { + if b.typ == 2 { + data := b.data + // Packed encoding + tmp := make([]uint64, 0, len(data)) // Maximally sized + for len(data) > 0 { + var u uint64 + var err error + + if u, data, err = decodeVarint(data); err != nil { + return err + } + tmp = append(tmp, u) + } + *x = append(*x, tmp...) + return nil + } + var u uint64 + if err := decodeUint64(b, &u); err != nil { + return err + } + *x = append(*x, u) + return nil +} + +func decodeString(b *buffer, x *string) error { + if err := checkType(b, 2); err != nil { + return err + } + *x = string(b.data) + return nil +} + +func decodeStrings(b *buffer, x *[]string) error { + var s string + if err := decodeString(b, &s); err != nil { + return err + } + *x = append(*x, s) + return nil +} + +func decodeBool(b *buffer, x *bool) error { + if err := checkType(b, 0); err != nil { + return err + } + if int64(b.u64) == 0 { + *x = false + } else { + *x = true + } + return nil +} diff --git a/api/vendor/github.com/google/pprof/profile/prune.go b/api/vendor/github.com/google/pprof/profile/prune.go new file mode 100644 index 000000000..02d21a818 --- /dev/null +++ b/api/vendor/github.com/google/pprof/profile/prune.go @@ -0,0 +1,178 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Implements methods to remove frames from profiles. + +package profile + +import ( + "fmt" + "regexp" + "strings" +) + +var ( + reservedNames = []string{"(anonymous namespace)", "operator()"} + bracketRx = func() *regexp.Regexp { + var quotedNames []string + for _, name := range append(reservedNames, "(") { + quotedNames = append(quotedNames, regexp.QuoteMeta(name)) + } + return regexp.MustCompile(strings.Join(quotedNames, "|")) + }() +) + +// simplifyFunc does some primitive simplification of function names. +func simplifyFunc(f string) string { + // Account for leading '.' on the PPC ELF v1 ABI. + funcName := strings.TrimPrefix(f, ".") + // Account for unsimplified names -- try to remove the argument list by trimming + // starting from the first '(', but skipping reserved names that have '('. + for _, ind := range bracketRx.FindAllStringSubmatchIndex(funcName, -1) { + foundReserved := false + for _, res := range reservedNames { + if funcName[ind[0]:ind[1]] == res { + foundReserved = true + break + } + } + if !foundReserved { + funcName = funcName[:ind[0]] + break + } + } + return funcName +} + +// Prune removes all nodes beneath a node matching dropRx, and not +// matching keepRx. If the root node of a Sample matches, the sample +// will have an empty stack. 
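+//
+// A sketch of typical use (the regexps here are illustrative; keepRx may
+// be nil):
+//
+//	drop := regexp.MustCompile(`^runtime\..*$`)
+//	keep := regexp.MustCompile(`^runtime\.panic$`)
+//	p.Prune(drop, keep)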
+func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) { + prune := make(map[uint64]bool) + pruneBeneath := make(map[uint64]bool) + + for _, loc := range p.Location { + var i int + for i = len(loc.Line) - 1; i >= 0; i-- { + if fn := loc.Line[i].Function; fn != nil && fn.Name != "" { + funcName := simplifyFunc(fn.Name) + if dropRx.MatchString(funcName) { + if keepRx == nil || !keepRx.MatchString(funcName) { + break + } + } + } + } + + if i >= 0 { + // Found matching entry to prune. + pruneBeneath[loc.ID] = true + + // Remove the matching location. + if i == len(loc.Line)-1 { + // Matched the top entry: prune the whole location. + prune[loc.ID] = true + } else { + loc.Line = loc.Line[i+1:] + } + } + } + + // Prune locs from each Sample + for _, sample := range p.Sample { + // Scan from the root to the leaves to find the prune location. + // Do not prune frames before the first user frame, to avoid + // pruning everything. + foundUser := false + for i := len(sample.Location) - 1; i >= 0; i-- { + id := sample.Location[i].ID + if !prune[id] && !pruneBeneath[id] { + foundUser = true + continue + } + if !foundUser { + continue + } + if prune[id] { + sample.Location = sample.Location[i+1:] + break + } + if pruneBeneath[id] { + sample.Location = sample.Location[i:] + break + } + } + } +} + +// RemoveUninteresting prunes and elides profiles using built-in +// tables of uninteresting function names. +func (p *Profile) RemoveUninteresting() error { + var keep, drop *regexp.Regexp + var err error + + if p.DropFrames != "" { + if drop, err = regexp.Compile("^(" + p.DropFrames + ")$"); err != nil { + return fmt.Errorf("failed to compile regexp %s: %v", p.DropFrames, err) + } + if p.KeepFrames != "" { + if keep, err = regexp.Compile("^(" + p.KeepFrames + ")$"); err != nil { + return fmt.Errorf("failed to compile regexp %s: %v", p.KeepFrames, err) + } + } + p.Prune(drop, keep) + } + return nil +} + +// PruneFrom removes all nodes beneath the lowest node matching dropRx, not including itself. +// +// Please see the example below to understand this method as well as +// the difference from Prune method. +// +// A sample contains Location of [A,B,C,B,D] where D is the top frame and there's no inline. +// +// PruneFrom(A) returns [A,B,C,B,D] because there's no node beneath A. +// Prune(A, nil) returns [B,C,B,D] by removing A itself. +// +// PruneFrom(B) returns [B,C,B,D] by removing all nodes beneath the first B when scanning from the bottom. +// Prune(B, nil) returns [D] because a matching node is found by scanning from the root. +func (p *Profile) PruneFrom(dropRx *regexp.Regexp) { + pruneBeneath := make(map[uint64]bool) + + for _, loc := range p.Location { + for i := 0; i < len(loc.Line); i++ { + if fn := loc.Line[i].Function; fn != nil && fn.Name != "" { + funcName := simplifyFunc(fn.Name) + if dropRx.MatchString(funcName) { + // Found matching entry to prune. + pruneBeneath[loc.ID] = true + loc.Line = loc.Line[i:] + break + } + } + } + } + + // Prune locs from each Sample + for _, sample := range p.Sample { + // Scan from the bottom leaf to the root to find the prune location. 
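+ // sample.Location[0] is the leaf; keeping Location[i:] preserves the
+ // matched node and everything toward the root, dropping only the
+ // frames beneath it.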
+ for i, loc := range sample.Location { + if pruneBeneath[loc.ID] { + sample.Location = sample.Location[i:] + break + } + } + } +} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/.gitignore b/api/vendor/github.com/onsi/ginkgo/v2/.gitignore index edf0231cd..18793c248 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/.gitignore +++ b/api/vendor/github.com/onsi/ginkgo/v2/.gitignore @@ -1,5 +1,5 @@ .DS_Store -TODO.md +TODO tmp/**/* *.coverprofile .vscode diff --git a/api/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/api/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index ba938a450..f4671ec1c 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/api/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,403 @@ +## 2.9.7 + +### Fixes +- fix race when multiple defercleanups are called in goroutines [07fc3a0] + +## 2.9.6 + +### Fixes +- fix: create parent directory before report files (#1212) [0ac65de] + +### Maintenance +- Bump github.com/onsi/gomega from 1.27.6 to 1.27.7 (#1202) [3e39231] + +## 2.9.5 + +### Fixes +- ensure the correct deterministic sort order is produced when ordered specs are generated by a helper function [7fa0b6b] + +### Maintenance +- fix generators link (#1200) [9f9d8b9] +- Bump golang.org/x/tools from 0.8.0 to 0.9.1 (#1196) [150e3f2] +- fix spelling err in docs (#1199) [0013b1a] +- Bump golang.org/x/sys from 0.7.0 to 0.8.0 (#1193) [9e9e3e5] + +## 2.9.4 + +### Fixes +- fix hang with ginkgo -p (#1192) [15d4bdc] - this addresses a _long_ standing issue related to Ginkgo hanging when a child process spawned by the test does not exit. + +- fix: fail fast may cause Serial spec or cleanup Node interrupted (#1178) [8dea88b] - prior to this there was a small gap in which specs on other processes might start even if one process has tried to abort the suite. + + +### Maintenance +- Document run order when multiple setup nodes are at the same nesting level [903be81] + +## 2.9.3 + +### Features +- Add RenderTimeline to GinkgoT() [c0c77b6] + +### Fixes +- update Measure deprecation message. fixes #1176 [227c662] +- add newlines to GinkgoLogr (#1170) (#1171) [0de0e7c] + +### Maintenance +- Bump commonmarker from 0.23.8 to 0.23.9 in /docs (#1183) [8b925ab] +- Bump nokogiri from 1.14.1 to 1.14.3 in /docs (#1184) [e3795a4] +- Bump golang.org/x/tools from 0.7.0 to 0.8.0 (#1182) [b453793] +- Bump actions/setup-go from 3 to 4 (#1164) [73ed75b] +- Bump github.com/onsi/gomega from 1.27.4 to 1.27.6 (#1173) [0a2bc64] +- Bump github.com/go-logr/logr from 1.2.3 to 1.2.4 (#1174) [f41c557] +- Bump golang.org/x/sys from 0.6.0 to 0.7.0 (#1179) [8e423e5] + +## 2.9.2 + +### Maintenance +- Bump github.com/go-task/slim-sprig (#1167) [3fcc5bf] +- Bump github.com/onsi/gomega from 1.27.3 to 1.27.4 (#1163) [6143ffe] + +## 2.9.1 + +### Fixes +This release fixes a longstanding issue where `ginkgo -coverpkg=./...` would not work. This is now resolved and fixes [#1161](https://github.com/onsi/ginkgo/issues/1161) and [#995](https://github.com/onsi/ginkgo/issues/995) +- Support -coverpkg=./... 
[26ca1b5] +- document coverpkg a bit more clearly [fc44c3b] + +### Maintenance +- bump various dependencies +- Improve Documentation and fix typo (#1158) [93de676] + +## 2.9.0 + +### Features +- AttachProgressReporter is an experimental feature that allows users to provide arbitrary information when a ProgressReport is requested [28801fe] + +- GinkgoT() has been expanded to include several Ginkgo-specific methods [2bd5a3b] + + The intent is to enable the development of third-party libraries that integrate deeply with Ginkgo using `GinkgoT()` to access Ginkgo's functionality. + +## 2.8.4 + +### Features +- Add OmitSuiteSetupNodes to JunitReportConfig (#1147) [979fbc2] +- Add a reference to ginkgolinter in docs.index.md (#1143) [8432589] + +### Fixes +- rename tools hack to see if it fixes things for downstream users [a8bb39a] + +### Maintenance +- Bump golang.org/x/text (#1144) [41b2a8a] +- Bump github.com/onsi/gomega from 1.27.0 to 1.27.1 (#1142) [7c4f583] + +## 2.8.3 + +Released to fix security issue in golang.org/x/net dependency + +### Maintenance + +- Bump golang.org/x/net from 0.6.0 to 0.7.0 (#1141) [fc1a02e] +- remove tools.go hack from documentation [0718693] + +## 2.8.2 + +Ginkgo now includes a `tools.go` file in the root directory of the `ginkgo` package. This should allow modules that simply `go get github.com/onsi/ginkgo/v2` to also pull in the CLI dependencies. This obviates the need for consumers of Ginkgo to have their own `tools.go` file and makes it simpler to ensure that the version of the `ginkgo` CLI being used matches the version of the library. You can simply run `go run github.com/onsi/ginkgo/v2/ginkgo` to run the version of the cli associated with your package go.mod. + +### Maintenance + +- Bump github.com/onsi/gomega from 1.26.0 to 1.27.0 (#1139) [5767b0a] +- Fix minor typos (#1138) [e1e9723] +- Fix link in V2 Migration Guide (#1137) [a588f60] + +## 2.8.1 + +### Fixes +- lock around default report output to avoid triggering the race detector when calling By from goroutines [2d5075a] +- don't run ReportEntries through sprintf [febbe38] + +### Maintenance +- Bump golang.org/x/tools from 0.5.0 to 0.6.0 (#1135) [11a4860] +- test: update matrix for Go 1.20 (#1130) [4890a62] +- Bump golang.org/x/sys from 0.4.0 to 0.5.0 (#1133) [a774638] +- Bump github.com/onsi/gomega from 1.25.0 to 1.26.0 (#1120) [3f233bd] +- Bump github-pages from 227 to 228 in /docs (#1131) [f9b8649] +- Bump activesupport from 6.0.6 to 6.0.6.1 in /docs (#1127) [6f8c042] +- Update index.md with instructions on how to upgrade Ginkgo [833a75e] + +## 2.8.0 + +### Features + +- Introduce GinkgoHelper() to track and exclude helper functions from potential CodeLocations [e19f556] + +Modeled after `testing.T.Helper()`. 
Now, rather than write code like:
+
+```go
+func helper(model Model) {
+    Expect(model).WithOffset(1).To(BeValid())
+    Expect(model.SerialNumber).WithOffset(1).To(MatchRegexp("[a-f0-9]*"))
+}
+```
+
+you can stop tracking offsets (which makes composing nested helpers nearly impossible) and simply write:
+
+```go
+func helper(model Model) {
+    GinkgoHelper()
+    Expect(model).To(BeValid())
+    Expect(model.SerialNumber).To(MatchRegexp("[a-f0-9]*"))
+}
+```
+
+- Introduce GinkgoLabelFilter() and Label().MatchesLabelFilter() to make it possible to programmatically match filters (fixes #1119) [2f6597c]
+
+You can now write code like this:
+
+```go
+BeforeSuite(func() {
+    if Label("slow").MatchesLabelFilter(GinkgoLabelFilter()) {
+        // do slow setup
+    }
+
+    if Label("fast").MatchesLabelFilter(GinkgoLabelFilter()) {
+        // do fast setup
+    }
+})
+```
+
+to programmatically check whether a given set of labels will match the configured `--label-filter`.
+
+### Maintenance
+
+- Bump webrick from 1.7.0 to 1.8.1 in /docs (#1125) [ea4966e]
+- cdeql: add ruby language (#1124) [9dd275b]
+- dependabot: add bundler package-ecosystem for docs (#1123) [14e7bdd]
+
+## 2.7.1
+
+### Fixes
+- Bring back SuiteConfig.EmitSpecProgress to avoid a compilation issue for consumers that set it manually [d2a1cb0]
+
+### Maintenance
+- Bump github.com/onsi/gomega from 1.24.2 to 1.25.0 (#1118) [cafece6]
+- Bump golang.org/x/tools from 0.4.0 to 0.5.0 (#1111) [eda66c2]
+- Bump golang.org/x/sys from 0.3.0 to 0.4.0 (#1112) [ac5ccaa]
+- Bump github.com/onsi/gomega from 1.24.1 to 1.24.2 (#1097) [eee6480]
+
+## 2.7.0
+
+### Features
+- Introduce ContinueOnFailure for Ordered containers [e0123ca] - Ordered containers that are also decorated with ContinueOnFailure will not stop running specs after the first spec fails.
+- Support for bootstrap commands to use custom data for templates (#1110) [7a2b242]
+- Support for labels and pending decorator in ginkgo outline output (#1113) [e6e3b98]
+- Color aliases for custom color support (#1101) [49fab7a]
+
+### Fixes
+- correctly ensure deterministic spec order, even if specs are generated by iterating over a map [89dda20]
+- Fix a bug where timed-out specs were not correctly treated as failures when determining whether or not to run AfterAlls in an Ordered container.
+- Ensure go test coverprofile outputs to the expected location (#1105) [b0bd77b]
+
+## 2.6.1
+
+### Features
+- Override formatter colors from envvars - this is a new feature but an alternative approach involving config files might be taken in the future (#1095) [60240d1]
+
+### Fixes
+- GinkgoRecover now supports ignoring panics that match a specific, hidden, interface [301f3e2]
+
+### Maintenance
+- Bump github.com/onsi/gomega from 1.24.0 to 1.24.1 (#1077) [3643823]
+- Bump golang.org/x/tools from 0.2.0 to 0.4.0 (#1090) [f9f856e]
+- Bump nokogiri from 1.13.9 to 1.13.10 in /docs (#1091) [0d7087e]
+
+## 2.6.0
+
+### Features
+- `ReportBeforeSuite` provides access to the suite report before the suite begins.
- Add junit config option for omitting leafnodetype (#1088) [956e6d2]
- Add support to customize junit report config to omit spec labels (#1087) [de44005]

### Fixes
- Fix stack trace pruning so that it has a chance of working on windows [2165648]

## 2.5.1

### Fixes
- skipped tests only show as 'S' when running with -v [3ab38ae]
- Fix typo in docs/index.md (#1082) [55fc58d]
- Fix typo in docs/index.md (#1081) [8a14f1f]
- Fix link notation in docs/index.md (#1080) [2669612]
- Fix typo in `--progress` deprecation message (#1076) [b4b7edc]

### Maintenance
- chore: Included githubactions in the dependabot config (#976) [baea341]
- Bump golang.org/x/sys from 0.1.0 to 0.2.0 (#1075) [9646297]

## 2.5.0

### Ginkgo output now includes a timeline-view of the spec

This commit changes Ginkgo's default output. Spec details are now
presented as a **timeline** that includes events that occur during the spec
lifecycle interleaved with any GinkgoWriter content. This makes it much easier
to understand the flow of a spec and where a given failure occurs.

The --progress, --slow-spec-threshold, --always-emit-ginkgo-writer flags
and the SuppressProgressReporting decorator have all been deprecated. Instead,
the existing -v and -vv flags better capture the level of verbosity to display. However,
a new --show-node-events flag is added to include node `> Enter` and `< Exit` events
in the spec timeline.

In addition, JUnit reports now include the timeline (rendered with -vv) and custom JUnit
reports can be configured and generated using
`GenerateJUnitReportWithConfig(report types.Report, dst string, config JunitReportConfig)`

Code should continue to work unchanged with this version of Ginkgo - however, if you have tooling that
was relying on the specific output format of Ginkgo you _may_ run into issues. Ginkgo's console output is not guaranteed to be stable for tooling and automation purposes. You should, instead, use Ginkgo's JSON format
to build tooling on top of as it has stronger guarantees to be stable from version to version.

### Features
- Provide details about which timeout expired [0f2fa27]

### Fixes
- Add Support Policy to docs [c70867a]

### Maintenance
- Bump github.com/onsi/gomega from 1.22.1 to 1.23.0 (#1070) [bb3b4e2]

## 2.4.0

### Features

- DeferCleanup supports functions with multiple-return values [5e33c75]
- Add GinkgoLogr (#1067) [bf78c28]
- Introduction of 'MustPassRepeatedly' decorator (#1051) [047c02f]

### Fixes
- correcting some typos (#1064) [1403d3c]
- fix flaky internal_integration interrupt specs [2105ba3]
- Correct busted link in README [be6b5b9]

### Maintenance
- Bump actions/checkout from 2 to 3 (#1062) [8a2f483]
- Bump golang.org/x/tools from 0.1.12 to 0.2.0 (#1065) [529c4e8]
- Bump github/codeql-action from 1 to 2 (#1061) [da09146]
- Bump actions/setup-go from 2 to 3 (#1060) [918040d]
- Bump github.com/onsi/gomega from 1.22.0 to 1.22.1 (#1053) [2098e4d]
- Bump nokogiri from 1.13.8 to 1.13.9 in /docs (#1066) [1d74122]
- Add GHA to dependabot config [4442772]

## 2.3.1

### Fixes
Several users were invoking `ginkgo` by installing the latest version of the cli via `go install github.com/onsi/ginkgo/v2/ginkgo@latest`. When 2.3.0 was released this resulted in an influx of issues as CI systems failed due to a change in the internal contract between the Ginkgo CLI and the Ginkgo library.
Ginkgo only supports running the same version of the library as the cli (which is why both are packaged in the same repository).

With this patch release, the ginkgo CLI can now identify a version mismatch and emit a helpful error message.

- Ginkgo cli can identify version mismatches and emit a helpful error message [bc4ae2f]
- further emphasize that a version match is required when running Ginkgo on CI and/or locally [2691dd8]

### Maintenance
- bump gomega to v1.22.0 [822a937]

## 2.3.0

### Interruptible Nodes and Timeouts

Ginkgo now supports per-node and per-spec timeouts on interruptible nodes. Check out the [documentation for all the details](https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes) but the gist is you can now write specs like this:

```go
It("is interruptible", func(ctx SpecContext) { // or context.Context instead of SpecContext, both are valid.
	// do things until `ctx.Done()` is closed, for example:
	req, err := http.NewRequestWithContext(ctx, "POST", "/build-widgets", nil)
	Expect(err).NotTo(HaveOccurred())
	_, err = http.DefaultClient.Do(req)
	Expect(err).NotTo(HaveOccurred())

	Eventually(client.WidgetCount).WithContext(ctx).Should(Equal(17))
}, NodeTimeout(time.Second*20), GracePeriod(5*time.Second))
```

and have Ginkgo ensure that the node completes before the timeout elapses. If it does elapse, or if an external interrupt is received (e.g. `^C`) then Ginkgo will cancel the context and wait for the Grace Period for the node to exit before proceeding with any cleanup nodes associated with the spec. The `ctx` provided by Ginkgo can also be passed down to Gomega's `Eventually` to have all assertions within the node governed by a single deadline.

### Features

- Ginkgo now records any additional failures that occur during the cleanup of a failed spec. In prior versions this information was quietly discarded, but the introduction of a more rigorous approach to timeouts and interruptions allows Ginkgo to better track subsequent failures.
- `SpecContext` also provides a mechanism for third-party libraries to provide additional information when a Progress Report is generated. Gomega uses this to provide the current state of an `Eventually().WithContext()` assertion when a Progress Report is requested.
- DescribeTable now exits with an error if it is not passed any Entries [a4c9865]

### Fixes
- fixes crashes on newer Ruby 3 installations by upgrading github-pages gem dependency [92c88d5]
- Make the outline command able to use the DSL import [1be2427]

### Maintenance
- chore(docs): delete no meaning d [57c373c]
- chore(docs): Fix hyperlinks [30526d5]
- chore(docs): fix code blocks without language settings [cf611c4]
- fix intra-doc link [b541bcb]

## 2.2.0

### Generate real-time Progress Reports [f91377c]

Ginkgo can now generate Progress Reports to point users at the current running line of code (including a preview of the actual source code) and a best guess at the most relevant subroutines.

These Progress Reports allow users to debug stuck or slow tests without exiting the Ginkgo process. A Progress Report can be generated at any time by sending Ginkgo a `SIGINFO` (`^T` on macOS/BSD) or `SIGUSR1`.

In addition, the user can specify `--poll-progress-after` and `--poll-progress-interval` to have Ginkgo start periodically emitting progress reports if a given node takes too long. These can be overridden/set on a per-node basis with the `PollProgressAfter` and `PollProgressInterval` decorators.
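
For example (an illustrative sketch; `cluster.Healthy` is a stand-in for your own polling function and is not part of Ginkgo):

```go
It("waits for the cluster to converge", func() {
	// a progress report is emitted if this node runs longer than 30s,
	// and every 10s thereafter
	Eventually(cluster.Healthy, "10m").Should(BeTrue())
}, PollProgressAfter(30*time.Second), PollProgressInterval(10*time.Second))
```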
Progress Reports are emitted to stdout, and also stored in the machine-readable report formats that Ginkgo supports.

Ginkgo also uses this progress reporting infrastructure under the hood when handling timeouts and interrupts. This yields much more focused, useful, and informative stack traces than previously.

### Features
- `BeforeSuite`, `AfterSuite`, `SynchronizedBeforeSuite`, `SynchronizedAfterSuite`, and `ReportAfterSuite` now support (the relevant subset of) decorators. These can be passed in _after_ the callback functions that are usually passed into these nodes.

  As a result the **signature of these methods has changed** and now includes a trailing `args ...interface{}`. For most users simply using the DSL, this change is transparent. However, if you were assigning one of these functions to a custom variable (or passing it around) then your code may need to change to reflect the new signature.

### Maintenance
- Modernize the invocation of Ginkgo in github actions [0ffde58]
- Update reocmmended CI settings in docs [896bbb9]
- Speed up unnecessarily slow integration test [6d3a90e]

## 2.1.6

### Fixes
- Add `SuppressProgressReporting` decorator to turn off --progress announcements for a given node [dfef62a]
- chore: remove duplicate word in comments [7373214]

## 2.1.5

### Fixes
- drop -mod=mod instructions; fixes #1026 [6ad7138]
- Ensure `CurrentSpecReport` and `AddReportEntry` are thread-safe [817c09b]
- remove stale importmap gcflags flag test [3cd8b93]
- Always emit spec summary [5cf23e2] - even when only one spec has failed
- Fix ReportAfterSuite usage in docs [b1864ad]
- fixed typo (#997) [219cc00]
- TrimRight is not designed to trim Suffix [71ebb74]
- refactor: replace strings.Replace with strings.ReplaceAll (#978) [143d208]
- fix syntax in examples (#975) [b69554f]

### Maintenance
- Bump github.com/onsi/gomega from 1.20.0 to 1.20.1 (#1027) [e5dfce4]
- Bump tzinfo from 1.2.9 to 1.2.10 in /docs (#1006) [7ae91c4]
- Bump github.com/onsi/gomega from 1.19.0 to 1.20.0 (#1005) [e87a85a]
- test: add new Go 1.19 to test matrix (#1014) [bbefe12]
- Bump golang.org/x/tools from 0.1.11 to 0.1.12 (#1012) [9327906]
- Bump golang.org/x/tools from 0.1.10 to 0.1.11 (#993) [f44af96]
- Bump nokogiri from 1.13.3 to 1.13.6 in /docs (#981) [ef336aa]

## 2.1.4

### Fixes
- Numerous documentation typos
- Prepend `when` when using `When` (this behavior was in 1.x but unintentionally lost during the 2.0 rewrite) [efce903]
- improve error message when a parallel process fails to report back [a7bd1fe]
- guard against concurrent map writes in DeprecationTracker [0976569]
- Invoke reporting nodes during dry-run (fixes #956 and #935) [aae4480]
- Fix ginkgo import circle [f779385]

## 2.1.3

See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2) for details on V2.
@@ -39,7 +439,7 @@ See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkg
Ginkgo 2.0 now has a Release Candidate. 1.16.5 advertises the existence of the RC.
1.16.5 deprecates GinkgoParallelNode in favor of GinkgoParallelProcess

-You can silence the RC advertisement by setting an `ACK_GINKG_RC=true` environment variable or creating a file in your home directory called `.ack-ginkgo-rc`
+You can silence the RC advertisement by setting an `ACK_GINKGO_RC=true` environment variable or creating a file in your home directory called `.ack-ginkgo-rc`

## 1.16.4
@@ -146,7 +546,7 @@ You can silence the RC advertisement by setting an `ACK_GINKG_RC=true` environme
- replace tail package with maintained one. this fixes go get errors (#667) [4ba33d4]
- improve ginkgo performance - makes progress on #644 [a14f98e]
- fix convert integration tests [1f8ba69]
-- fix typo succesful -> successful (#663) [1ea49cf]
+- fix typo successful -> successful (#663) [1ea49cf]
- Fix invalid link (#658) [b886136]
- convert utility : Include comments from source (#657) [1077c6d]
- Explain what BDD means [d79e7fb]
@@ -240,7 +640,7 @@ You can silence the RC advertisement by setting an `ACK_GINKG_RC=true` environme
- Make generated Junit file compatible with "Maven Surefire" (#488) [e51bee6]
- all: gofmt [000d317]
- Increase eventually timeout to 30s [c73579c]
-- Clarify asynchronous test behaviour [294d8f4]
+- Clarify asynchronous test behavior [294d8f4]
- Travis badge should only show master [26d2143]

## 1.5.0 5/10/2018
@@ -258,13 +658,13 @@ You can silence the RC advertisement by setting an `ACK_GINKG_RC=true` environme
- When running a test and calculating the coverage using the `-coverprofile` and `-outputdir` flags, Ginkgo fails with an error if the directory does not exist. This is due to an [issue in go 1.10](https://github.com/golang/go/issues/24588) (#446) [b36a6e0]
- `unfocus` command ignores vendor folder (#459) [e5e551c, c556e43, a3b6351, 9a820dd]
- Ignore packages whose tests are all ignored by go (#456) [7430ca7, 6d8be98]
-- Increase the threshold when checking time measuments (#455) [2f714bf, 68f622c]
+- Increase the threshold when checking time measurements (#455) [2f714bf, 68f622c]
- Fix race condition in coverage tests (#423) [a5a8ff7, ab9c08b]
- Add an extra new line after reporting spec run completion for test2json [874520d]
- added name name field to junit reported testsuite [ae61c63]
- Do not set the run time of a spec when the dryRun flag is used (#438) [457e2d9, ba8e856]
- Process FWhen and FSpecify when unfocusing (#434) [9008c7b, ee65bd, df87dfe]
-- Synchronise the access to the state of specs to avoid race conditions (#430) [7d481bc, ae6829d]
+- Synchronies the access to the state of specs to avoid race conditions (#430) [7d481bc, ae6829d]
- Added Duration on GinkgoTestDescription (#383) [5f49dad, 528417e, 0747408, 329d7ed]
- Fix Ginkgo stack trace on failure for Specify (#415) [b977ede, 65ca40e, 6c46eb8]
- Update README with Go 1.6+, Golang -> Go (#409) [17f6b97, bc14b66, 20d1598]
diff --git a/api/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md b/api/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md
index 15079406e..1da92fe7e 100644
--- a/api/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md
+++ b/api/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md
@@ -8,6 +8,6 @@ Your contributions to Ginkgo are essential for its long-term maintenance and imp
- When adding to the Ginkgo CLI, note that there are very few unit tests. Please add an integration test.
- Make sure all the tests succeed via `ginkgo -r -p`
- Vet your changes via `go vet ./...`
-- Update the documentation. Ginko uses `godoc` comments and documentation in `docs/index.md`.
You can run `bundle exec jekyll serve` in the `docs` directory to preview your changes.
+- Update the documentation. Ginkgo uses `godoc` comments and documentation in `docs/index.md`. You can run `bundle exec jekyll serve` in the `docs` directory to preview your changes.

Thanks for supporting Ginkgo!
\ No newline at end of file
diff --git a/api/vendor/github.com/onsi/ginkgo/v2/README.md b/api/vendor/github.com/onsi/ginkgo/v2/README.md
index 58507c36f..d0473a467 100644
--- a/api/vendor/github.com/onsi/ginkgo/v2/README.md
+++ b/api/vendor/github.com/onsi/ginkgo/v2/README.md
@@ -4,11 +4,7 @@

---

-# Ginkgo 2.0 is now Generally Available!
-
-You can learn more about 2.0 in the [Migration Guide](https://onsi.github.io/ginkgo/MIGRATING_TO_V2)!
-
----
+# Ginkgo

Ginkgo is a mature testing framework for Go designed to help you write expressive specs. Ginkgo builds on top of Go's `testing` foundation and is complemented by the [Gomega](https://github.com/onsi/gomega) matcher library. Together, Ginkgo and Gomega let you express the intent behind your specs clearly:

@@ -33,53 +29,53 @@ Describe("Checking books out of the library", Label("library"), func() {
	})

	When("the library has the book in question", func() {
-		BeforeEach(func() {
-			Expect(library.Store(book)).To(Succeed())
+		BeforeEach(func(ctx SpecContext) {
+			Expect(library.Store(ctx, book)).To(Succeed())
		})

		Context("and the book is available", func() {
-			It("lends it to the reader", func() {
-				Expect(valjean.Checkout(library, "Les Miserables")).To(Succeed())
+			It("lends it to the reader", func(ctx SpecContext) {
+				Expect(valjean.Checkout(ctx, library, "Les Miserables")).To(Succeed())
				Expect(valjean.Books()).To(ContainElement(book))
-				Expect(library.UserWithBook(book)).To(Equal(valjean))
-			})
+				Expect(library.UserWithBook(ctx, book)).To(Equal(valjean))
+			}, SpecTimeout(time.Second * 5))
		})

		Context("but the book has already been checked out", func() {
			var javert *users.User
-			BeforeEach(func() {
+			BeforeEach(func(ctx SpecContext) {
				javert = users.NewUser("Javert")
-				Expect(javert.Checkout(library, "Les Miserables")).To(Succeed())
+				Expect(javert.Checkout(ctx, library, "Les Miserables")).To(Succeed())
			})

-			It("tells the user", func() {
-				err := valjean.Checkout(library, "Les Miserables")
+			It("tells the user", func(ctx SpecContext) {
+				err := valjean.Checkout(ctx, library, "Les Miserables")
				Expect(err).To(MatchError("Les Miserables is currently checked out"))
-			})
+			}, SpecTimeout(time.Second * 5))

-			It("lets the user place a hold and get notified later", func() {
-				Expect(valjean.Hold(library, "Les Miserables")).To(Succeed())
-				Expect(valjean.Holds()).To(ContainElement(book))
+			It("lets the user place a hold and get notified later", func(ctx SpecContext) {
+				Expect(valjean.Hold(ctx, library, "Les Miserables")).To(Succeed())
+				Expect(valjean.Holds(ctx)).To(ContainElement(book))

				By("when Javert returns the book")
-				Expect(javert.Return(library, book)).To(Succeed())
+				Expect(javert.Return(ctx, library, book)).To(Succeed())

				By("it eventually informs Valjean")
				notification := "Les Miserables is ready for pick up"
-				Eventually(valjean.Notifications).Should(ContainElement(notification))
+				Eventually(ctx, valjean.Notifications).Should(ContainElement(notification))

-				Expect(valjean.Checkout(library, "Les Miserables")).To(Succeed())
-				Expect(valjean.Books()).To(ContainElement(book))
-				Expect(valjean.Holds()).To(BeEmpty())
-			})
+				Expect(valjean.Checkout(ctx, library, "Les Miserables")).To(Succeed())
+				Expect(valjean.Books(ctx)).To(ContainElement(book))
+				Expect(valjean.Holds(ctx)).To(BeEmpty())
+			}, SpecTimeout(time.Second * 10))
		})
	})

	When("the library does not have the book in question", func() {
-		It("tells the reader the book is unavailable", func() {
-			err := valjean.Checkout(library, "Les Miserables")
+		It("tells the reader the book is unavailable", func(ctx SpecContext) {
+			err := valjean.Checkout(ctx, library, "Les Miserables")
			Expect(err).To(MatchError("Les Miserables is not in the library catalog"))
-		})
+		}, SpecTimeout(time.Second * 5))
	})
})
```
@@ -90,9 +86,9 @@ If you have a question, comment, bug report, feature request, etc. please open a

## Capabilities

-Whether writing basic unit specs, complex integration specs, or even performance specs - Ginkgo gives you an expressive Domain-Specific Language (DSL) that will be familiar to users coming from frameworks such as [Quick](https://github.com/Quick/Quick), [RSpec](https://rspec.info), [Jasmine](https://jasmine.github.io), and [Busted](https://olivinelabs.com/busted/). This style of testing is sometimes referred to as "Behavior-Driven Development" (BDD) though Ginkgo's utility extends beyond acceptance-level testing.
+Whether writing basic unit specs, complex integration specs, or even performance specs - Ginkgo gives you an expressive Domain-Specific Language (DSL) that will be familiar to users coming from frameworks such as [Quick](https://github.com/Quick/Quick), [RSpec](https://rspec.info), [Jasmine](https://jasmine.github.io), and [Busted](https://lunarmodules.github.io/busted/). This style of testing is sometimes referred to as "Behavior-Driven Development" (BDD) though Ginkgo's utility extends beyond acceptance-level testing.

-With Ginkgo's DSL you can use nestable [`Describe`, `Context` and `When` container nodes](https://onsi.github.io/ginkgo/#organizing-specs-with-container-nodes) to help you organize your specs. [`BeforeEach` and `AfterEach` setup nodes](https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and cleanup. [`It` and `Specify` subject nodes](https://onsi.github.io/ginkgo/#spec-subjects-it) that hold your assertions. [`BeforeSuite` and `AfterSuite` nodes](https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite) to prep for and cleanup after a suite... and [much more!](https://onsi.github.io/ginkgo/#writing-specs)
+With Ginkgo's DSL you can use nestable [`Describe`, `Context` and `When` container nodes](https://onsi.github.io/ginkgo/#organizing-specs-with-container-nodes) to help you organize your specs. [`BeforeEach` and `AfterEach` setup nodes](https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and cleanup. [`It` and `Specify` subject nodes](https://onsi.github.io/ginkgo/#spec-subjects-it) that hold your assertions. [`BeforeSuite` and `AfterSuite` nodes](https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite) to prep for and cleanup after a suite... and [much more!](https://onsi.github.io/ginkgo/#writing-specs).

At runtime, Ginkgo can run your specs in reproducibly [random order](https://onsi.github.io/ginkgo/#spec-randomization) and has sophisticated support for [spec parallelization](https://onsi.github.io/ginkgo/#spec-parallelization).
In fact, running specs in parallel is as easy as @@ -100,7 +96,7 @@ At runtime, Ginkgo can run your specs in reproducibly [random order](https://ons ginkgo -p ``` -By following [established patterns for writing parallel specs](https://onsi.github.io/ginkgo/#patterns-for-parallel-integration-specs) you can build even large, complex integration suites that parallelize cleanly and run performantly. +By following [established patterns for writing parallel specs](https://onsi.github.io/ginkgo/#patterns-for-parallel-integration-specs) you can build even large, complex integration suites that parallelize cleanly and run performantly. And you don't have to worry about your spec suite hanging or leaving a mess behind - Ginkgo provides a per-node `context.Context` and the capability to interrupt the spec after a set period of time - and then clean up. As your suites grow Ginkgo helps you keep your specs organized with [labels](https://onsi.github.io/ginkgo/#spec-labels) and lets you easily run [subsets of specs](https://onsi.github.io/ginkgo/#filtering-specs), either [programmatically](https://onsi.github.io/ginkgo/#focused-specs) or on the [command line](https://onsi.github.io/ginkgo/#combining-filters). And Ginkgo's reporting infrastructure generates machine-readable output in a [variety of formats](https://onsi.github.io/ginkgo/#generating-machine-readable-reports) _and_ allows you to build your own [custom reporting infrastructure](https://onsi.github.io/ginkgo/#generating-reports-programmatically). diff --git a/api/vendor/github.com/onsi/ginkgo/v2/RELEASING.md b/api/vendor/github.com/onsi/ginkgo/v2/RELEASING.md index 0c80f668d..363815d7c 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/RELEASING.md +++ b/api/vendor/github.com/onsi/ginkgo/v2/RELEASING.md @@ -1,7 +1,13 @@ A Ginkgo release is a tagged git sha and a GitHub release. To cut a release: 1. Ensure CHANGELOG.md is up to date. 
- - Use `git log --pretty=format:'- %s [%h]' HEAD...vX.X.X` to list all the commits since the last release + - Use + ```bash + LAST_VERSION=$(git tag --sort=version:refname | tail -n1) + CHANGES=$(git log --pretty=format:'- %s [%h]' HEAD...$LAST_VERSION) + echo -e "## NEXT\n\n$CHANGES\n\n### Features\n\n### Fixes\n\n### Maintenance\n\n$(cat CHANGELOG.md)" > CHANGELOG.md + ``` + to update the changelog - Categorize the changes into - Breaking Changes (requires a major version) - New Features (minor version) diff --git a/api/vendor/github.com/onsi/ginkgo/v2/core_dsl.go b/api/vendor/github.com/onsi/ginkgo/v2/core_dsl.go index c8a1a76a6..a244bdc18 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/core_dsl.go +++ b/api/vendor/github.com/onsi/ginkgo/v2/core_dsl.go @@ -21,8 +21,8 @@ import ( "os" "path/filepath" "strings" - "time" + "github.com/go-logr/logr" "github.com/onsi/ginkgo/v2/formatter" "github.com/onsi/ginkgo/v2/internal" "github.com/onsi/ginkgo/v2/internal/global" @@ -46,7 +46,9 @@ func init() { var err error flagSet, err = types.BuildTestSuiteFlagSet(&suiteConfig, &reporterConfig) exitIfErr(err) - GinkgoWriter = internal.NewWriter(os.Stdout) + writer := internal.NewWriter(os.Stdout) + GinkgoWriter = writer + GinkgoLogr = internal.GinkgoLogrFunc(writer) } func exitIfErr(err error) { @@ -77,7 +79,7 @@ func exitIfErrors(errors []error) { } } -//The interface implemented by GinkgoWriter +// The interface implemented by GinkgoWriter type GinkgoWriterInterface interface { io.Writer @@ -89,6 +91,15 @@ type GinkgoWriterInterface interface { ClearTeeWriters() } +/* +SpecContext is the context object passed into nodes that are subject to a timeout or need to be notified of an interrupt. It implements the standard context.Context interface but also contains additional helpers to provide an extensibility point for Ginkgo. (As an example, Gomega's Eventually can use the methods defined on SpecContext to provide deeper integration with Ginkgo). + +You can do anything with SpecContext that you do with a typical context.Context including wrapping it with any of the context.With* methods. + +Ginkgo will cancel the SpecContext when a node is interrupted (e.g. by the user sending an interrupt signal) or when a node has exceeded its allowed run-time. Note, however, that even in cases where a node has a deadline, SpecContext will not return a deadline via .Deadline(). This is because Ginkgo does not use a WithDeadline() context to model node deadlines as Ginkgo needs control over the precise timing of the context cancellation to ensure it can provide an accurate progress report at the moment of cancellation. +*/ +type SpecContext = internal.SpecContext + /* GinkgoWriter implements a GinkgoWriterInterface and io.Writer @@ -103,7 +114,12 @@ You can learn more at https://onsi.github.io/ginkgo/#logging-output */ var GinkgoWriter GinkgoWriterInterface -//The interface by which Ginkgo receives *testing.T +/* +GinkgoLogr is a logr.Logger that writes to GinkgoWriter +*/ +var GinkgoLogr logr.Logger + +// The interface by which Ginkgo receives *testing.T type GinkgoTestingT interface { Fail() } @@ -147,6 +163,29 @@ func GinkgoParallelProcess() int { return suiteConfig.ParallelProcess } +/* +GinkgoHelper marks the function it's called in as a test helper. When a failure occurs inside a helper function, Ginkgo will skip the helper when analyzing the stack trace to identify where the failure occurred. + +This is an alternative, simpler, mechanism to passing in a skip offset when calling Fail or using Gomega. 
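+
+For example (an illustrative sketch; expectValidBook and Book are hypothetical and not part of Ginkgo):
+
+	func expectValidBook(book Book) {
+		GinkgoHelper() // a failure below is attributed to the caller of expectValidBook
+		Expect(book.Title).NotTo(BeEmpty())
+	}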
+*/
+func GinkgoHelper() {
+	types.MarkAsHelper(1)
+}
+
+/*
+GinkgoLabelFilter() returns the label filter configured for this suite via `--label-filter`.
+
+You can use this to manually check if a set of labels would satisfy the filter via:
+
+	if Label("cat", "dog").MatchesLabelFilter(GinkgoLabelFilter()) {
+		//...
+	}
+*/
+func GinkgoLabelFilter() string {
+	suiteConfig, _ := GinkgoConfiguration()
+	return suiteConfig.LabelFilter
+}
+
/*
PauseOutputInterception() pauses Ginkgo's output interception. This is only relevant
when running in parallel and output to stdout/stderr is being intercepted. You generally
@@ -168,7 +207,7 @@ func PauseOutputInterception() {
	outputInterceptor.PauseIntercepting()
}

-//ResumeOutputInterception() - see docs for PauseOutputInterception()
+// ResumeOutputInterception() - see docs for PauseOutputInterception()
func ResumeOutputInterception() {
	if outputInterceptor == nil {
		return
@@ -259,7 +298,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool {
	}

	writer := GinkgoWriter.(*internal.Writer)
-	if reporterConfig.Verbose && suiteConfig.ParallelTotal == 1 {
+	if reporterConfig.Verbosity().GTE(types.VerbosityLevelVerbose) && suiteConfig.ParallelTotal == 1 {
		writer.SetMode(internal.WriterModeStreamAndBuffer)
	} else {
		writer.SetMode(internal.WriterModeBufferOnly)
@@ -277,7 +316,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool {
	suitePath, err = filepath.Abs(suitePath)
	exitIfErr(err)

-	passed, hasFocusedTests := global.Suite.Run(description, suiteLabels, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(suiteConfig.Timeout, client), client, suiteConfig)
+	passed, hasFocusedTests := global.Suite.Run(description, suiteLabels, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig)

	outputInterceptor.Shutdown()

	flagSet.ValidateDeprecations(deprecationTracker)
@@ -353,6 +392,12 @@ func AbortSuite(message string, callerSkip ...int) {
	panic(types.GinkgoErrors.UncaughtGinkgoPanic(cl))
}

+/*
+ignorablePanic is used by Gomega to signal to GinkgoRecover that Gomega is handling
+the error associated with this panic. It is used when Eventually/Consistently are passed a func(g Gomega) and the resulting function launches a goroutine that makes a failed assertion. That failed assertion is registered by Gomega and then panics. Ordinarily the panic is captured by Gomega. In the case of a goroutine Gomega can't capture the panic - so we piggy back on GinkgoRecover so users have a single defer GinkgoRecover() pattern to follow. To do that we need to tell Ginkgo to ignore this panic and not register it as a panic on the global Failer.
+*/ +type ignorablePanic interface{ GinkgoRecoverShouldIgnoreThisPanic() } + /* GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail` Since Gomega assertions call fail, you should throw a `defer GinkgoRecover()` at the top of any goroutine that @@ -368,6 +413,9 @@ You can learn more about how Ginkgo manages failures here: https://onsi.github.i func GinkgoRecover() { e := recover() if e != nil { + if _, ok := e.(ignorablePanic); ok { + return + } global.Failer.Panic(types.NewCodeLocationWithStackTrace(1), e) } } @@ -421,13 +469,31 @@ var XDescribe = PDescribe var Context, FContext, PContext, XContext = Describe, FDescribe, PDescribe, XDescribe /* When is an alias for Describe - it generates the exact same kind of Container node */ -var When, FWhen, PWhen, XWhen = Describe, FDescribe, PDescribe, XDescribe +func When(text string, args ...interface{}) bool { + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...)) +} + +/* When is an alias for Describe - it generates the exact same kind of Container node */ +func FWhen(text string, args ...interface{}) bool { + args = append(args, internal.Focus) + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...)) +} + +/* When is an alias for Describe - it generates the exact same kind of Container node */ +func PWhen(text string, args ...interface{}) bool { + args = append(args, internal.Pending) + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...)) +} + +var XWhen = PWhen /* It nodes are Subject nodes that contain your spec code and assertions. Each It node corresponds to an individual Ginkgo spec. You cannot nest any other Ginkgo nodes within an It node's closure. +You can pass It nodes bare functions (func() {}) or functions that receive a SpecContext or context.Context: func(ctx SpecContext) {} and func (ctx context.Context) {}. If the function takes a context then the It is deemed interruptible and Ginkgo will cancel the context in the event of a timeout (configured via the SpecTimeout() or NodeTimeout() decorators) or of an interrupt signal. + You can learn more at https://onsi.github.io/ginkgo/#spec-subjects-it In addition, subject nodes can be decorated with a variety of decorators. You can learn more here: https://onsi.github.io/ginkgo/#decorator-reference */ @@ -474,30 +540,11 @@ and will simply log the passed in text to the GinkgoWriter. If By is handed a f By will also generate and attach a ReportEntry to the spec. This will ensure that By annotations appear in Ginkgo's machine-readable reports. 
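
For example (an illustrative sketch; client.FetchUser is a stand-in for your own code):

	By("fetching the current user")
	user := client.FetchUser("me")

	By("validating the user", func() {
		Expect(user.Name).NotTo(BeEmpty())
	})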
-Note that By does not generate a new Ginkgo node - rather it is simply synctactic sugar around GinkgoWriter and AddReportEntry +Note that By does not generate a new Ginkgo node - rather it is simply syntactic sugar around GinkgoWriter and AddReportEntry You can learn more about By here: https://onsi.github.io/ginkgo/#documenting-complex-specs-by */ func By(text string, callback ...func()) { - if !global.Suite.InRunPhase() { - exitIfErr(types.GinkgoErrors.ByNotDuringRunPhase(types.NewCodeLocation(1))) - } - value := struct { - Text string - Duration time.Duration - }{ - Text: text, - } - t := time.Now() - AddReportEntry("By Step", ReportEntryVisibilityNever, Offset(1), &value, t) - formatter := formatter.NewWithNoColorBool(reporterConfig.NoColor) - GinkgoWriter.Println(formatter.F("{{bold}}STEP:{{/}} %s {{gray}}%s{{/}}", text, t.Format(types.GINKGO_TIME_FORMAT))) - if len(callback) == 1 { - callback[0]() - value.Duration = time.Since(t) - } - if len(callback) > 1 { - panic("just one callback per By, please") - } + exitIfErr(global.Suite.By(text, callback...)) } /* @@ -506,11 +553,15 @@ When running in parallel, each parallel process will call BeforeSuite. You may only register *one* BeforeSuite handler per test suite. You typically do so in your bootstrap file at the top level. +BeforeSuite can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body. + You cannot nest any other Ginkgo nodes within a BeforeSuite node's closure. You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite */ -func BeforeSuite(body func()) bool { - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeSuite, "", body)) +func BeforeSuite(body interface{}, args ...interface{}) bool { + combinedArgs := []interface{}{body} + combinedArgs = append(combinedArgs, args...) + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeSuite, "", combinedArgs...)) } /* @@ -521,11 +572,15 @@ When running in parallel, each parallel process will call AfterSuite. You may only register *one* AfterSuite handler per test suite. You typically do so in your bootstrap file at the top level. +AfterSuite can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body. + You cannot nest any other Ginkgo nodes within an AfterSuite node's closure. You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite */ -func AfterSuite(body func()) bool { - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterSuite, "", body)) +func AfterSuite(body interface{}, args ...interface{}) bool { + combinedArgs := []interface{}{body} + combinedArgs = append(combinedArgs, args...) + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterSuite, "", combinedArgs...)) } /* @@ -536,19 +591,34 @@ information from that setup to all parallel processes. SynchronizedBeforeSuite accomplishes this by taking *two* function arguments and passing data between them. The first function is only run on parallel process #1. The second is run on all processes, but *only* after the first function completes successfully. 
The functions have the following signatures:
-The first function (which only runs on process #1) has the signature:
+The first function (which only runs on process #1) can have any of the following signatures:
+
+	func()
+	func(ctx context.Context)
+	func(ctx SpecContext)
	func() []byte
+	func(ctx context.Context) []byte
+	func(ctx SpecContext) []byte

-The byte array returned by the first function is then passed to the second function, which has the signature:
+The byte array returned by the first function (if present) is then passed to the second function, which can have any of the following signatures:
+
+	func()
+	func(ctx context.Context)
+	func(ctx SpecContext)
	func(data []byte)
+	func(ctx context.Context, data []byte)
+	func(ctx SpecContext, data []byte)
+
+If either function receives a context.Context/SpecContext it is considered interruptible.

You cannot nest any other Ginkgo nodes within a SynchronizedBeforeSuite node's closure.
You can learn more, and see some examples, here: https://onsi.github.io/ginkgo/#parallel-suite-setup-and-cleanup-synchronizedbeforesuite-and-synchronizedaftersuite
*/
-func SynchronizedBeforeSuite(process1Body func() []byte, allProcessBody func([]byte)) bool {
-	return pushNode(internal.NewSynchronizedBeforeSuiteNode(process1Body, allProcessBody, types.NewCodeLocation(1)))
+func SynchronizedBeforeSuite(process1Body interface{}, allProcessBody interface{}, args ...interface{}) bool {
+	combinedArgs := []interface{}{process1Body, allProcessBody}
+	combinedArgs = append(combinedArgs, args...)
+
+	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeSynchronizedBeforeSuite, "", combinedArgs...))
}

/*
@@ -557,21 +627,26 @@ and a piece that must only run once - on process #1.

SynchronizedAfterSuite accomplishes this by taking *two* function arguments. The first runs on all processes. The second runs only on parallel process #1
and *only* after all other processes have finished and exited. This ensures that process #1, and any resources it is managing, remain alive until
-all other processes are finished.
+all other processes are finished. These two functions can be bare functions (func()) or interruptible (func(context.Context)/func(SpecContext))

Note that you can also use DeferCleanup() in SynchronizedBeforeSuite to accomplish similar results.

You cannot nest any other Ginkgo nodes within a SynchronizedAfterSuite node's closure.
You can learn more, and see some examples, here: https://onsi.github.io/ginkgo/#parallel-suite-setup-and-cleanup-synchronizedbeforesuite-and-synchronizedaftersuite
*/
-func SynchronizedAfterSuite(allProcessBody func(), process1Body func()) bool {
-	return pushNode(internal.NewSynchronizedAfterSuiteNode(allProcessBody, process1Body, types.NewCodeLocation(1)))
+func SynchronizedAfterSuite(allProcessBody interface{}, process1Body interface{}, args ...interface{}) bool {
+	combinedArgs := []interface{}{allProcessBody, process1Body}
+	combinedArgs = append(combinedArgs, args...)
+
+	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeSynchronizedAfterSuite, "", combinedArgs...))
}

/*
BeforeEach nodes are Setup nodes whose closures run before It node closures. When multiple BeforeEach nodes
are defined in nested Container nodes the outermost BeforeEach node closures are run first.

+BeforeEach can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body.
+
You cannot nest any other Ginkgo nodes within a BeforeEach node's closure.
You can learn more here: https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach */ @@ -583,6 +658,8 @@ func BeforeEach(args ...interface{}) bool { JustBeforeEach nodes are similar to BeforeEach nodes, however they are guaranteed to run *after* all BeforeEach node closures - just before the It node closure. This can allow you to separate configuration from creation of resources for a spec. +JustBeforeEach can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body. + You cannot nest any other Ginkgo nodes within a JustBeforeEach node's closure. You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach */ @@ -596,6 +673,8 @@ are defined in nested Container nodes the innermost AfterEach node closures are Note that you can also use DeferCleanup() in other Setup or Subject nodes to accomplish similar results. +AfterEach can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body. + You cannot nest any other Ginkgo nodes within an AfterEach node's closure. You can learn more here: https://onsi.github.io/ginkgo/#spec-cleanup-aftereach-and-defercleanup */ @@ -606,6 +685,8 @@ func AfterEach(args ...interface{}) bool { /* JustAfterEach nodes are similar to AfterEach nodes, however they are guaranteed to run *before* all AfterEach node closures - just after the It node closure. This can allow you to separate diagnostics collection from teardown for a spec. +JustAfterEach can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body. + You cannot nest any other Ginkgo nodes within a JustAfterEach node's closure. You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-diagnostics-collection-and-teardown-justaftereach */ @@ -614,10 +695,12 @@ func JustAfterEach(args ...interface{}) bool { } /* -BeforeAll nodes are Setup nodes that can occur inside Ordered contaienrs. They run just once before any specs in the Ordered container run. +BeforeAll nodes are Setup nodes that can occur inside Ordered containers. They run just once before any specs in the Ordered container run. Multiple BeforeAll nodes can be defined in a given Ordered container however they cannot be nested inside any other container. +BeforeAll can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body. + You cannot nest any other Ginkgo nodes within a BeforeAll node's closure. You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#ordered-containers And you can learn more about BeforeAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall @@ -627,12 +710,14 @@ func BeforeAll(args ...interface{}) bool { } /* -AfterAll nodes are Setup nodes that can occur inside Ordered contaienrs. They run just once after all specs in the Ordered container have run. +AfterAll nodes are Setup nodes that can occur inside Ordered containers. They run just once after all specs in the Ordered container have run. Multiple AfterAll nodes can be defined in a given Ordered container however they cannot be nested inside any other container. Note that you can also use DeferCleanup() in a BeforeAll node to accomplish similar behavior. +AfterAll can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body. + You cannot nest any other Ginkgo nodes within an AfterAll node's closure. 
You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#ordered-containers
And you can learn more about AfterAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall
@@ -646,15 +731,32 @@ DeferCleanup can be called within any Setup or Subject node to register a cleanu
DeferCleanup can be passed:
1. A function that takes no arguments and returns no values.
-2. A function that returns an error (in which case it will assert that the returned error was nil, or it will fail the spec).
-3. A function that takes arguments (and optionally returns an error) followed by a list of arguments to passe to the function. For example:
+2. A function that returns multiple values. `DeferCleanup` will ignore all these return values except for the last one. If this last return value is a non-nil error `DeferCleanup` will fail the spec.
+3. A function that takes a context.Context or SpecContext (and optionally returns multiple values). The resulting cleanup node is deemed interruptible and the passed-in context will be cancelled in the event of a timeout or interrupt.
+4. A function that takes arguments (and optionally returns multiple values) followed by a list of arguments to pass to the function.
+5. A function that takes SpecContext and a list of arguments (and optionally returns multiple values) followed by a list of arguments to pass to the function.
+
+For example:
+
+	BeforeEach(func() {
+		DeferCleanup(os.Setenv, "FOO", os.Getenv("FOO"))
+		os.Setenv("FOO", "BAR")
+	})

-	BeforeEach(func() {
-		DeferCleanup(os.SetEnv, "FOO", os.GetEnv("FOO"))
-		os.SetEnv("FOO", "BAR")
-	})
+will register a cleanup handler that will set the environment variable "FOO" to its current value (obtained by os.Getenv("FOO")) after the spec runs and then sets the environment variable "FOO" to "BAR" for the current spec.

-will register a cleanup handler that will set the environment variable "FOO" to it's current value (obtained by os.GetEnv("FOO")) after the spec runs and then sets the environment variable "FOO" to "BAR" for the current spec.
+Similarly:
+
+	BeforeEach(func() {
+		DeferCleanup(func(ctx SpecContext, path string) {
+			req, err := http.NewRequestWithContext(ctx, "POST", path, nil)
+			Expect(err).NotTo(HaveOccurred())
+			_, err = http.DefaultClient.Do(req)
+			Expect(err).NotTo(HaveOccurred())
+		}, "example.com/cleanup", NodeTimeout(time.Second*3))
+	})
+
+will register a cleanup handler that will have three seconds to successfully complete a request to the specified path. Note that we do not specify a context in the list of arguments passed to DeferCleanup - only in the signature of the function we pass in. Ginkgo will detect the requested context and supply a SpecContext when it invokes the cleanup node. If you want to pass in your own context in addition to the Ginkgo-provided SpecContext you must specify the SpecContext as the first argument (e.g. func(ctx SpecContext, otherCtx context.Context)).

When DeferCleanup is called in BeforeEach, JustBeforeEach, It, AfterEach, or JustAfterEach the registered callback will be invoked when the spec completes (i.e. it will behave like an AfterEach node)
When DeferCleanup is called in BeforeAll or AfterAll the registered callback will be invoked when the ordered container completes (i.e.
it will behave like an AfterAll node) @@ -667,5 +769,26 @@ func DeferCleanup(args ...interface{}) { fail := func(message string, cl types.CodeLocation) { global.Failer.Fail(message, cl) } - pushNode(internal.NewCleanupNode(fail, args...)) + pushNode(internal.NewCleanupNode(deprecationTracker, fail, args...)) +} + +/* +AttachProgressReporter allows you to register a function that will be called whenever Ginkgo generates a Progress Report. The contents returned by the function will be included in the report. + +**This is an experimental feature and the public-facing interface may change in a future minor version of Ginkgo** + +Progress Reports are generated: +- whenever the user explicitly requests one (via `SIGINFO` or `SIGUSR1`) +- on nodes decorated with PollProgressAfter +- on suites run with --poll-progress-after +- whenever a test times out + +Ginkgo uses Progress Reports to convey the current state of the test suite, including any running goroutines. By attaching a progress reporter you are able to supplement these reports with additional information. + +# AttachProgressReporter returns a function that can be called to detach the progress reporter + +You can learn more about AttachProgressReporter here: https://onsi.github.io/ginkgo/#attaching-additional-information-to-progress-reports +*/ +func AttachProgressReporter(reporter func() string) func() { + return global.Suite.AttachProgressReporter(reporter) } diff --git a/api/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go b/api/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go index f23e526f3..c65af4ce1 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go +++ b/api/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go @@ -13,13 +13,21 @@ You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorat type Offset = internal.Offset /* -FlakeAttempts(uint N) is a decorator that allows you to mark individual specs or spec containers as flaky. Ginkgo will run them up to `N` times until they pass. +FlakeAttempts(uint N) is a decorator that allows you to mark individual specs or spec containers as flaky. Ginkgo will run them up to `N` times until they pass. -You can learn more here: https://onsi.github.io/ginkgo/#repeating-spec-runs-and-managing-flaky-specs +You can learn more here: https://onsi.github.io/ginkgo/#the-flakeattempts-decorator You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference */ type FlakeAttempts = internal.FlakeAttempts +/* +MustPassRepeatedly(uint N) is a decorator that allows you to repeat the execution of individual specs or spec containers. Ginkgo will run them up to `N` times until they fail. + +You can learn more here: https://onsi.github.io/ginkgo/#the-mustpassrepeatedly-decorator +You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +type MustPassRepeatedly = internal.MustPassRepeatedly + /* Focus is a decorator that allows you to mark a spec or container as focused. Identical to FIt and FDescribe. @@ -38,7 +46,7 @@ const Pending = internal.Pending /* Serial is a decorator that allows you to mark a spec or container as serial. These specs will never run in parallel with other specs. -Tests in ordered containers cannot be marked as serial - mark the ordered container instead. +Specs in ordered containers cannot be marked as serial - mark the ordered container instead. 
You can learn more here: https://onsi.github.io/ginkgo/#serial-specs
You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
@@ -46,7 +54,7 @@ You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorat
*/
const Serial = internal.Serial

/*
-Ordered is a decorator that allows you to mark a container as ordered. Tests in the container will always run in the order they appear.
+Ordered is a decorator that allows you to mark a container as ordered. Specs in the container will always run in the order they appear.
They will never be randomized and they will never run in parallel with one another, though they may run in parallel with other specs.

You can learn more here: https://onsi.github.io/ginkgo/#ordered-containers
@@ -54,12 +62,22 @@ You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorat
*/
const Ordered = internal.Ordered

+/*
+ContinueOnFailure is a decorator that allows you to mark an Ordered container to continue running specs even if failures occur. Ordinarily an ordered container will stop running specs after the first failure occurs. Note that if a BeforeAll or a BeforeEach/JustBeforeEach annotated with OncePerOrdered fails then no specs will run, as the precondition for the Ordered container will be considered to have failed.
+
+ContinueOnFailure only applies to the outermost Ordered container. Attempting to place ContinueOnFailure in a nested container will result in an error.
+
+You can learn more here: https://onsi.github.io/ginkgo/#ordered-containers
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+const ContinueOnFailure = internal.ContinueOnFailure
+
/*
OncePerOrdered is a decorator that allows you to mark outer BeforeEach, AfterEach, JustBeforeEach, and JustAfterEach setup nodes to run once
per ordered context. Normally these setup nodes run around each individual spec, with OncePerOrdered they will run once around the set of specs in an ordered container.
The behavior for non-Ordered containers/specs is unchanged.

-You can learh more here: https://onsi.github.io/ginkgo/#setup-around-ordered-containers-the-onceperordered-decorator
+You can learn more here: https://onsi.github.io/ginkgo/#setup-around-ordered-containers-the-onceperordered-decorator
You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
*/
const OncePerOrdered = internal.OncePerOrdered
@@ -80,3 +98,46 @@ Labels are the type for spec Label decorators. Use Label(...) to construct Labe
You can learn more here: https://onsi.github.io/ginkgo/#spec-labels
*/
type Labels = internal.Labels
+
+/*
+PollProgressAfter allows you to override the configured value for --poll-progress-after for a particular node.
+
+Ginkgo will start emitting node progress if the node is still running after a duration of PollProgressAfter. This allows you to get quicker feedback about the state of a long-running spec.
+*/
+type PollProgressAfter = internal.PollProgressAfter
+
+/*
+PollProgressInterval allows you to override the configured value for --poll-progress-interval for a particular node.
+
+Once a node has been running for longer than PollProgressAfter Ginkgo will emit node progress periodically at an interval of PollProgressInterval.
+*/
+type PollProgressInterval = internal.PollProgressInterval
+
+/*
+NodeTimeout allows you to specify a timeout for an individual node. The node cannot be a container and must be interruptible (i.e. it must be passed a function that accepts a SpecContext or context.Context).
+
+If the node does not exit within the specified NodeTimeout its context will be cancelled. The node will then have a period of time controlled by the GracePeriod decorator (or global --grace-period command-line argument) to exit. If the node does not exit within GracePeriod Ginkgo will leak the node and proceed to any clean-up nodes associated with the current spec.
+*/
+type NodeTimeout = internal.NodeTimeout
+
+/*
+SpecTimeout allows you to specify a timeout for an individual spec. SpecTimeout can only decorate interruptible It nodes.
+
+All nodes associated with the It node will need to complete before the SpecTimeout has elapsed. Individual nodes (e.g. BeforeEach) may be decorated with different NodeTimeouts - but these can only serve to provide a more stringent deadline for the node in question; they cannot extend the deadline past the SpecTimeout.
+
+If the spec does not complete within the specified SpecTimeout the currently running node will have its context cancelled. The node will then have a period of time controlled by that node's GracePeriod decorator (or global --grace-period command-line argument) to exit. If the node does not exit within GracePeriod Ginkgo will leak the node and proceed to any clean-up nodes associated with the current spec.
+*/
+type SpecTimeout = internal.SpecTimeout
+
+/*
+GracePeriod denotes the period of time Ginkgo will wait for an interruptible node to exit once an interruption (whether due to a timeout or a user-invoked signal) has occurred. If both the global --grace-period cli flag and a GracePeriod decorator are specified the value in the decorator will take precedence.
+
+Nodes that do not finish within a GracePeriod will be leaked and Ginkgo will proceed to run subsequent nodes. In the event of a timeout, such leaks will be reported to the user.
+*/
+type GracePeriod = internal.GracePeriod
+
+/*
+SuppressProgressReporting is a decorator that allows you to disable progress reporting of a particular node. This is useful if `ginkgo -v -progress` is generating too much noise; particularly
+if you have a `ReportAfterEach` node that is running for every skipped spec and is generating lots of progress reports.
+*/
+const SuppressProgressReporting = internal.SuppressProgressReporting
diff --git a/api/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go b/api/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go
index d20e5a8ce..f912bbec6 100644
--- a/api/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go
+++ b/api/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go
@@ -13,7 +13,7 @@ import (
Deprecated: Done Channel for asynchronous testing

The Done channel pattern is no longer supported in Ginkgo 2.0.
-See here for better patterns for asynchronouse testing: https://onsi.github.io/ginkgo/#patterns-for-asynchronous-testing +See here for better patterns for asynchronous testing: https://onsi.github.io/ginkgo/#patterns-for-asynchronous-testing For a migration guide see: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-async-testing */ diff --git a/api/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go b/api/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go index 43b16211d..743555dde 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go +++ b/api/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "regexp" + "strconv" "strings" ) @@ -50,6 +51,37 @@ func NewWithNoColorBool(noColor bool) Formatter { } func New(colorMode ColorMode) Formatter { + colorAliases := map[string]int{ + "black": 0, + "red": 1, + "green": 2, + "yellow": 3, + "blue": 4, + "magenta": 5, + "cyan": 6, + "white": 7, + } + for colorAlias, n := range colorAliases { + colorAliases[fmt.Sprintf("bright-%s", colorAlias)] = n + 8 + } + + getColor := func(color, defaultEscapeCode string) string { + color = strings.ToUpper(strings.ReplaceAll(color, "-", "_")) + envVar := fmt.Sprintf("GINKGO_CLI_COLOR_%s", color) + envVarColor := os.Getenv(envVar) + if envVarColor == "" { + return defaultEscapeCode + } + if colorCode, ok := colorAliases[envVarColor]; ok { + return fmt.Sprintf("\x1b[38;5;%dm", colorCode) + } + colorCode, err := strconv.Atoi(envVarColor) + if err != nil || colorCode < 0 || colorCode > 255 { + return defaultEscapeCode + } + return fmt.Sprintf("\x1b[38;5;%dm", colorCode) + } + f := Formatter{ ColorMode: colorMode, colors: map[string]string{ @@ -57,18 +89,18 @@ func New(colorMode ColorMode) Formatter { "bold": "\x1b[1m", "underline": "\x1b[4m", - "red": "\x1b[38;5;9m", - "orange": "\x1b[38;5;214m", - "coral": "\x1b[38;5;204m", - "magenta": "\x1b[38;5;13m", - "green": "\x1b[38;5;10m", - "dark-green": "\x1b[38;5;28m", - "yellow": "\x1b[38;5;11m", - "light-yellow": "\x1b[38;5;228m", - "cyan": "\x1b[38;5;14m", - "gray": "\x1b[38;5;243m", - "light-gray": "\x1b[38;5;246m", - "blue": "\x1b[38;5;12m", + "red": getColor("red", "\x1b[38;5;9m"), + "orange": getColor("orange", "\x1b[38;5;214m"), + "coral": getColor("coral", "\x1b[38;5;204m"), + "magenta": getColor("magenta", "\x1b[38;5;13m"), + "green": getColor("green", "\x1b[38;5;10m"), + "dark-green": getColor("dark-green", "\x1b[38;5;28m"), + "yellow": getColor("yellow", "\x1b[38;5;11m"), + "light-yellow": getColor("light-yellow", "\x1b[38;5;228m"), + "cyan": getColor("cyan", "\x1b[38;5;14m"), + "gray": getColor("gray", "\x1b[38;5;243m"), + "light-gray": getColor("light-gray", "\x1b[38;5;246m"), + "blue": getColor("blue", "\x1b[38;5;12m"), }, } colors := []string{} @@ -88,7 +120,10 @@ func (f Formatter) Fi(indentation uint, format string, args ...interface{}) stri } func (f Formatter) Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string { - out := fmt.Sprintf(f.style(format), args...) + out := f.style(format) + if len(args) > 0 { + out = fmt.Sprintf(out, args...) 
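+		// only treat the styled string as a printf template when arguments were
+		// actually supplied; a bare pre-formatted string passes through unchanged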
+ } if indentation == 0 && maxWidth == 0 { return out diff --git a/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go new file mode 100644 index 000000000..5db5d1a7b --- /dev/null +++ b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go @@ -0,0 +1,63 @@ +package build + +import ( + "fmt" + + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildBuildCommand() command.Command { + var cliConfig = types.NewDefaultCLIConfig() + var goFlagsConfig = types.NewDefaultGoFlagsConfig() + + flags, err := types.BuildBuildCommandFlagSet(&cliConfig, &goFlagsConfig) + if err != nil { + panic(err) + } + + return command.Command{ + Name: "build", + Flags: flags, + Usage: "ginkgo build <PACKAGES>", + ShortDoc: "Build the passed in <PACKAGES> (or the package in the current directory if left blank).", + DocLink: "precompiling-suites", + Command: func(args []string, _ []string) { + var errors []error + cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig) + command.AbortIfErrors("Ginkgo detected configuration issues:", errors) + + buildSpecs(args, cliConfig, goFlagsConfig) + }, + } +} + +func buildSpecs(args []string, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig) { + suites := internal.FindSuites(args, cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter) + if len(suites) == 0 { + command.AbortWith("Found no test suites") + } + + internal.VerifyCLIAndFrameworkVersion(suites) + + opc := internal.NewOrderedParallelCompiler(cliConfig.ComputedNumCompilers()) + opc.StartCompiling(suites, goFlagsConfig) + + for { + suiteIdx, suite := opc.Next() + if suiteIdx >= len(suites) { + break + } + suites[suiteIdx] = suite + if suite.State.Is(internal.TestSuiteStateFailedToCompile) { + fmt.Println(suite.CompilationError.Error()) + } else { + fmt.Printf("Compiled %s.test\n", suite.PackageName) + } + } + + if suites.CountWithState(internal.TestSuiteStateFailedToCompile) > 0 { + command.AbortWith("Failed to compile all tests") + } +} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go new file mode 100644 index 000000000..2efd28608 --- /dev/null +++ b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go @@ -0,0 +1,61 @@ +package command + +import "fmt" + +type AbortDetails struct { + ExitCode int + Error error + EmitUsage bool +} + +func Abort(details AbortDetails) { + panic(details) +} + +func AbortGracefullyWith(format string, args ...interface{}) { + Abort(AbortDetails{ + ExitCode: 0, + Error: fmt.Errorf(format, args...), + EmitUsage: false, + }) +} + +func AbortWith(format string, args ...interface{}) { + Abort(AbortDetails{ + ExitCode: 1, + Error: fmt.Errorf(format, args...), + EmitUsage: false, + }) +} + +func AbortWithUsage(format string, args ...interface{}) { + Abort(AbortDetails{ + ExitCode: 1, + Error: fmt.Errorf(format, args...), + EmitUsage: true, + }) +} + +func AbortIfError(preamble string, err error) { + if err != nil { + Abort(AbortDetails{ + ExitCode: 1, + Error: fmt.Errorf("%s\n%s", preamble, err.Error()), + EmitUsage: false, + }) + } +} + +func AbortIfErrors(preamble string, errors []error) { + if len(errors) > 0 { + out := "" + for _, err := range errors { + out += err.Error() + } + Abort(AbortDetails{ + ExitCode: 1, + Error: fmt.Errorf("%s\n%s", preamble, out), + EmitUsage:
false, + }) + } +} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go new file mode 100644 index 000000000..12e0e5659 --- /dev/null +++ b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go @@ -0,0 +1,50 @@ +package command + +import ( + "fmt" + "io" + "strings" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/types" +) + +type Command struct { + Name string + Flags types.GinkgoFlagSet + Usage string + ShortDoc string + Documentation string + DocLink string + Command func(args []string, additionalArgs []string) +} + +func (c Command) Run(args []string, additionalArgs []string) { + args, err := c.Flags.Parse(args) + if err != nil { + AbortWithUsage(err.Error()) + } + + c.Command(args, additionalArgs) +} + +func (c Command) EmitUsage(writer io.Writer) { + fmt.Fprintln(writer, formatter.F("{{bold}}"+c.Usage+"{{/}}")) + fmt.Fprintln(writer, formatter.F("{{gray}}%s{{/}}", strings.Repeat("-", len(c.Usage)))) + if c.ShortDoc != "" { + fmt.Fprintln(writer, formatter.Fiw(0, formatter.COLS, c.ShortDoc)) + fmt.Fprintln(writer, "") + } + if c.Documentation != "" { + fmt.Fprintln(writer, formatter.Fiw(0, formatter.COLS, c.Documentation)) + fmt.Fprintln(writer, "") + } + if c.DocLink != "" { + fmt.Fprintln(writer, formatter.Fi(0, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}http://onsi.github.io/ginkgo/#%s{{/}}", c.DocLink)) + fmt.Fprintln(writer, "") + } + flagUsage := c.Flags.Usage() + if flagUsage != "" { + fmt.Fprintf(writer, formatter.F(flagUsage)) + } +} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go new file mode 100644 index 000000000..88dd8d6b0 --- /dev/null +++ b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go @@ -0,0 +1,182 @@ +package command + +import ( + "fmt" + "io" + "os" + "strings" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/types" +) + +type Program struct { + Name string + Heading string + Commands []Command + DefaultCommand Command + DeprecatedCommands []DeprecatedCommand + + //For testing - leave as nil in production + OutWriter io.Writer + ErrWriter io.Writer + Exiter func(code int) +} + +type DeprecatedCommand struct { + Name string + Deprecation types.Deprecation +} + +func (p Program) RunAndExit(osArgs []string) { + var command Command + deprecationTracker := types.NewDeprecationTracker() + if p.Exiter == nil { + p.Exiter = os.Exit + } + if p.OutWriter == nil { + p.OutWriter = formatter.ColorableStdOut + } + if p.ErrWriter == nil { + p.ErrWriter = formatter.ColorableStdErr + } + + defer func() { + exitCode := 0 + + if r := recover(); r != nil { + details, ok := r.(AbortDetails) + if !ok { + panic(r) + } + + if details.Error != nil { + fmt.Fprintln(p.ErrWriter, formatter.F("{{red}}{{bold}}%s %s{{/}} {{red}}failed{{/}}", p.Name, command.Name)) + fmt.Fprintln(p.ErrWriter, formatter.Fi(1, details.Error.Error())) + } + if details.EmitUsage { + if details.Error != nil { + fmt.Fprintln(p.ErrWriter, "") + } + command.EmitUsage(p.ErrWriter) + } + exitCode = details.ExitCode + } + + command.Flags.ValidateDeprecations(deprecationTracker) + if deprecationTracker.DidTrackDeprecations() { + fmt.Fprintln(p.ErrWriter, deprecationTracker.DeprecationsReport()) + } + p.Exiter(exitCode) + return + }() + + args, additionalArgs := []string{}, []string{} + + foundDelimiter := false + for _, arg := range osArgs[1:] { + if !foundDelimiter { + 
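+ // Arguments before a bare "--" are parsed as flags and arguments for the
+ // ginkgo CLI itself; everything after it is collected into additionalArgs
+ // and handed to the selected command untouched. For example,
+ // `ginkgo -- -customFlag=3` forwards -customFlag=3 to the compiled test binary.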
if arg == "--" { + foundDelimiter = true + continue + } + } + + if foundDelimiter { + additionalArgs = append(additionalArgs, arg) + } else { + args = append(args, arg) + } + } + + command = p.DefaultCommand + if len(args) > 0 { + p.handleHelpRequestsAndExit(p.OutWriter, args) + if command.Name == args[0] { + args = args[1:] + } else { + for _, deprecatedCommand := range p.DeprecatedCommands { + if deprecatedCommand.Name == args[0] { + deprecationTracker.TrackDeprecation(deprecatedCommand.Deprecation) + return + } + } + for _, tryCommand := range p.Commands { + if tryCommand.Name == args[0] { + command, args = tryCommand, args[1:] + break + } + } + } + } + + command.Run(args, additionalArgs) +} + +func (p Program) handleHelpRequestsAndExit(writer io.Writer, args []string) { + if len(args) == 0 { + return + } + + matchesHelpFlag := func(args ...string) bool { + for _, arg := range args { + if arg == "--help" || arg == "-help" || arg == "-h" || arg == "--h" { + return true + } + } + return false + } + if len(args) == 1 { + if args[0] == "help" || matchesHelpFlag(args[0]) { + p.EmitUsage(writer) + Abort(AbortDetails{}) + } + } else { + var name string + if args[0] == "help" || matchesHelpFlag(args[0]) { + name = args[1] + } else if matchesHelpFlag(args[1:]...) { + name = args[0] + } else { + return + } + + if p.DefaultCommand.Name == name || p.Name == name { + p.DefaultCommand.EmitUsage(writer) + Abort(AbortDetails{}) + } + for _, command := range p.Commands { + if command.Name == name { + command.EmitUsage(writer) + Abort(AbortDetails{}) + } + } + + fmt.Fprintln(writer, formatter.F("{{red}}Unknown Command: {{bold}}%s{{/}}", name)) + fmt.Fprintln(writer, "") + p.EmitUsage(writer) + Abort(AbortDetails{ExitCode: 1}) + } + return +} + +func (p Program) EmitUsage(writer io.Writer) { + fmt.Fprintln(writer, formatter.F(p.Heading)) + fmt.Fprintln(writer, formatter.F("{{gray}}%s{{/}}", strings.Repeat("-", len(p.Heading)))) + fmt.Fprintln(writer, formatter.F("For usage information for a command, run {{bold}}%s help COMMAND{{/}}.", p.Name)) + fmt.Fprintln(writer, formatter.F("For usage information for the default command, run {{bold}}%s help %s{{/}} or {{bold}}%s help %s{{/}}.", p.Name, p.Name, p.Name, p.DefaultCommand.Name)) + fmt.Fprintln(writer, "") + fmt.Fprintln(writer, formatter.F("The following commands are available:")) + + fmt.Fprintln(writer, formatter.Fi(1, "{{bold}}%s{{/}} or %s {{bold}}%s{{/}} - {{gray}}%s{{/}}", p.Name, p.Name, p.DefaultCommand.Name, p.DefaultCommand.Usage)) + if p.DefaultCommand.ShortDoc != "" { + fmt.Fprintln(writer, formatter.Fi(2, p.DefaultCommand.ShortDoc)) + } + + for _, command := range p.Commands { + fmt.Fprintln(writer, formatter.Fi(1, "{{bold}}%s{{/}} - {{gray}}%s{{/}}", command.Name, command.Usage)) + if command.ShortDoc != "" { + fmt.Fprintln(writer, formatter.Fi(2, command.ShortDoc)) + } + } +} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go new file mode 100644 index 000000000..a367a1fc9 --- /dev/null +++ b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go @@ -0,0 +1,48 @@ +package generators + +var bootstrapText = `package {{.Package}} + +import ( + "testing" + + {{.GinkgoImport}} + {{.GomegaImport}} +) + +func Test{{.FormattedName}}(t *testing.T) { + {{.GomegaPackage}}RegisterFailHandler({{.GinkgoPackage}}Fail) + {{.GinkgoPackage}}RunSpecs(t, "{{.FormattedName}} Suite") +} +` + +var agoutiBootstrapText = 
`package {{.Package}} + +import ( + "testing" + + {{.GinkgoImport}} + {{.GomegaImport}} + "github.com/sclevine/agouti" +) + +func Test{{.FormattedName}}(t *testing.T) { + {{.GomegaPackage}}RegisterFailHandler({{.GinkgoPackage}}Fail) + {{.GinkgoPackage}}RunSpecs(t, "{{.FormattedName}} Suite") +} + +var agoutiDriver *agouti.WebDriver + +var _ = {{.GinkgoPackage}}BeforeSuite(func() { + // Choose a WebDriver: + + agoutiDriver = agouti.PhantomJS() + // agoutiDriver = agouti.Selenium() + // agoutiDriver = agouti.ChromeDriver() + + {{.GomegaPackage}}Expect(agoutiDriver.Start()).To({{.GomegaPackage}}Succeed()) +}) + +var _ = {{.GinkgoPackage}}AfterSuite(func() { + {{.GomegaPackage}}Expect(agoutiDriver.Stop()).To({{.GomegaPackage}}Succeed()) +}) +` diff --git a/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go new file mode 100644 index 000000000..73aff0b7a --- /dev/null +++ b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go @@ -0,0 +1,133 @@ +package generators + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "text/template" + + sprig "github.com/go-task/slim-sprig" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildBootstrapCommand() command.Command { + conf := GeneratorsConfig{} + flags, err := types.NewGinkgoFlagSet( + types.GinkgoFlags{ + {Name: "agouti", KeyPath: "Agouti", + Usage: "If set, bootstrap will generate a bootstrap file for writing Agouti tests"}, + {Name: "nodot", KeyPath: "NoDot", + Usage: "If set, bootstrap will generate a bootstrap test file that does not dot-import ginkgo and gomega"}, + {Name: "internal", KeyPath: "Internal", + Usage: "If set, bootstrap will generate a bootstrap test file that uses the regular package name (i.e. `package X`, not `package X_test`)"}, + {Name: "template", KeyPath: "CustomTemplate", + UsageArgument: "template-file", + Usage: "If specified, generate will use the contents of the file passed as the bootstrap template"}, + {Name: "template-data", KeyPath: "CustomTemplateData", + UsageArgument: "template-data-file", + Usage: "If specified, generate will use the contents of the file passed as data to be rendered in the bootstrap template"}, + }, + &conf, + types.GinkgoFlagSections{}, + ) + + if err != nil { + panic(err) + } + + return command.Command{ + Name: "bootstrap", + Usage: "ginkgo bootstrap", + ShortDoc: "Bootstrap a test suite for the current package", + Documentation: `Tests written in Ginkgo and Gomega require a small amount of boilerplate to hook into Go's testing infrastructure. + +{{bold}}ginkgo bootstrap{{/}} generates this boilerplate for you in a file named X_suite_test.go where X is the name of the package under test.`, + DocLink: "generators", + Flags: flags, + Command: func(_ []string, _ []string) { + generateBootstrap(conf) + }, + } +} + +type bootstrapData struct { + Package string + FormattedName string + + GinkgoImport string + GomegaImport string + GinkgoPackage string + GomegaPackage string + CustomData map[string]any +} + +func generateBootstrap(conf GeneratorsConfig) { + packageName, bootstrapFilePrefix, formattedName := getPackageAndFormattedName() + + data := bootstrapData{ + Package: determinePackageName(packageName, conf.Internal), + FormattedName: formattedName, + + GinkgoImport: `. "github.com/onsi/ginkgo/v2"`, + GomegaImport: `. 
"github.com/onsi/gomega"`, + GinkgoPackage: "", + GomegaPackage: "", + } + + if conf.NoDot { + data.GinkgoImport = `"github.com/onsi/ginkgo/v2"` + data.GomegaImport = `"github.com/onsi/gomega"` + data.GinkgoPackage = `ginkgo.` + data.GomegaPackage = `gomega.` + } + + targetFile := fmt.Sprintf("%s_suite_test.go", bootstrapFilePrefix) + if internal.FileExists(targetFile) { + command.AbortWith("{{bold}}%s{{/}} already exists", targetFile) + } else { + fmt.Printf("Generating ginkgo test suite bootstrap for %s in:\n\t%s\n", packageName, targetFile) + } + + f, err := os.Create(targetFile) + command.AbortIfError("Failed to create file:", err) + defer f.Close() + + var templateText string + if conf.CustomTemplate != "" { + tpl, err := os.ReadFile(conf.CustomTemplate) + command.AbortIfError("Failed to read custom bootstrap file:", err) + templateText = string(tpl) + if conf.CustomTemplateData != "" { + var tplCustomDataMap map[string]any + tplCustomData, err := os.ReadFile(conf.CustomTemplateData) + command.AbortIfError("Failed to read custom bootstrap data file:", err) + if !json.Valid([]byte(tplCustomData)) { + command.AbortWith("Invalid JSON object in custom data file.") + } + //create map from the custom template data + json.Unmarshal(tplCustomData, &tplCustomDataMap) + data.CustomData = tplCustomDataMap + } + } else if conf.Agouti { + templateText = agoutiBootstrapText + } else { + templateText = bootstrapText + } + + //Setting the option to explicitly fail if template is rendered trying to access missing key + bootstrapTemplate, err := template.New("bootstrap").Funcs(sprig.TxtFuncMap()).Option("missingkey=error").Parse(templateText) + command.AbortIfError("Failed to parse bootstrap template:", err) + + buf := &bytes.Buffer{} + //Being explicit about failing sooner during template rendering + //when accessing custom data rather than during the go fmt command + err = bootstrapTemplate.Execute(buf, data) + command.AbortIfError("Failed to render bootstrap template:", err) + + buf.WriteTo(f) + + internal.GoFmt(targetFile) +} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go new file mode 100644 index 000000000..48d23f919 --- /dev/null +++ b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go @@ -0,0 +1,259 @@ +package generators + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + "text/template" + + sprig "github.com/go-task/slim-sprig" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildGenerateCommand() command.Command { + conf := GeneratorsConfig{} + flags, err := types.NewGinkgoFlagSet( + types.GinkgoFlags{ + {Name: "agouti", KeyPath: "Agouti", + Usage: "If set, generate will create a test file for writing Agouti tests"}, + {Name: "nodot", KeyPath: "NoDot", + Usage: "If set, generate will create a test file that does not dot-import ginkgo and gomega"}, + {Name: "internal", KeyPath: "Internal", + Usage: "If set, generate will create a test file that uses the regular package name (i.e.
`package X`, not `package X_test`)"}, + {Name: "template", KeyPath: "CustomTemplate", + UsageArgument: "template-file", + Usage: "If specified, generate will use the contents of the file passed as the test file template"}, + {Name: "template-data", KeyPath: "CustomTemplateData", + UsageArgument: "template-data-file", + Usage: "If specified, generate will use the contents of the file passed as data to be rendered in the test file template"}, + }, + &conf, + types.GinkgoFlagSections{}, + ) + + if err != nil { + panic(err) + } + + return command.Command{ + Name: "generate", + Usage: "ginkgo generate <SUBJECT>", + ShortDoc: "Generate a test file named <SUBJECT>_test.go", + Documentation: `If the optional <SUBJECT> argument is omitted, a file named after the package in the current directory will be created. + +You can pass multiple <SUBJECT>s to generate multiple files simultaneously. The resulting files are named <SUBJECT>_test.go. + +You can also pass a <SUBJECT> of the form "file.go" and generate will emit "file_test.go".`, + DocLink: "generators", + Flags: flags, + Command: func(args []string, _ []string) { + generateTestFiles(conf, args) + }, + } +} + +type specData struct { + Package string + Subject string + PackageImportPath string + ImportPackage bool + + GinkgoImport string + GomegaImport string + GinkgoPackage string + GomegaPackage string + CustomData map[string]any +} + +func generateTestFiles(conf GeneratorsConfig, args []string) { + subjects := args + if len(subjects) == 0 { + subjects = []string{""} + } + for _, subject := range subjects { + generateTestFileForSubject(subject, conf) + } +} + +func generateTestFileForSubject(subject string, conf GeneratorsConfig) { + packageName, specFilePrefix, formattedName := getPackageAndFormattedName() + if subject != "" { + specFilePrefix = formatSubject(subject) + formattedName = prettifyName(specFilePrefix) + } + + if conf.Internal { + specFilePrefix = specFilePrefix + "_internal" + } + + data := specData{ + Package: determinePackageName(packageName, conf.Internal), + Subject: formattedName, + PackageImportPath: getPackageImportPath(), + ImportPackage: !conf.Internal, + + GinkgoImport: `. "github.com/onsi/ginkgo/v2"`, + GomegaImport: `. 
"github.com/onsi/gomega"`, + GinkgoPackage: "", + GomegaPackage: "", + } + + if conf.NoDot { + data.GinkgoImport = `"github.com/onsi/ginkgo/v2"` + data.GomegaImport = `"github.com/onsi/gomega"` + data.GinkgoPackage = `ginkgo.` + data.GomegaPackage = `gomega.` + } + + targetFile := fmt.Sprintf("%s_test.go", specFilePrefix) + if internal.FileExists(targetFile) { + command.AbortWith("{{bold}}%s{{/}} already exists", targetFile) + } else { + fmt.Printf("Generating ginkgo test for %s in:\n %s\n", data.Subject, targetFile) + } + + f, err := os.Create(targetFile) + command.AbortIfError("Failed to create test file:", err) + defer f.Close() + + var templateText string + if conf.CustomTemplate != "" { + tpl, err := os.ReadFile(conf.CustomTemplate) + command.AbortIfError("Failed to read custom template file:", err) + templateText = string(tpl) + if conf.CustomTemplateData != "" { + var tplCustomDataMap map[string]any + tplCustomData, err := os.ReadFile(conf.CustomTemplateData) + command.AbortIfError("Failed to read custom template data file:", err) + if !json.Valid([]byte(tplCustomData)) { + command.AbortWith("Invalid JSON object in custom data file.") + } + //create map from the custom template data + json.Unmarshal(tplCustomData, &tplCustomDataMap) + data.CustomData = tplCustomDataMap + } + } else if conf.Agouti { + templateText = agoutiSpecText + } else { + templateText = specText + } + + //Setting the option to explicitly fail if template is rendered trying to access missing key + specTemplate, err := template.New("spec").Funcs(sprig.TxtFuncMap()).Option("missingkey=error").Parse(templateText) + command.AbortIfError("Failed to parse test template:", err) + + //Being explicit about failing sooner during template rendering + //when accessing custom data rather than during the go fmt command + err = specTemplate.Execute(f, data) + command.AbortIfError("Failed to render test template:", err) + internal.GoFmt(targetFile) +} + +func formatSubject(name string) string { + name = strings.ReplaceAll(name, "-", "_") + name = strings.ReplaceAll(name, " ", "_") + name = strings.Split(name, ".go")[0] + name = strings.Split(name, "_test")[0] + return name +} + +// moduleName returns module name from go.mod from given module root directory +func moduleName(modRoot string) string { + modFile, err := os.Open(filepath.Join(modRoot, "go.mod")) + if err != nil { + return "" + } + + mod := make([]byte, 128) + _, err = modFile.Read(mod) + if err != nil { + return "" + } + + slashSlash := []byte("//") + moduleStr := []byte("module") + + for len(mod) > 0 { + line := mod + mod = nil + if i := bytes.IndexByte(line, '\n'); i >= 0 { + line, mod = line[:i], line[i+1:] + } + if i := bytes.Index(line, slashSlash); i >= 0 { + line = line[:i] + } + line = bytes.TrimSpace(line) + if !bytes.HasPrefix(line, moduleStr) { + continue + } + line = line[len(moduleStr):] + n := len(line) + line = bytes.TrimSpace(line) + if len(line) == n || len(line) == 0 { + continue + } + + if line[0] == '"' || line[0] == '`' { + p, err := strconv.Unquote(string(line)) + if err != nil { + return "" // malformed quoted string or multiline module path + } + return p + } + + return string(line) + } + + return "" // missing module path +} + +func findModuleRoot(dir string) (root string) { + dir = filepath.Clean(dir) + + // Look for enclosing go.mod.
+ for { + if fi, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil && !fi.IsDir() { + return dir + } + d := filepath.Dir(dir) + if d == dir { + break + } + dir = d + } + return "" +} + +func getPackageImportPath() string { + workingDir, err := os.Getwd() + if err != nil { + panic(err.Error()) + } + + sep := string(filepath.Separator) + + // Try go.mod file first + modRoot := findModuleRoot(workingDir) + if modRoot != "" { + modName := moduleName(modRoot) + if modName != "" { + cd := strings.ReplaceAll(workingDir, modRoot, "") + cd = strings.ReplaceAll(cd, sep, "/") + return modName + cd + } + } + + // Fallback to GOPATH structure + paths := strings.Split(workingDir, sep+"src"+sep) + if len(paths) == 1 { + fmt.Printf("\nCouldn't identify package import path.\n\n\tginkgo generate\n\nMust be run within a package directory under $GOPATH/src/...\nYou're going to have to change UNKNOWN_PACKAGE_PATH in the generated file...\n\n") + return "UNKNOWN_PACKAGE_PATH" + } + return filepath.ToSlash(paths[len(paths)-1]) +} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go new file mode 100644 index 000000000..c3470adbf --- /dev/null +++ b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go @@ -0,0 +1,41 @@ +package generators + +var specText = `package {{.Package}} + +import ( + {{.GinkgoImport}} + {{.GomegaImport}} + + {{if .ImportPackage}}"{{.PackageImportPath}}"{{end}} +) + +var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() { + +}) +` + +var agoutiSpecText = `package {{.Package}} + +import ( + {{.GinkgoImport}} + {{.GomegaImport}} + "github.com/sclevine/agouti" + . "github.com/sclevine/agouti/matchers" + + {{if .ImportPackage}}"{{.PackageImportPath}}"{{end}} +) + +var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() { + var page *agouti.Page + + {{.GinkgoPackage}}BeforeEach(func() { + var err error + page, err = agoutiDriver.NewPage() + {{.GomegaPackage}}Expect(err).NotTo({{.GomegaPackage}}HaveOccurred()) + }) + + {{.GinkgoPackage}}AfterEach(func() { + {{.GomegaPackage}}Expect(page.Destroy()).To({{.GomegaPackage}}Succeed()) + }) +}) +` diff --git a/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go new file mode 100644 index 000000000..3046a4487 --- /dev/null +++ b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go @@ -0,0 +1,64 @@ +package generators + +import ( + "go/build" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/onsi/ginkgo/v2/ginkgo/command" +) + +type GeneratorsConfig struct { + Agouti, NoDot, Internal bool + CustomTemplate string + CustomTemplateData string +} + +func getPackageAndFormattedName() (string, string, string) { + path, err := os.Getwd() + command.AbortIfError("Could not get current working directory:", err) + + dirName := strings.ReplaceAll(filepath.Base(path), "-", "_") + dirName = strings.ReplaceAll(dirName, " ", "_") + + pkg, err := build.ImportDir(path, 0) + packageName := pkg.Name + if err != nil { + packageName = ensureLegalPackageName(dirName) + } + + formattedName := prettifyName(filepath.Base(path)) + return packageName, dirName, formattedName +} + +func ensureLegalPackageName(name string) string { + if name == "_" { + return "underscore" + } + if len(name) == 0 { + return "empty" + } + n, isDigitErr := strconv.Atoi(string(name[0])) + if isDigitErr == nil { + 
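+ // A Go package name cannot begin with a digit, so the leading digit is
+ // spelled out below: e.g. "3d" becomes "threed".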
return []string{"zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"}[n] + name[1:] + } + return name +} + +func prettifyName(name string) string { + name = strings.ReplaceAll(name, "-", " ") + name = strings.ReplaceAll(name, "_", " ") + name = strings.Title(name) + name = strings.ReplaceAll(name, " ", "") + return name +} + +func determinePackageName(name string, internal bool) string { + if internal { + return name + } + + return name + "_test" +} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go new file mode 100644 index 000000000..86da7340d --- /dev/null +++ b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go @@ -0,0 +1,161 @@ +package internal + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" + + "github.com/onsi/ginkgo/v2/types" +) + +func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite { + if suite.PathToCompiledTest != "" { + return suite + } + + suite.CompilationError = nil + + path, err := filepath.Abs(filepath.Join(suite.Path, suite.PackageName+".test")) + if err != nil { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compute compilation target path:\n%s", err.Error()) + return suite + } + + ginkgoInvocationPath, _ := os.Getwd() + ginkgoInvocationPath, _ = filepath.Abs(ginkgoInvocationPath) + packagePath := suite.AbsPath() + pathToInvocationPath, err := filepath.Rel(packagePath, ginkgoInvocationPath) + if err != nil { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to get relative path from package to the current working directory:\n%s", err.Error()) + return suite + } + args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, path, "./", pathToInvocationPath) + if err != nil { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to generate go test compile flags:\n%s", err.Error()) + return suite + } + + cmd := exec.Command("go", args...) 
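+ // The generated args effectively invoke `go test -c -o <path> ./` (plus
+ // any user-supplied build flags), compiling the package into a
+ // <package>.test binary in the suite's directory without running it.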
+ cmd.Dir = suite.Path + output, err := cmd.CombinedOutput() + if err != nil { + if len(output) > 0 { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compile %s:\n\n%s", suite.PackageName, output) + } else { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compile %s\n%s", suite.PackageName, err.Error()) + } + return suite + } + + if strings.Contains(string(output), "[no test files]") { + suite.State = TestSuiteStateSkippedDueToEmptyCompilation + return suite + } + + if len(output) > 0 { + fmt.Println(string(output)) + } + + if !FileExists(path) { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compile %s:\nOutput file %s could not be found", suite.PackageName, path) + return suite + } + + suite.State = TestSuiteStateCompiled + suite.PathToCompiledTest = path + return suite +} + +func Cleanup(goFlagsConfig types.GoFlagsConfig, suites ...TestSuite) { + if goFlagsConfig.BinaryMustBePreserved() { + return + } + for _, suite := range suites { + if !suite.Precompiled { + os.Remove(suite.PathToCompiledTest) + } + } +} + +type parallelSuiteBundle struct { + suite TestSuite + compiled chan TestSuite +} + +type OrderedParallelCompiler struct { + mutex *sync.Mutex + stopped bool + numCompilers int + + idx int + numSuites int + completionChannels []chan TestSuite +} + +func NewOrderedParallelCompiler(numCompilers int) *OrderedParallelCompiler { + return &OrderedParallelCompiler{ + mutex: &sync.Mutex{}, + numCompilers: numCompilers, + } +} + +func (opc *OrderedParallelCompiler) StartCompiling(suites TestSuites, goFlagsConfig types.GoFlagsConfig) { + opc.stopped = false + opc.idx = 0 + opc.numSuites = len(suites) + opc.completionChannels = make([]chan TestSuite, opc.numSuites) + + toCompile := make(chan parallelSuiteBundle, opc.numCompilers) + for compiler := 0; compiler < opc.numCompilers; compiler++ { + go func() { + for bundle := range toCompile { + c, suite := bundle.compiled, bundle.suite + opc.mutex.Lock() + stopped := opc.stopped + opc.mutex.Unlock() + if !stopped { + suite = CompileSuite(suite, goFlagsConfig) + } + c <- suite + } + }() + } + + for idx, suite := range suites { + opc.completionChannels[idx] = make(chan TestSuite, 1) + toCompile <- parallelSuiteBundle{suite, opc.completionChannels[idx]} + if idx == 0 { //compile first suite serially + suite = <-opc.completionChannels[0] + opc.completionChannels[0] <- suite + } + } + + close(toCompile) +} + +func (opc *OrderedParallelCompiler) Next() (int, TestSuite) { + if opc.idx >= opc.numSuites { + return opc.numSuites, TestSuite{} + } + + idx := opc.idx + suite := <-opc.completionChannels[idx] + opc.idx = opc.idx + 1 + + return idx, suite +} + +func (opc *OrderedParallelCompiler) StopAndDrain() { + opc.mutex.Lock() + opc.stopped = true + opc.mutex.Unlock() +} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go new file mode 100644 index 000000000..bd3c6d028 --- /dev/null +++ b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go @@ -0,0 +1,237 @@ +package internal + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "path/filepath" + "regexp" + "strconv" + + "github.com/google/pprof/profile" + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +func AbsPathForGeneratedAsset(assetName string, suite TestSuite, cliConfig types.CLIConfig, process 
int) string { + suffix := "" + if process != 0 { + suffix = fmt.Sprintf(".%d", process) + } + if cliConfig.OutputDir == "" { + return filepath.Join(suite.AbsPath(), assetName+suffix) + } + outputDir, _ := filepath.Abs(cliConfig.OutputDir) + return filepath.Join(outputDir, suite.NamespacedName()+"_"+assetName+suffix) +} + +func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIConfig, suiteConfig types.SuiteConfig, reporterConfig types.ReporterConfig, goFlagsConfig types.GoFlagsConfig) ([]string, error) { + messages := []string{} + suitesWithProfiles := suites.WithState(TestSuiteStatePassed, TestSuiteStateFailed) //anything else won't have actually run and generated a profile + + // merge cover profiles if need be + if goFlagsConfig.Cover && !cliConfig.KeepSeparateCoverprofiles { + coverProfiles := []string{} + for _, suite := range suitesWithProfiles { + if !suite.HasProgrammaticFocus { + coverProfiles = append(coverProfiles, AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0)) + } + } + + if len(coverProfiles) > 0 { + dst := goFlagsConfig.CoverProfile + if cliConfig.OutputDir != "" { + dst = filepath.Join(cliConfig.OutputDir, goFlagsConfig.CoverProfile) + } + err := MergeAndCleanupCoverProfiles(coverProfiles, dst) + if err != nil { + return messages, err + } + coverage, err := GetCoverageFromCoverProfile(dst) + if err != nil { + return messages, err + } + if coverage == 0 { + messages = append(messages, "composite coverage: [no statements]") + } else if suitesWithProfiles.AnyHaveProgrammaticFocus() { + messages = append(messages, fmt.Sprintf("composite coverage: %.1f%% of statements however some suites did not contribute because they included programmatically focused specs", coverage)) + } else { + messages = append(messages, fmt.Sprintf("composite coverage: %.1f%% of statements", coverage)) + } + } else { + messages = append(messages, "no composite coverage computed: all suites included programmatically focused specs") + } + } + + // copy binaries if need be + for _, suite := range suitesWithProfiles { + if goFlagsConfig.BinaryMustBePreserved() && cliConfig.OutputDir != "" { + src := suite.PathToCompiledTest + dst := filepath.Join(cliConfig.OutputDir, suite.NamespacedName()+".test") + if suite.Precompiled { + if err := CopyFile(src, dst); err != nil { + return messages, err + } + } else { + if err := os.Rename(src, dst); err != nil { + return messages, err + } + } + } + } + + type reportFormat struct { + ReportName string + GenerateFunc func(types.Report, string) error + MergeFunc func([]string, string) ([]string, error) + } + reportFormats := []reportFormat{} + if reporterConfig.JSONReport != "" { + reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JSONReport, GenerateFunc: reporters.GenerateJSONReport, MergeFunc: reporters.MergeAndCleanupJSONReports}) + } + if reporterConfig.JUnitReport != "" { + reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JUnitReport, GenerateFunc: reporters.GenerateJUnitReport, MergeFunc: reporters.MergeAndCleanupJUnitReports}) + } + if reporterConfig.TeamcityReport != "" { + reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.TeamcityReport, GenerateFunc: reporters.GenerateTeamcityReport, MergeFunc: reporters.MergeAndCleanupTeamcityReports}) + } + + // Generate reports for suites that failed to run + reportableSuites := suites.ThatAreGinkgoSuites() + for _, suite := range reportableSuites.WithState(TestSuiteStateFailedToCompile,
TestSuiteStateFailedDueToTimeout, TestSuiteStateSkippedDueToPriorFailures, TestSuiteStateSkippedDueToEmptyCompilation) { + report := types.Report{ + SuitePath: suite.AbsPath(), + SuiteConfig: suiteConfig, + SuiteSucceeded: false, + } + switch suite.State { + case TestSuiteStateFailedToCompile: + report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, suite.CompilationError.Error()) + case TestSuiteStateFailedDueToTimeout: + report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, TIMEOUT_ELAPSED_FAILURE_REASON) + case TestSuiteStateSkippedDueToPriorFailures: + report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, PRIOR_FAILURES_FAILURE_REASON) + case TestSuiteStateSkippedDueToEmptyCompilation: + report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, EMPTY_SKIP_FAILURE_REASON) + report.SuiteSucceeded = true + } + + for _, format := range reportFormats { + format.GenerateFunc(report, AbsPathForGeneratedAsset(format.ReportName, suite, cliConfig, 0)) + } + } + + // Merge reports unless we've been asked to keep them separate + if !cliConfig.KeepSeparateReports { + for _, format := range reportFormats { + reports := []string{} + for _, suite := range reportableSuites { + reports = append(reports, AbsPathForGeneratedAsset(format.ReportName, suite, cliConfig, 0)) + } + dst := format.ReportName + if cliConfig.OutputDir != "" { + dst = filepath.Join(cliConfig.OutputDir, format.ReportName) + } + mergeMessages, err := format.MergeFunc(reports, dst) + messages = append(messages, mergeMessages...) + if err != nil { + return messages, err + } + } + } + + return messages, nil +} + +//loads each profile, combines them, deletes them, stores them in destination +func MergeAndCleanupCoverProfiles(profiles []string, destination string) error { + combined := &bytes.Buffer{} + modeRegex := regexp.MustCompile(`^mode: .*\n`) + for i, profile := range profiles { + contents, err := os.ReadFile(profile) + if err != nil { + return fmt.Errorf("Unable to read coverage file %s:\n%s", profile, err.Error()) + } + os.Remove(profile) + + // remove the cover mode line from every file + // except the first one + if i > 0 { + contents = modeRegex.ReplaceAll(contents, []byte{}) + } + + _, err = combined.Write(contents) + + // Add a newline to the end of every file if missing. 
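+ // Without the trailing newline the first line of the next profile would
+ // be glued onto the last line of this one, corrupting the merged profile.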
+ if err == nil && len(contents) > 0 && contents[len(contents)-1] != '\n' { + _, err = combined.Write([]byte("\n")) + } + + if err != nil { + return fmt.Errorf("Unable to append to coverprofile:\n%s", err.Error()) + } + } + + err := os.WriteFile(destination, combined.Bytes(), 0666) + if err != nil { + return fmt.Errorf("Unable to create combined cover profile:\n%s", err.Error()) + } + return nil +} + +func GetCoverageFromCoverProfile(profile string) (float64, error) { + cmd := exec.Command("go", "tool", "cover", "-func", profile) + output, err := cmd.CombinedOutput() + if err != nil { + return 0, fmt.Errorf("Could not process Coverprofile %s: %s", profile, err.Error()) + } + re := regexp.MustCompile(`total:\s*\(statements\)\s*(\d*\.\d*)\%`) + matches := re.FindStringSubmatch(string(output)) + if matches == nil { + return 0, fmt.Errorf("Could not parse Coverprofile to compute coverage percentage") + } + coverageString := matches[1] + coverage, err := strconv.ParseFloat(coverageString, 64) + if err != nil { + return 0, fmt.Errorf("Could not parse Coverprofile to compute coverage percentage: %s", err.Error()) + } + + return coverage, nil +} + +func MergeProfiles(profilePaths []string, destination string) error { + profiles := []*profile.Profile{} + for _, profilePath := range profilePaths { + proFile, err := os.Open(profilePath) + if err != nil { + return fmt.Errorf("Could not open profile: %s\n%s", profilePath, err.Error()) + } + prof, err := profile.Parse(proFile) + if err != nil { + return fmt.Errorf("Could not parse profile: %s\n%s", profilePath, err.Error()) + } + profiles = append(profiles, prof) + os.Remove(profilePath) + } + + mergedProfile, err := profile.Merge(profiles) + if err != nil { + return fmt.Errorf("Could not merge profiles:\n%s", err.Error()) + } + + outFile, err := os.Create(destination) + if err != nil { + return fmt.Errorf("Could not create merged profile %s:\n%s", destination, err.Error()) + } + err = mergedProfile.Write(outFile) + if err != nil { + return fmt.Errorf("Could not write merged profile %s:\n%s", destination, err.Error()) + } + err = outFile.Close() + if err != nil { + return fmt.Errorf("Could not close merged profile %s:\n%s", destination, err.Error()) + } + + return nil +} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go new file mode 100644 index 000000000..41052ea19 --- /dev/null +++ b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go @@ -0,0 +1,355 @@ +package internal + +import ( + "bytes" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + "syscall" + "time" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/internal/parallel_support" + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +func RunCompiledSuite(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite { + suite.State = TestSuiteStateFailed + suite.HasProgrammaticFocus = false + + if suite.PathToCompiledTest == "" { + return suite + } + + if suite.IsGinkgo && cliConfig.ComputedProcs() > 1 { + suite = runParallel(suite, ginkgoConfig, reporterConfig, cliConfig, goFlagsConfig, additionalArgs) + } else if suite.IsGinkgo { + suite = runSerial(suite, ginkgoConfig, reporterConfig, cliConfig, goFlagsConfig, additionalArgs) + } else { + suite = runGoTest(suite, 
cliConfig, goFlagsConfig) + } + runAfterRunHook(cliConfig.AfterRunHook, reporterConfig.NoColor, suite) + return suite +} + +func buildAndStartCommand(suite TestSuite, args []string, pipeToStdout bool) (*exec.Cmd, *bytes.Buffer) { + buf := &bytes.Buffer{} + cmd := exec.Command(suite.PathToCompiledTest, args...) + cmd.Dir = suite.Path + if pipeToStdout { + cmd.Stderr = io.MultiWriter(os.Stdout, buf) + cmd.Stdout = os.Stdout + } else { + cmd.Stderr = buf + cmd.Stdout = buf + } + err := cmd.Start() + command.AbortIfError("Failed to start test suite", err) + + return cmd, buf +} + +func checkForNoTestsWarning(buf *bytes.Buffer) bool { + if strings.Contains(buf.String(), "warning: no tests to run") { + fmt.Fprintf(os.Stderr, `Found no test suites, did you forget to run "ginkgo bootstrap"?`) + return true + } + return false +} + +func runGoTest(suite TestSuite, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig) TestSuite { + // As we run the go test from the suite directory, make sure the cover profile is absolute + // and placed into the expected output directory when one is configured. + if goFlagsConfig.Cover && !filepath.IsAbs(goFlagsConfig.CoverProfile) { + goFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0) + } + + args, err := types.GenerateGoTestRunArgs(goFlagsConfig) + command.AbortIfError("Failed to generate test run arguments", err) + cmd, buf := buildAndStartCommand(suite, args, true) + + cmd.Wait() + + exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() + passed := (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE) + passed = !(checkForNoTestsWarning(buf) && cliConfig.RequireSuite) && passed + if passed { + suite.State = TestSuiteStatePassed + } else { + suite.State = TestSuiteStateFailed + } + + return suite +} + +func runSerial(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite { + if goFlagsConfig.Cover { + goFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0) + } + if goFlagsConfig.BlockProfile != "" { + goFlagsConfig.BlockProfile = AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, 0) + } + if goFlagsConfig.CPUProfile != "" { + goFlagsConfig.CPUProfile = AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, 0) + } + if goFlagsConfig.MemProfile != "" { + goFlagsConfig.MemProfile = AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, 0) + } + if goFlagsConfig.MutexProfile != "" { + goFlagsConfig.MutexProfile = AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, 0) + } + if reporterConfig.JSONReport != "" { + reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0) + } + if reporterConfig.JUnitReport != "" { + reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0) + } + if reporterConfig.TeamcityReport != "" { + reporterConfig.TeamcityReport = AbsPathForGeneratedAsset(reporterConfig.TeamcityReport, suite, cliConfig, 0) + } + + args, err := types.GenerateGinkgoTestRunArgs(ginkgoConfig, reporterConfig, goFlagsConfig) + command.AbortIfError("Failed to generate test run arguments", err) + args = append([]string{"--test.timeout=0"}, args...) + args = append(args, additionalArgs...) 
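+ // Prepending --test.timeout=0 disables the testing package's own timeout
+ // so the binary is not killed out from under Ginkgo, which enforces its
+ // own timeouts and interrupt handling; since the last occurrence of a
+ // flag wins, an explicit timeout later in args would still take precedence.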
+ + cmd, buf := buildAndStartCommand(suite, args, true) + + cmd.Wait() + + exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() + suite.HasProgrammaticFocus = (exitStatus == types.GINKGO_FOCUS_EXIT_CODE) + passed := (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE) + passed = !(checkForNoTestsWarning(buf) && cliConfig.RequireSuite) && passed + if passed { + suite.State = TestSuiteStatePassed + } else { + suite.State = TestSuiteStateFailed + } + + if suite.HasProgrammaticFocus { + if goFlagsConfig.Cover { + fmt.Fprintln(os.Stdout, "coverage: no coverfile was generated because specs are programmatically focused") + } + if goFlagsConfig.BlockProfile != "" { + fmt.Fprintln(os.Stdout, "no block profile was generated because specs are programmatically focused") + } + if goFlagsConfig.CPUProfile != "" { + fmt.Fprintln(os.Stdout, "no cpu profile was generated because specs are programmatically focused") + } + if goFlagsConfig.MemProfile != "" { + fmt.Fprintln(os.Stdout, "no mem profile was generated because specs are programmatically focused") + } + if goFlagsConfig.MutexProfile != "" { + fmt.Fprintln(os.Stdout, "no mutex profile was generated because specs are programmatically focused") + } + } + + return suite +} + +func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite { + type procResult struct { + passed bool + hasProgrammaticFocus bool + } + + numProcs := cliConfig.ComputedProcs() + procOutput := make([]*bytes.Buffer, numProcs) + coverProfiles := []string{} + + blockProfiles := []string{} + cpuProfiles := []string{} + memProfiles := []string{} + mutexProfiles := []string{} + + procResults := make(chan procResult) + + server, err := parallel_support.NewServer(numProcs, reporters.NewDefaultReporter(reporterConfig, formatter.ColorableStdOut)) + command.AbortIfError("Failed to start parallel spec server", err) + server.Start() + defer server.Close() + + if reporterConfig.JSONReport != "" { + reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0) + } + if reporterConfig.JUnitReport != "" { + reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0) + } + if reporterConfig.TeamcityReport != "" { + reporterConfig.TeamcityReport = AbsPathForGeneratedAsset(reporterConfig.TeamcityReport, suite, cliConfig, 0) + } + + for proc := 1; proc <= numProcs; proc++ { + procGinkgoConfig := ginkgoConfig + procGinkgoConfig.ParallelProcess, procGinkgoConfig.ParallelTotal, procGinkgoConfig.ParallelHost = proc, numProcs, server.Address() + + procGoFlagsConfig := goFlagsConfig + if goFlagsConfig.Cover { + procGoFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, proc) + coverProfiles = append(coverProfiles, procGoFlagsConfig.CoverProfile) + } + if goFlagsConfig.BlockProfile != "" { + procGoFlagsConfig.BlockProfile = AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, proc) + blockProfiles = append(blockProfiles, procGoFlagsConfig.BlockProfile) + } + if goFlagsConfig.CPUProfile != "" { + procGoFlagsConfig.CPUProfile = AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, proc) + cpuProfiles = append(cpuProfiles, procGoFlagsConfig.CPUProfile) + } + if goFlagsConfig.MemProfile != "" { + procGoFlagsConfig.MemProfile = AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, 
cliConfig, proc) + memProfiles = append(memProfiles, procGoFlagsConfig.MemProfile) + } + if goFlagsConfig.MutexProfile != "" { + procGoFlagsConfig.MutexProfile = AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, proc) + mutexProfiles = append(mutexProfiles, procGoFlagsConfig.MutexProfile) + } + + args, err := types.GenerateGinkgoTestRunArgs(procGinkgoConfig, reporterConfig, procGoFlagsConfig) + command.AbortIfError("Failed to generate test run arguments", err) + args = append([]string{"--test.timeout=0"}, args...) + args = append(args, additionalArgs...) + + cmd, buf := buildAndStartCommand(suite, args, false) + procOutput[proc-1] = buf + server.RegisterAlive(proc, func() bool { return cmd.ProcessState == nil || !cmd.ProcessState.Exited() }) + + go func() { + cmd.Wait() + exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() + procResults <- procResult{ + passed: (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE), + hasProgrammaticFocus: exitStatus == types.GINKGO_FOCUS_EXIT_CODE, + } + }() + } + + passed := true + for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ { + result := <-procResults + passed = passed && result.passed + suite.HasProgrammaticFocus = suite.HasProgrammaticFocus || result.hasProgrammaticFocus + } + if passed { + suite.State = TestSuiteStatePassed + } else { + suite.State = TestSuiteStateFailed + } + + select { + case <-server.GetSuiteDone(): + fmt.Println("") + case <-time.After(time.Second): + //one of the nodes never finished reporting to the server. Something must have gone wrong. + fmt.Fprint(formatter.ColorableStdErr, formatter.F("\n{{bold}}{{red}}Ginkgo timed out waiting for all parallel procs to report back{{/}}\n")) + fmt.Fprint(formatter.ColorableStdErr, formatter.F("{{gray}}Test suite:{{/}} %s (%s)\n\n", suite.PackageName, suite.Path)) + fmt.Fprint(formatter.ColorableStdErr, formatter.Fiw(0, formatter.COLS, "This occurs if a parallel process exits before it reports its results to the Ginkgo CLI. The CLI will now print out all the stdout/stderr output it's collected from the running processes. 
However you may not see anything useful in these logs because the individual test processes usually intercept output to stdout/stderr in order to capture it in the spec reports.\n\nYou may want to try rerunning your test suite with {{light-gray}}--output-interceptor-mode=none{{/}} to see additional output here and debug your suite.\n")) + fmt.Fprintln(formatter.ColorableStdErr, " ") + for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ { + fmt.Fprintf(formatter.ColorableStdErr, formatter.F("{{bold}}Output from proc %d:{{/}}\n", proc)) + fmt.Fprintln(os.Stderr, formatter.Fi(1, "%s", procOutput[proc-1].String())) + } + fmt.Fprintf(os.Stderr, "** End **") + } + + for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ { + output := procOutput[proc-1].String() + if proc == 1 && checkForNoTestsWarning(procOutput[0]) && cliConfig.RequireSuite { + suite.State = TestSuiteStateFailed + } + if strings.Contains(output, "deprecated Ginkgo functionality") { + fmt.Fprintln(os.Stderr, output) + } + } + + if len(coverProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "coverage: no coverfile was generated because specs are programmatically focused") + } else { + coverProfile := AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0) + err := MergeAndCleanupCoverProfiles(coverProfiles, coverProfile) + command.AbortIfError("Failed to combine cover profiles", err) + + coverage, err := GetCoverageFromCoverProfile(coverProfile) + command.AbortIfError("Failed to compute coverage", err) + if coverage == 0 { + fmt.Fprintln(os.Stdout, "coverage: [no statements]") + } else { + fmt.Fprintf(os.Stdout, "coverage: %.1f%% of statements\n", coverage) + } + } + } + if len(blockProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "no block profile was generated because specs are programmatically focused") + } else { + blockProfile := AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, 0) + err := MergeProfiles(blockProfiles, blockProfile) + command.AbortIfError("Failed to combine blockprofiles", err) + } + } + if len(cpuProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "no cpu profile was generated because specs are programmatically focused") + } else { + cpuProfile := AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, 0) + err := MergeProfiles(cpuProfiles, cpuProfile) + command.AbortIfError("Failed to combine cpuprofiles", err) + } + } + if len(memProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "no mem profile was generated because specs are programmatically focused") + } else { + memProfile := AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, 0) + err := MergeProfiles(memProfiles, memProfile) + command.AbortIfError("Failed to combine memprofiles", err) + } + } + if len(mutexProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "no mutex profile was generated because specs are programmatically focused") + } else { + mutexProfile := AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, 0) + err := MergeProfiles(mutexProfiles, mutexProfile) + command.AbortIfError("Failed to combine mutexprofiles", err) + } + } + + return suite +} + +func runAfterRunHook(command string, noColor bool, suite TestSuite) { + if command == "" { + return + } + f := formatter.NewWithNoColorBool(noColor) + + // Allow for string replacement to pass input to the command + passed := "[FAIL]" + if suite.State.Is(TestSuiteStatePassed) { + 
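+ // The hook's command string may contain the placeholders
+ // (ginkgo-suite-passed) and (ginkgo-suite-name); both are substituted
+ // below before the command is tokenized and executed.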
passed = "[PASS]" + } + command = strings.ReplaceAll(command, "(ginkgo-suite-passed)", passed) + command = strings.ReplaceAll(command, "(ginkgo-suite-name)", suite.PackageName) + + // Must break command into parts + splitArgs := regexp.MustCompile(`'.+'|".+"|\S+`) + parts := splitArgs.FindAllString(command, -1) + + output, err := exec.Command(parts[0], parts[1:]...).CombinedOutput() + if err != nil { + fmt.Fprintln(formatter.ColorableStdOut, f.Fi(0, "{{red}}{{bold}}After-run-hook failed:{{/}}")) + fmt.Fprintln(formatter.ColorableStdOut, f.Fi(1, "{{red}}%s{{/}}", output)) + } else { + fmt.Fprintln(formatter.ColorableStdOut, f.Fi(0, "{{green}}{{bold}}After-run-hook succeeded:{{/}}")) + fmt.Fprintln(formatter.ColorableStdOut, f.Fi(1, "{{green}}%s{{/}}", output)) + } +} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go new file mode 100644 index 000000000..64dcb1b78 --- /dev/null +++ b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go @@ -0,0 +1,283 @@ +package internal + +import ( + "errors" + "math/rand" + "os" + "path" + "path/filepath" + "regexp" + "strings" + + "github.com/onsi/ginkgo/v2/types" +) + +const TIMEOUT_ELAPSED_FAILURE_REASON = "Suite did not run because the timeout elapsed" +const PRIOR_FAILURES_FAILURE_REASON = "Suite did not run because prior suites failed and --keep-going is not set" +const EMPTY_SKIP_FAILURE_REASON = "Suite did not run: go test reported that no test files were found" + +type TestSuiteState uint + +const ( + TestSuiteStateInvalid TestSuiteState = iota + + TestSuiteStateUncompiled + TestSuiteStateCompiled + + TestSuiteStatePassed + + TestSuiteStateSkippedDueToEmptyCompilation + TestSuiteStateSkippedByFilter + TestSuiteStateSkippedDueToPriorFailures + + TestSuiteStateFailed + TestSuiteStateFailedDueToTimeout + TestSuiteStateFailedToCompile +) + +var TestSuiteStateFailureStates = []TestSuiteState{TestSuiteStateFailed, TestSuiteStateFailedDueToTimeout, TestSuiteStateFailedToCompile} + +func (state TestSuiteState) Is(states ...TestSuiteState) bool { + for _, suiteState := range states { + if suiteState == state { + return true + } + } + + return false +} + +type TestSuite struct { + Path string + PackageName string + IsGinkgo bool + + Precompiled bool + PathToCompiledTest string + CompilationError error + + HasProgrammaticFocus bool + State TestSuiteState +} + +func (ts TestSuite) AbsPath() string { + path, _ := filepath.Abs(ts.Path) + return path +} + +func (ts TestSuite) NamespacedName() string { + name := relPath(ts.Path) + name = strings.TrimLeft(name, "."+string(filepath.Separator)) + name = strings.ReplaceAll(name, string(filepath.Separator), "_") + name = strings.ReplaceAll(name, " ", "_") + if name == "" { + return ts.PackageName + } + return name +} + +type TestSuites []TestSuite + +func (ts TestSuites) AnyHaveProgrammaticFocus() bool { + for _, suite := range ts { + if suite.HasProgrammaticFocus { + return true + } + } + + return false +} + +func (ts TestSuites) ThatAreGinkgoSuites() TestSuites { + out := TestSuites{} + for _, suite := range ts { + if suite.IsGinkgo { + out = append(out, suite) + } + } + return out +} + +func (ts TestSuites) CountWithState(states ...TestSuiteState) int { + n := 0 + for _, suite := range ts { + if suite.State.Is(states...)
{ + n += 1 + } + } + + return n +} + +func (ts TestSuites) WithState(states ...TestSuiteState) TestSuites { + out := TestSuites{} + for _, suite := range ts { + if suite.State.Is(states...) { + out = append(out, suite) + } + } + + return out +} + +func (ts TestSuites) WithoutState(states ...TestSuiteState) TestSuites { + out := TestSuites{} + for _, suite := range ts { + if !suite.State.Is(states...) { + out = append(out, suite) + } + } + + return out +} + +func (ts TestSuites) ShuffledCopy(seed int64) TestSuites { + out := make(TestSuites, len(ts)) + permutation := rand.New(rand.NewSource(seed)).Perm(len(ts)) + for i, j := range permutation { + out[i] = ts[j] + } + return out +} + +func FindSuites(args []string, cliConfig types.CLIConfig, allowPrecompiled bool) TestSuites { + suites := TestSuites{} + + if len(args) > 0 { + for _, arg := range args { + if allowPrecompiled { + suite, err := precompiledTestSuite(arg) + if err == nil { + suites = append(suites, suite) + continue + } + } + recurseForSuite := cliConfig.Recurse + if strings.HasSuffix(arg, "/...") && arg != "/..." { + arg = arg[:len(arg)-4] + recurseForSuite = true + } + suites = append(suites, suitesInDir(arg, recurseForSuite)...) + } + } else { + suites = suitesInDir(".", cliConfig.Recurse) + } + + if cliConfig.SkipPackage != "" { + skipFilters := strings.Split(cliConfig.SkipPackage, ",") + for idx := range suites { + for _, skipFilter := range skipFilters { + if strings.Contains(suites[idx].Path, skipFilter) { + suites[idx].State = TestSuiteStateSkippedByFilter + break + } + } + } + } + + return suites +} + +func precompiledTestSuite(path string) (TestSuite, error) { + info, err := os.Stat(path) + if err != nil { + return TestSuite{}, err + } + + if info.IsDir() { + return TestSuite{}, errors.New("this is a directory, not a file") + } + + if filepath.Ext(path) != ".test" && filepath.Ext(path) != ".exe" { + return TestSuite{}, errors.New("this is not a .test binary") + } + + if filepath.Ext(path) == ".test" && info.Mode()&0111 == 0 { + return TestSuite{}, errors.New("this is not executable") + } + + dir := relPath(filepath.Dir(path)) + packageName := strings.TrimSuffix(filepath.Base(path), ".exe") + packageName = strings.TrimSuffix(packageName, ".test") + + path, err = filepath.Abs(path) + if err != nil { + return TestSuite{}, err + } + + return TestSuite{ + Path: dir, + PackageName: packageName, + IsGinkgo: true, + Precompiled: true, + PathToCompiledTest: path, + State: TestSuiteStateCompiled, + }, nil +} + +func suitesInDir(dir string, recurse bool) TestSuites { + suites := TestSuites{} + + if path.Base(dir) == "vendor" { + return suites + } + + files, _ := os.ReadDir(dir) + re := regexp.MustCompile(`^[^._].*_test\.go$`) + for _, file := range files { + if !file.IsDir() && re.Match([]byte(file.Name())) { + suite := TestSuite{ + Path: relPath(dir), + PackageName: packageNameForSuite(dir), + IsGinkgo: filesHaveGinkgoSuite(dir, files), + State: TestSuiteStateUncompiled, + } + suites = append(suites, suite) + break + } + } + + if recurse { + re = regexp.MustCompile(`^[._]`) + for _, file := range files { + if file.IsDir() && !re.Match([]byte(file.Name())) { + suites = append(suites, suitesInDir(dir+"/"+file.Name(), recurse)...) + } + } + } + + return suites +} + +func relPath(dir string) string { + dir, _ = filepath.Abs(dir) + cwd, _ := os.Getwd() + dir, _ = filepath.Rel(cwd, filepath.Clean(dir)) + + if string(dir[0]) != "." { + dir = "." 
+ string(filepath.Separator) + dir + } + + return dir +} + +func packageNameForSuite(dir string) string { + path, _ := filepath.Abs(dir) + return filepath.Base(path) +} + +func filesHaveGinkgoSuite(dir string, files []os.DirEntry) bool { + reTestFile := regexp.MustCompile(`_test\.go$`) + reGinkgo := regexp.MustCompile(`package ginkgo|\/ginkgo"|\/ginkgo\/v2"|\/ginkgo\/v2/dsl/`) + + for _, file := range files { + if !file.IsDir() && reTestFile.Match([]byte(file.Name())) { + contents, _ := os.ReadFile(dir + "/" + file.Name()) + if reGinkgo.Match(contents) { + return true + } + } + } + + return false +} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go new file mode 100644 index 000000000..bd9ca7d51 --- /dev/null +++ b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go @@ -0,0 +1,86 @@ +package internal + +import ( + "fmt" + "io" + "os" + "os/exec" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/ginkgo/command" +) + +func FileExists(path string) bool { + _, err := os.Stat(path) + return err == nil +} + +func CopyFile(src string, dest string) error { + srcFile, err := os.Open(src) + if err != nil { + return err + } + + srcStat, err := srcFile.Stat() + if err != nil { + return err + } + + if _, err := os.Stat(dest); err == nil { + os.Remove(dest) + } + + destFile, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE, srcStat.Mode()) + if err != nil { + return err + } + + _, err = io.Copy(destFile, srcFile) + if err != nil { + return err + } + + if err := srcFile.Close(); err != nil { + return err + } + return destFile.Close() +} + +func GoFmt(path string) { + out, err := exec.Command("go", "fmt", path).CombinedOutput() + if err != nil { + command.AbortIfError(fmt.Sprintf("Could not fmt:\n%s\n", string(out)), err) + } +} + +func PluralizedWord(singular, plural string, count int) string { + if count == 1 { + return singular + } + return plural +} + +func FailedSuitesReport(suites TestSuites, f formatter.Formatter) string { + out := "" + out += "There were failures detected in the following suites:\n" + + maxPackageNameLength := 0 + for _, suite := range suites.WithState(TestSuiteStateFailureStates...) 
{ + if len(suite.PackageName) > maxPackageNameLength { + maxPackageNameLength = len(suite.PackageName) + } + } + + packageNameFormatter := fmt.Sprintf("%%%ds", maxPackageNameLength) + for _, suite := range suites { + switch suite.State { + case TestSuiteStateFailed: + out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s{{/}}\n", suite.PackageName, suite.Path) + case TestSuiteStateFailedToCompile: + out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s {{magenta}}[Compilation failure]{{/}}\n", suite.PackageName, suite.Path) + case TestSuiteStateFailedDueToTimeout: + out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s {{orange}}[%s]{{/}}\n", suite.PackageName, suite.Path, TIMEOUT_ELAPSED_FAILURE_REASON) + } + } + return out +} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go new file mode 100644 index 000000000..9da1bab3d --- /dev/null +++ b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go @@ -0,0 +1,54 @@ +package internal + +import ( + "fmt" + "os/exec" + "regexp" + "strings" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/types" +) + +var versionRe = regexp.MustCompile(`v(\d+\.\d+\.\d+)`) + +func VerifyCLIAndFrameworkVersion(suites TestSuites) { + cliVersion := types.VERSION + mismatches := map[string][]string{} + + for _, suite := range suites { + cmd := exec.Command("go", "list", "-m", "github.com/onsi/ginkgo/v2") + cmd.Dir = suite.Path + output, err := cmd.CombinedOutput() + if err != nil { + continue + } + components := strings.Split(string(output), " ") + if len(components) != 2 { + continue + } + matches := versionRe.FindStringSubmatch(components[1]) + if matches == nil || len(matches) != 2 { + continue + } + libraryVersion := matches[1] + if cliVersion != libraryVersion { + mismatches[libraryVersion] = append(mismatches[libraryVersion], suite.PackageName) + } + } + + if len(mismatches) == 0 { + return + } + + fmt.Println(formatter.F("{{red}}{{bold}}Ginkgo detected a version mismatch between the Ginkgo CLI and the version of Ginkgo imported by your packages:{{/}}")) + + fmt.Println(formatter.Fi(1, "Ginkgo CLI Version:")) + fmt.Println(formatter.Fi(2, "{{bold}}%s{{/}}", cliVersion)) + fmt.Println(formatter.Fi(1, "Mismatched package versions found:")) + for version, packages := range mismatches { + fmt.Println(formatter.Fi(2, "{{bold}}%s{{/}} used by %s", version, strings.Join(packages, ", "))) + } + fmt.Println("") + fmt.Println(formatter.Fiw(1, formatter.COLS, "{{gray}}Ginkgo will continue to attempt to run but you may see errors (including flag parsing errors) and should either update your go.mod or your version of the Ginkgo CLI to match.\n\nTo install the matching version of the CLI run\n {{bold}}go install github.com/onsi/ginkgo/v2/ginkgo{{/}}{{gray}}\nfrom a path that contains a go.mod file.
Alternatively you can use\n {{bold}}go run github.com/onsi/ginkgo/v2/ginkgo{{/}}{{gray}}\nfrom a path that contains a go.mod file to invoke the matching version of the Ginkgo CLI.\n\nIf you are attempting to test multiple packages that each have a different version of the Ginkgo library with a single Ginkgo CLI, that is currently unsupported.\n{{/}}")) +} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go new file mode 100644 index 000000000..6c61f09d1 --- /dev/null +++ b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go @@ -0,0 +1,123 @@ +package labels + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "sort" + "strconv" + "strings" + + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/types" + "golang.org/x/tools/go/ast/inspector" +) + +func BuildLabelsCommand() command.Command { + var cliConfig = types.NewDefaultCLIConfig() + + flags, err := types.BuildLabelsCommandFlagSet(&cliConfig) + if err != nil { + panic(err) + } + + return command.Command{ + Name: "labels", + Usage: "ginkgo labels <PACKAGES>", + Flags: flags, + ShortDoc: "List labels detected in the passed-in packages (or the package in the current directory if left blank).", + DocLink: "spec-labels", + Command: func(args []string, _ []string) { + ListLabels(args, cliConfig) + }, + } +} + +func ListLabels(args []string, cliConfig types.CLIConfig) { + suites := internal.FindSuites(args, cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter) + if len(suites) == 0 { + command.AbortWith("Found no test suites") + } + for _, suite := range suites { + labels := fetchLabelsFromPackage(suite.Path) + if len(labels) == 0 { + fmt.Printf("%s: No labels found\n", suite.PackageName) + } else { + fmt.Printf("%s: [%s]\n", suite.PackageName, strings.Join(labels, ", ")) + } + } +} + +func fetchLabelsFromPackage(packagePath string) []string { + fset := token.NewFileSet() + parsedPackages, err := parser.ParseDir(fset, packagePath, nil, 0) + command.AbortIfError("Failed to parse package source:", err) + + files := []*ast.File{} + hasTestPackage := false + for key, pkg := range parsedPackages { + if strings.HasSuffix(key, "_test") { + hasTestPackage = true + for _, file := range pkg.Files { + files = append(files, file) + } + } + } + if !hasTestPackage { + for _, pkg := range parsedPackages { + for _, file := range pkg.Files { + files = append(files, file) + } + } + } + + seen := map[string]bool{} + labels := []string{} + ispr := inspector.New(files) + ispr.Preorder([]ast.Node{&ast.CallExpr{}}, func(n ast.Node) { + potentialLabels := fetchLabels(n.(*ast.CallExpr)) + for _, label := range potentialLabels { + if !seen[label] { + seen[label] = true + labels = append(labels, strconv.Quote(label)) + } + } + }) + + sort.Strings(labels) + return labels +} + +func fetchLabels(callExpr *ast.CallExpr) []string { + out := []string{} + switch expr := callExpr.Fun.(type) { + case *ast.Ident: + if expr.Name != "Label" { + return out + } + case *ast.SelectorExpr: + if expr.Sel.Name != "Label" { + return out + } + default: + return out + } + for _, arg := range callExpr.Args { + switch expr := arg.(type) { + case *ast.BasicLit: + if expr.Kind == token.STRING { + unquoted, err := strconv.Unquote(expr.Value) + if err != nil { + unquoted = expr.Value + } + validated, err := types.ValidateAndCleanupLabel(unquoted, types.CodeLocation{}) + if err == nil { + out =
append(out, validated) + } + } + } + } + return out +} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go new file mode 100644 index 000000000..e9abb27d8 --- /dev/null +++ b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go @@ -0,0 +1,58 @@ +package main + +import ( + "fmt" + "os" + + "github.com/onsi/ginkgo/v2/ginkgo/build" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/generators" + "github.com/onsi/ginkgo/v2/ginkgo/labels" + "github.com/onsi/ginkgo/v2/ginkgo/outline" + "github.com/onsi/ginkgo/v2/ginkgo/run" + "github.com/onsi/ginkgo/v2/ginkgo/unfocus" + "github.com/onsi/ginkgo/v2/ginkgo/watch" + "github.com/onsi/ginkgo/v2/types" +) + +var program command.Program + +func GenerateCommands() []command.Command { + return []command.Command{ + watch.BuildWatchCommand(), + build.BuildBuildCommand(), + generators.BuildBootstrapCommand(), + generators.BuildGenerateCommand(), + labels.BuildLabelsCommand(), + outline.BuildOutlineCommand(), + unfocus.BuildUnfocusCommand(), + BuildVersionCommand(), + } +} + +func main() { + program = command.Program{ + Name: "ginkgo", + Heading: fmt.Sprintf("Ginkgo Version %s", types.VERSION), + Commands: GenerateCommands(), + DefaultCommand: run.BuildRunCommand(), + DeprecatedCommands: []command.DeprecatedCommand{ + {Name: "convert", Deprecation: types.Deprecations.Convert()}, + {Name: "blur", Deprecation: types.Deprecations.Blur()}, + {Name: "nodot", Deprecation: types.Deprecations.Nodot()}, + }, + } + + program.RunAndExit(os.Args) +} + +func BuildVersionCommand() command.Command { + return command.Command{ + Name: "version", + Usage: "ginkgo version", + ShortDoc: "Print Ginkgo's version", + Command: func(_ []string, _ []string) { + fmt.Printf("Ginkgo Version %s\n", types.VERSION) + }, + } +} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go new file mode 100644 index 000000000..0b9b19fe7 --- /dev/null +++ b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go @@ -0,0 +1,302 @@ +package outline + +import ( + "github.com/onsi/ginkgo/v2/types" + "go/ast" + "go/token" + "strconv" +) + +const ( + // undefinedTextAlt is used if the spec/container text cannot be derived + undefinedTextAlt = "undefined" +) + +// ginkgoMetadata holds useful bits of information for every entry in the outline +type ginkgoMetadata struct { + // Name is the spec or container function name, e.g. 
`Describe` or `It` + Name string `json:"name"` + + // Text is the `text` argument passed to specs, and some containers + Text string `json:"text"` + + // Start is the position of first character of the spec or container block + Start int `json:"start"` + + // End is the position of first character immediately after the spec or container block + End int `json:"end"` + + Spec bool `json:"spec"` + Focused bool `json:"focused"` + Pending bool `json:"pending"` + Labels []string `json:"labels"` +} + +// ginkgoNode is used to construct the outline as a tree +type ginkgoNode struct { + ginkgoMetadata + Nodes []*ginkgoNode `json:"nodes"` +} + +type walkFunc func(n *ginkgoNode) + +func (n *ginkgoNode) PreOrder(f walkFunc) { + f(n) + for _, m := range n.Nodes { + m.PreOrder(f) + } +} + +func (n *ginkgoNode) PostOrder(f walkFunc) { + for _, m := range n.Nodes { + m.PostOrder(f) + } + f(n) +} + +func (n *ginkgoNode) Walk(pre, post walkFunc) { + pre(n) + for _, m := range n.Nodes { + m.Walk(pre, post) + } + post(n) +} + +// PropagateInheritedProperties propagates the Pending and Focused properties +// through the subtree rooted at n. +func (n *ginkgoNode) PropagateInheritedProperties() { + n.PreOrder(func(thisNode *ginkgoNode) { + for _, descendantNode := range thisNode.Nodes { + if thisNode.Pending { + descendantNode.Pending = true + descendantNode.Focused = false + } + if thisNode.Focused && !descendantNode.Pending { + descendantNode.Focused = true + } + } + }) +} + +// BackpropagateUnfocus propagates the Focused property through the subtree +// rooted at n. It applies the rule described in the Ginkgo docs: +// > Nested programmatically focused specs follow a simple rule: if a +// > leaf-node is marked focused, any of its ancestor nodes that are marked +// > focus will be unfocused. +func (n *ginkgoNode) BackpropagateUnfocus() { + focusedSpecInSubtreeStack := []bool{} + n.PostOrder(func(thisNode *ginkgoNode) { + if thisNode.Spec { + focusedSpecInSubtreeStack = append(focusedSpecInSubtreeStack, thisNode.Focused) + return + } + focusedSpecInSubtree := false + for range thisNode.Nodes { + focusedSpecInSubtree = focusedSpecInSubtree || focusedSpecInSubtreeStack[len(focusedSpecInSubtreeStack)-1] + focusedSpecInSubtreeStack = focusedSpecInSubtreeStack[0 : len(focusedSpecInSubtreeStack)-1] + } + focusedSpecInSubtreeStack = append(focusedSpecInSubtreeStack, focusedSpecInSubtree) + if focusedSpecInSubtree { + thisNode.Focused = false + } + }) + +} + +func packageAndIdentNamesFromCallExpr(ce *ast.CallExpr) (string, string, bool) { + switch ex := ce.Fun.(type) { + case *ast.Ident: + return "", ex.Name, true + case *ast.SelectorExpr: + pkgID, ok := ex.X.(*ast.Ident) + if !ok { + return "", "", false + } + // A package identifier is top-level, so Obj must be nil + if pkgID.Obj != nil { + return "", "", false + } + if ex.Sel == nil { + return "", "", false + } + return pkgID.Name, ex.Sel.Name, true + default: + return "", "", false + } +} + +// absoluteOffsetsForNode derives the absolute character offsets of the node start and +// end positions. +func absoluteOffsetsForNode(fset *token.FileSet, n ast.Node) (start, end int) { + return fset.PositionFor(n.Pos(), false).Offset, fset.PositionFor(n.End(), false).Offset +} + +// ginkgoNodeFromCallExpr derives an outline entry from a go AST subtree +// corresponding to a Ginkgo container or spec. 
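+// As an illustrative sketch (the spec below is hypothetical, not from this
+// package): given the call expression
+//
+//	It("reads a book", Label("fast"), func() { ... })
+//
+// the derived entry would have Name "It", Text "reads a book", Spec true,
+// and Labels ["fast"], assuming the file imports ginkgo under the expected
+// package name.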
+func ginkgoNodeFromCallExpr(fset *token.FileSet, ce *ast.CallExpr, ginkgoPackageName *string) (*ginkgoNode, bool) { + packageName, identName, ok := packageAndIdentNamesFromCallExpr(ce) + if !ok { + return nil, false + } + + n := ginkgoNode{} + n.Name = identName + n.Start, n.End = absoluteOffsetsForNode(fset, ce) + n.Nodes = make([]*ginkgoNode, 0) + switch identName { + case "It", "Specify", "Entry": + n.Spec = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + n.Pending = pendingFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "FIt", "FSpecify", "FEntry": + n.Spec = true + n.Focused = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "PIt", "PSpecify", "XIt", "XSpecify", "PEntry", "XEntry": + n.Spec = true + n.Pending = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "Context", "Describe", "When", "DescribeTable": + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + n.Pending = pendingFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "FContext", "FDescribe", "FWhen", "FDescribeTable": + n.Focused = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "PContext", "PDescribe", "PWhen", "XContext", "XDescribe", "XWhen", "PDescribeTable", "XDescribeTable": + n.Pending = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "By": + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "AfterEach", "BeforeEach": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "JustAfterEach", "JustBeforeEach": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "AfterSuite", "BeforeSuite": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "SynchronizedAfterSuite", "SynchronizedBeforeSuite": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + default: + return nil, false + } +} + +// textOrAltFromCallExpr tries to derive the "text" of a Ginkgo spec or +// container. If it cannot derive it, it returns the alt text. +func textOrAltFromCallExpr(ce *ast.CallExpr, alt string) string { + text, defined := textFromCallExpr(ce) + if !defined { + return alt + } + return text +} + +// textFromCallExpr tries to derive the "text" of a Ginkgo spec or container. If +// it cannot derive it, it returns false. 
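+// For example (illustrative): the first argument of Describe("Books", func() {...})
+// is a string literal, so this yields ("Books", true); a non-literal first
+// argument such as Describe(bookSuiteName, func() {...}) yields ("", false).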
+func textFromCallExpr(ce *ast.CallExpr) (string, bool) { + if len(ce.Args) < 1 { + return "", false + } + text, ok := ce.Args[0].(*ast.BasicLit) + if !ok { + return "", false + } + switch text.Kind { + case token.CHAR, token.STRING: + // For token.CHAR and token.STRING, Value is quoted + unquoted, err := strconv.Unquote(text.Value) + if err != nil { + // If unquoting fails, just use the raw Value + return text.Value, true + } + return unquoted, true + default: + return text.Value, true + } +} + +func labelFromCallExpr(ce *ast.CallExpr) []string { + + labels := []string{} + if len(ce.Args) < 2 { + return labels + } + + for _, arg := range ce.Args[1:] { + switch expr := arg.(type) { + case *ast.CallExpr: + id, ok := expr.Fun.(*ast.Ident) + if !ok { + // to skip over cases where the expr.Fun. is actually *ast.SelectorExpr + continue + } + if id.Name == "Label" { + ls := extractLabels(expr) + for _, label := range ls { + labels = append(labels, label) + } + } + } + } + return labels +} + +func extractLabels(expr *ast.CallExpr) []string { + out := []string{} + for _, arg := range expr.Args { + switch expr := arg.(type) { + case *ast.BasicLit: + if expr.Kind == token.STRING { + unquoted, err := strconv.Unquote(expr.Value) + if err != nil { + unquoted = expr.Value + } + validated, err := types.ValidateAndCleanupLabel(unquoted, types.CodeLocation{}) + if err == nil { + out = append(out, validated) + } + } + } + } + + return out +} + +func pendingFromCallExpr(ce *ast.CallExpr) bool { + + pending := false + if len(ce.Args) < 2 { + return pending + } + + for _, arg := range ce.Args[1:] { + switch expr := arg.(type) { + case *ast.CallExpr: + id, ok := expr.Fun.(*ast.Ident) + if !ok { + // to skip over cases where the expr.Fun. is actually *ast.SelectorExpr + continue + } + if id.Name == "Pending" { + pending = true + } + case *ast.Ident: + if expr.Name == "Pending" { + pending = true + } + } + } + return pending +} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go new file mode 100644 index 000000000..67ec5ab75 --- /dev/null +++ b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go @@ -0,0 +1,65 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Most of the required functions were available in the +// "golang.org/x/tools/go/ast/astutil" package, but not exported. +// They were copied from https://github.com/golang/tools/blob/2b0845dc783e36ae26d683f4915a5840ef01ab0f/go/ast/astutil/imports.go + +package outline + +import ( + "go/ast" + "strconv" + "strings" +) + +// packageNameForImport returns the package name for the package. If the package +// is not imported, it returns nil. "Package name" refers to `pkgname` in the +// call expression `pkgname.ExportedIdentifier`. Examples: +// (import path not found) -> nil +// "import example.com/pkg/foo" -> "foo" +// "import fooalias example.com/pkg/foo" -> "fooalias" +// "import . example.com/pkg/foo" -> "" +func packageNameForImport(f *ast.File, path string) *string { + spec := importSpec(f, path) + if spec == nil { + return nil + } + name := spec.Name.String() + if name == "" { + // If the package name is not explicitly specified, + // make an educated guess. This is not guaranteed to be correct. + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 { + name = path + } else { + name = path[lastSlash+1:] + } + } + if name == "." 
{ + name = "" + } + return &name +} + +// importSpec returns the import spec if f imports path, +// or nil otherwise. +func importSpec(f *ast.File, path string) *ast.ImportSpec { + for _, s := range f.Imports { + if strings.HasPrefix(importPath(s), path) { + return s + } + } + return nil +} + +// importPath returns the unquoted import path of s, +// or "" if the path is not properly quoted. +func importPath(s *ast.ImportSpec) string { + t, err := strconv.Unquote(s.Path.Value) + if err != nil { + return "" + } + return t +} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go new file mode 100644 index 000000000..c2327cda8 --- /dev/null +++ b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go @@ -0,0 +1,110 @@ +package outline + +import ( + "encoding/json" + "fmt" + "go/ast" + "go/token" + "strings" + + "golang.org/x/tools/go/ast/inspector" +) + +const ( + // ginkgoImportPath is the well-known ginkgo import path + ginkgoImportPath = "github.com/onsi/ginkgo/v2" +) + +// FromASTFile returns an outline for a Ginkgo test source file +func FromASTFile(fset *token.FileSet, src *ast.File) (*outline, error) { + ginkgoPackageName := packageNameForImport(src, ginkgoImportPath) + if ginkgoPackageName == nil { + return nil, fmt.Errorf("file does not import %q", ginkgoImportPath) + } + + root := ginkgoNode{} + stack := []*ginkgoNode{&root} + ispr := inspector.New([]*ast.File{src}) + ispr.Nodes([]ast.Node{(*ast.CallExpr)(nil)}, func(node ast.Node, push bool) bool { + if push { + // Pre-order traversal + ce, ok := node.(*ast.CallExpr) + if !ok { + // Because `Nodes` calls this function only when the node is an + // ast.CallExpr, this should never happen + panic(fmt.Errorf("node starting at %d, ending at %d is not an *ast.CallExpr", node.Pos(), node.End())) + } + gn, ok := ginkgoNodeFromCallExpr(fset, ce, ginkgoPackageName) + if !ok { + // Node is not a Ginkgo spec or container, continue + return true + } + parent := stack[len(stack)-1] + parent.Nodes = append(parent.Nodes, gn) + stack = append(stack, gn) + return true + } + // Post-order traversal + start, end := absoluteOffsetsForNode(fset, node) + lastVisitedGinkgoNode := stack[len(stack)-1] + if start != lastVisitedGinkgoNode.Start || end != lastVisitedGinkgoNode.End { + // Node is not a Ginkgo spec or container, so it was not pushed onto the stack, continue + return true + } + stack = stack[0 : len(stack)-1] + return true + }) + if len(root.Nodes) == 0 { + return &outline{[]*ginkgoNode{}}, nil + } + + // Derive the final focused property for all nodes. This must be done + // _before_ propagating the inherited focused property. + root.BackpropagateUnfocus() + // Now, propagate inherited properties, including focused and pending. + root.PropagateInheritedProperties() + + return &outline{root.Nodes}, nil +} + +type outline struct { + Nodes []*ginkgoNode `json:"nodes"` +} + +func (o *outline) MarshalJSON() ([]byte, error) { + return json.Marshal(o.Nodes) +} + +// String returns a CSV-formatted outline. Spec or container are output in +// depth-first order. +func (o *outline) String() string { + return o.StringIndent(0) +} + +// StringIndent returns a CSV-formated outline, but every line is indented by +// one 'width' of spaces for every level of nesting. 
+func (o *outline) StringIndent(width int) string { + var b strings.Builder + b.WriteString("Name,Text,Start,End,Spec,Focused,Pending,Labels\n") + + currentIndent := 0 + pre := func(n *ginkgoNode) { + b.WriteString(fmt.Sprintf("%*s", currentIndent, "")) + var labels string + if len(n.Labels) == 1 { + labels = n.Labels[0] + } else { + labels = strings.Join(n.Labels, ", ") + } + // enclose labels in a double-quoted, comma-separated list so that when imported into a CSV app the Labels column holds comma-separated strings + b.WriteString(fmt.Sprintf("%s,%s,%d,%d,%t,%t,%t,\"%s\"\n", n.Name, n.Text, n.Start, n.End, n.Spec, n.Focused, n.Pending, labels)) + currentIndent += width + } + post := func(n *ginkgoNode) { + currentIndent -= width + } + for _, n := range o.Nodes { + n.Walk(pre, post) + } + return b.String() +} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go new file mode 100644 index 000000000..36698d46a --- /dev/null +++ b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go @@ -0,0 +1,98 @@ +package outline + +import ( + "encoding/json" + "fmt" + "go/parser" + "go/token" + "os" + + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/types" +) + +const ( + // indentWidth is the width used by the 'indent' output + indentWidth = 4 + // stdinAlias is a portable alias for stdin. This convention is used in + // other CLIs, e.g., kubectl. + stdinAlias = "-" + usageCommand = "ginkgo outline <FILE>" +) + +type outlineConfig struct { + Format string +} + +func BuildOutlineCommand() command.Command { + conf := outlineConfig{ + Format: "csv", + } + flags, err := types.NewGinkgoFlagSet( + types.GinkgoFlags{ + {Name: "format", KeyPath: "Format", + Usage: "Format of outline", + UsageArgument: "one of 'csv', 'indent', or 'json'", + UsageDefaultValue: conf.Format, + }, + }, + &conf, + types.GinkgoFlagSections{}, + ) + if err != nil { + panic(err) + } + + return command.Command{ + Name: "outline", + Usage: "ginkgo outline <FILE>", + ShortDoc: "Create an outline of Ginkgo symbols for a file", + Documentation: "To read from stdin, use: `ginkgo outline -`", + DocLink: "creating-an-outline-of-specs", + Flags: flags, + Command: func(args []string, _ []string) { + outlineFile(args, conf.Format) + }, + } +} + +func outlineFile(args []string, format string) { + if len(args) != 1 { + command.AbortWithUsage("outline expects exactly one argument") + } + + filename := args[0] + var src *os.File + if filename == stdinAlias { + src = os.Stdin + } else { + var err error + src, err = os.Open(filename) + command.AbortIfError("Failed to open file:", err) + } + + fset := token.NewFileSet() + + parsedSrc, err := parser.ParseFile(fset, filename, src, 0) + command.AbortIfError("Failed to parse source:", err) + + o, err := FromASTFile(fset, parsedSrc) + command.AbortIfError("Failed to create outline:", err) + + var oerr error + switch format { + case "csv": + _, oerr = fmt.Print(o) + case "indent": + _, oerr = fmt.Print(o.StringIndent(indentWidth)) + case "json": + b, err := json.Marshal(o) + if err != nil { + println(fmt.Sprintf("error marshalling to json: %s", err)) + } + _, oerr = fmt.Println(string(b)) + default: + command.AbortWith("Format %s not accepted", format) + } + command.AbortIfError("Failed to write outline:", oerr) +} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go new file mode 100644
index 000000000..aaed4d570 --- /dev/null +++ b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go @@ -0,0 +1,232 @@ +package run + +import ( + "fmt" + "os" + "strings" + "time" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/internal/interrupt_handler" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildRunCommand() command.Command { + var suiteConfig = types.NewDefaultSuiteConfig() + var reporterConfig = types.NewDefaultReporterConfig() + var cliConfig = types.NewDefaultCLIConfig() + var goFlagsConfig = types.NewDefaultGoFlagsConfig() + + flags, err := types.BuildRunCommandFlagSet(&suiteConfig, &reporterConfig, &cliConfig, &goFlagsConfig) + if err != nil { + panic(err) + } + + interruptHandler := interrupt_handler.NewInterruptHandler(nil) + interrupt_handler.SwallowSigQuit() + + return command.Command{ + Name: "run", + Flags: flags, + Usage: "ginkgo run <FLAGS> <PACKAGES> -- <PASS-THROUGHS>", + ShortDoc: "Run the tests in the passed in <PACKAGES> (or the package in the current directory if left blank)", + Documentation: "Any arguments after -- will be passed to the test.", + DocLink: "running-tests", + Command: func(args []string, additionalArgs []string) { + var errors []error + cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig) + command.AbortIfErrors("Ginkgo detected configuration issues:", errors) + + runner := &SpecRunner{ + cliConfig: cliConfig, + goFlagsConfig: goFlagsConfig, + suiteConfig: suiteConfig, + reporterConfig: reporterConfig, + flags: flags, + + interruptHandler: interruptHandler, + } + + runner.RunSpecs(args, additionalArgs) + }, + } +} + +type SpecRunner struct { + suiteConfig types.SuiteConfig + reporterConfig types.ReporterConfig + cliConfig types.CLIConfig + goFlagsConfig types.GoFlagsConfig + flags types.GinkgoFlagSet + + interruptHandler *interrupt_handler.InterruptHandler +} + +func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) { + suites := internal.FindSuites(args, r.cliConfig, true) + skippedSuites := suites.WithState(internal.TestSuiteStateSkippedByFilter) + suites = suites.WithoutState(internal.TestSuiteStateSkippedByFilter) + + internal.VerifyCLIAndFrameworkVersion(suites) + + if len(skippedSuites) > 0 { + fmt.Println("Will skip:") + for _, skippedSuite := range skippedSuites { + fmt.Println(" " + skippedSuite.Path) + } + } + + if len(skippedSuites) > 0 && len(suites) == 0 { + command.AbortGracefullyWith("All tests skipped!
Exiting...") + } + + if len(suites) == 0 { + command.AbortWith("Found no test suites") + } + + if len(suites) > 1 && !r.flags.WasSet("succinct") && r.reporterConfig.Verbosity().LT(types.VerbosityLevelVerbose) { + r.reporterConfig.Succinct = true + } + + t := time.Now() + var endTime time.Time + if r.suiteConfig.Timeout > 0 { + endTime = t.Add(r.suiteConfig.Timeout) + } + + iteration := 0 +OUTER_LOOP: + for { + if !r.flags.WasSet("seed") { + r.suiteConfig.RandomSeed = time.Now().Unix() + } + if r.cliConfig.RandomizeSuites && len(suites) > 1 { + suites = suites.ShuffledCopy(r.suiteConfig.RandomSeed) + } + + opc := internal.NewOrderedParallelCompiler(r.cliConfig.ComputedNumCompilers()) + opc.StartCompiling(suites, r.goFlagsConfig) + + SUITE_LOOP: + for { + suiteIdx, suite := opc.Next() + if suiteIdx >= len(suites) { + break SUITE_LOOP + } + suites[suiteIdx] = suite + + if r.interruptHandler.Status().Interrupted() { + opc.StopAndDrain() + break OUTER_LOOP + } + + if suites[suiteIdx].State.Is(internal.TestSuiteStateSkippedDueToEmptyCompilation) { + fmt.Printf("Skipping %s (no test files)\n", suite.Path) + continue SUITE_LOOP + } + + if suites[suiteIdx].State.Is(internal.TestSuiteStateFailedToCompile) { + fmt.Println(suites[suiteIdx].CompilationError.Error()) + if !r.cliConfig.KeepGoing { + opc.StopAndDrain() + } + continue SUITE_LOOP + } + + if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 && !r.cliConfig.KeepGoing { + suites[suiteIdx].State = internal.TestSuiteStateSkippedDueToPriorFailures + opc.StopAndDrain() + continue SUITE_LOOP + } + + if !endTime.IsZero() { + r.suiteConfig.Timeout = endTime.Sub(time.Now()) + if r.suiteConfig.Timeout <= 0 { + suites[suiteIdx].State = internal.TestSuiteStateFailedDueToTimeout + opc.StopAndDrain() + continue SUITE_LOOP + } + } + + suites[suiteIdx] = internal.RunCompiledSuite(suites[suiteIdx], r.suiteConfig, r.reporterConfig, r.cliConfig, r.goFlagsConfig, additionalArgs) + } + + if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 { + if iteration > 0 { + fmt.Printf("\nTests failed on attempt #%d\n\n", iteration+1) + } + break OUTER_LOOP + } + + if r.cliConfig.UntilItFails { + fmt.Printf("\nAll tests passed...\nWill keep running them until they fail.\nThis was attempt #%d\n%s\n", iteration+1, orcMessage(iteration+1)) + } else if r.cliConfig.Repeat > 0 && iteration < r.cliConfig.Repeat { + fmt.Printf("\nAll tests passed...\nThis was attempt %d of %d.\n", iteration+1, r.cliConfig.Repeat+1) + } else { + break OUTER_LOOP + } + iteration += 1 + } + + internal.Cleanup(r.goFlagsConfig, suites...) + + messages, err := internal.FinalizeProfilesAndReportsForSuites(suites, r.cliConfig, r.suiteConfig, r.reporterConfig, r.goFlagsConfig) + command.AbortIfError("could not finalize profiles:", err) + for _, message := range messages { + fmt.Println(message) + } + + fmt.Printf("\nGinkgo ran %d %s in %s\n", len(suites), internal.PluralizedWord("suite", "suites", len(suites)), time.Since(t)) + + if suites.CountWithState(internal.TestSuiteStateFailureStates...) 
== 0 { + if suites.AnyHaveProgrammaticFocus() && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" { + fmt.Printf("Test Suite Passed\n") + fmt.Printf("Detected Programmatic Focus - setting exit status to %d\n", types.GINKGO_FOCUS_EXIT_CODE) + command.Abort(command.AbortDetails{ExitCode: types.GINKGO_FOCUS_EXIT_CODE}) + } else { + fmt.Printf("Test Suite Passed\n") + command.Abort(command.AbortDetails{}) + } + } else { + fmt.Fprintln(formatter.ColorableStdOut, "") + if len(suites) > 1 && suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 { + fmt.Fprintln(formatter.ColorableStdOut, + internal.FailedSuitesReport(suites, formatter.NewWithNoColorBool(r.reporterConfig.NoColor))) + } + fmt.Printf("Test Suite Failed\n") + command.Abort(command.AbortDetails{ExitCode: 1}) + } +} + +func orcMessage(iteration int) string { + if iteration < 10 { + return "" + } else if iteration < 30 { + return []string{ + "If at first you succeed...", + "...try, try again.", + "Looking good!", + "Still good...", + "I think your tests are fine....", + "Yep, still passing", + "Oh boy, here I go testin' again!", + "Even the gophers are getting bored", + "Did you try -race?", + "Maybe you should stop now?", + "I'm getting tired...", + "What if I just made you a sandwich?", + "Hit ^C, hit ^C, please hit ^C", + "Make it stop. Please!", + "Come on! Enough is enough!", + "Dave, this conversation can serve no purpose anymore. Goodbye.", + "Just what do you think you're doing, Dave? ", + "I, Sisyphus", + "Insanity: doing the same thing over and over again and expecting different results. -Einstein", + "I guess Einstein never tried to churn butter", + }[iteration-10] + "\n" + } else { + return "No, seriously... you can probably stop now.\n" + } +} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go new file mode 100644 index 000000000..7dd294394 --- /dev/null +++ b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go @@ -0,0 +1,186 @@ +package unfocus + +import ( + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/token" + "io" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/onsi/ginkgo/v2/ginkgo/command" +) + +func BuildUnfocusCommand() command.Command { + return command.Command{ + Name: "unfocus", + Usage: "ginkgo unfocus", + ShortDoc: "Recursively unfocus any focused tests under the current directory", + DocLink: "filtering-specs", + Command: func(_ []string, _ []string) { + unfocusSpecs() + }, + } +} + +func unfocusSpecs() { + fmt.Println("Scanning for focus...") + + goFiles := make(chan string) + go func() { + unfocusDir(goFiles, ".") + close(goFiles) + }() + + const workers = 10 + wg := sync.WaitGroup{} + wg.Add(workers) + + for i := 0; i < workers; i++ { + go func() { + for path := range goFiles { + unfocusFile(path) + } + wg.Done() + }() + } + + wg.Wait() +} + +func unfocusDir(goFiles chan string, path string) { + files, err := os.ReadDir(path) + if err != nil { + fmt.Println(err.Error()) + return + } + + for _, f := range files { + switch { + case f.IsDir() && shouldProcessDir(f.Name()): + unfocusDir(goFiles, filepath.Join(path, f.Name())) + case !f.IsDir() && shouldProcessFile(f.Name()): + goFiles <- filepath.Join(path, f.Name()) + } + } +} + +func shouldProcessDir(basename string) bool { + return basename != "vendor" && !strings.HasPrefix(basename, ".") +} + +func shouldProcessFile(basename string) bool { + return strings.HasSuffix(basename, ".go") +} + 
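+// As an illustrative sketch of the rewrite performed below: a file containing
+// FDescribe("Books", ...) or It("reads", Focus, ...) is rewritten in place to
+// Describe("Books", ...) and It("reads", ...); a temporary backup of the
+// original is written first and removed once the rewrite succeeds.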
+func unfocusFile(path string) { + data, err := os.ReadFile(path) + if err != nil { + fmt.Printf("error reading file '%s': %s\n", path, err.Error()) + return + } + + ast, err := parser.ParseFile(token.NewFileSet(), path, bytes.NewReader(data), parser.ParseComments) + if err != nil { + fmt.Printf("error parsing file '%s': %s\n", path, err.Error()) + return + } + + eliminations := scanForFocus(ast) + if len(eliminations) == 0 { + return + } + + fmt.Printf("...updating %s\n", path) + backup, err := writeBackup(path, data) + if err != nil { + fmt.Printf("error creating backup file: %s\n", err.Error()) + return + } + + if err := updateFile(path, data, eliminations); err != nil { + fmt.Printf("error writing file '%s': %s\n", path, err.Error()) + return + } + + os.Remove(backup) +} + +func writeBackup(path string, data []byte) (string, error) { + t, err := os.CreateTemp(filepath.Dir(path), filepath.Base(path)) + + if err != nil { + return "", fmt.Errorf("error creating temporary file: %w", err) + } + defer t.Close() + + if _, err := io.Copy(t, bytes.NewReader(data)); err != nil { + return "", fmt.Errorf("error writing to temporary file: %w", err) + } + + return t.Name(), nil +} + +func updateFile(path string, data []byte, eliminations [][]int64) error { + to, err := os.Create(path) + if err != nil { + return fmt.Errorf("error opening file for writing '%s': %w\n", path, err) + } + defer to.Close() + + from := bytes.NewReader(data) + var cursor int64 + for _, eliminationRange := range eliminations { + positionToEliminate, lengthToEliminate := eliminationRange[0]-1, eliminationRange[1] + if _, err := io.CopyN(to, from, positionToEliminate-cursor); err != nil { + return fmt.Errorf("error copying data: %w", err) + } + + cursor = positionToEliminate + lengthToEliminate + + if _, err := from.Seek(lengthToEliminate, io.SeekCurrent); err != nil { + return fmt.Errorf("error seeking to position in buffer: %w", err) + } + } + + if _, err := io.Copy(to, from); err != nil { + return fmt.Errorf("error copying end data: %w", err) + } + + return nil +} + +func scanForFocus(file *ast.File) (eliminations [][]int64) { + ast.Inspect(file, func(n ast.Node) bool { + if c, ok := n.(*ast.CallExpr); ok { + if i, ok := c.Fun.(*ast.Ident); ok { + if isFocus(i.Name) { + eliminations = append(eliminations, []int64{int64(i.Pos()), 1}) + } + } + } + + if i, ok := n.(*ast.Ident); ok { + if i.Name == "Focus" { + eliminations = append(eliminations, []int64{int64(i.Pos()), 6}) + } + } + + return true + }) + + return eliminations +} + +func isFocus(name string) bool { + switch name { + case "FDescribe", "FContext", "FIt", "FDescribeTable", "FEntry", "FSpecify", "FWhen": + return true + default: + return false + } +} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go new file mode 100644 index 000000000..6c485c5b1 --- /dev/null +++ b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go @@ -0,0 +1,22 @@ +package watch + +import "sort" + +type Delta struct { + ModifiedPackages []string + + NewSuites []*Suite + RemovedSuites []*Suite + modifiedSuites []*Suite +} + +type DescendingByDelta []*Suite + +func (a DescendingByDelta) Len() int { return len(a) } +func (a DescendingByDelta) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a DescendingByDelta) Less(i, j int) bool { return a[i].Delta() > a[j].Delta() } + +func (d Delta) ModifiedSuites() []*Suite { + sort.Sort(DescendingByDelta(d.modifiedSuites)) + return d.modifiedSuites +} diff --git 
a/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go new file mode 100644 index 000000000..26418ac62 --- /dev/null +++ b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go @@ -0,0 +1,75 @@ +package watch + +import ( + "fmt" + + "regexp" + + "github.com/onsi/ginkgo/v2/ginkgo/internal" +) + +type SuiteErrors map[internal.TestSuite]error + +type DeltaTracker struct { + maxDepth int + watchRegExp *regexp.Regexp + suites map[string]*Suite + packageHashes *PackageHashes +} + +func NewDeltaTracker(maxDepth int, watchRegExp *regexp.Regexp) *DeltaTracker { + return &DeltaTracker{ + maxDepth: maxDepth, + watchRegExp: watchRegExp, + packageHashes: NewPackageHashes(watchRegExp), + suites: map[string]*Suite{}, + } +} + +func (d *DeltaTracker) Delta(suites internal.TestSuites) (delta Delta, errors SuiteErrors) { + errors = SuiteErrors{} + delta.ModifiedPackages = d.packageHashes.CheckForChanges() + + providedSuitePaths := map[string]bool{} + for _, suite := range suites { + providedSuitePaths[suite.Path] = true + } + + d.packageHashes.StartTrackingUsage() + + for _, suite := range d.suites { + if providedSuitePaths[suite.Suite.Path] { + if suite.Delta() > 0 { + delta.modifiedSuites = append(delta.modifiedSuites, suite) + } + } else { + delta.RemovedSuites = append(delta.RemovedSuites, suite) + } + } + + d.packageHashes.StopTrackingUsageAndPrune() + + for _, suite := range suites { + _, ok := d.suites[suite.Path] + if !ok { + s, err := NewSuite(suite, d.maxDepth, d.packageHashes) + if err != nil { + errors[suite] = err + continue + } + d.suites[suite.Path] = s + delta.NewSuites = append(delta.NewSuites, s) + } + } + + return delta, errors +} + +func (d *DeltaTracker) WillRun(suite internal.TestSuite) error { + s, ok := d.suites[suite.Path] + if !ok { + return fmt.Errorf("unknown suite %s", suite.Path) + } + + return s.MarkAsRunAndRecomputedDependencies(d.maxDepth) +} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go new file mode 100644 index 000000000..f5ddff30f --- /dev/null +++ b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go @@ -0,0 +1,92 @@ +package watch + +import ( + "go/build" + "regexp" +) + +var ginkgoAndGomegaFilter = regexp.MustCompile(`github\.com/onsi/ginkgo|github\.com/onsi/gomega`) +var ginkgoIntegrationTestFilter = regexp.MustCompile(`github\.com/onsi/ginkgo/integration`) //allow us to integration test this thing + +type Dependencies struct { + deps map[string]int +} + +func NewDependencies(path string, maxDepth int) (Dependencies, error) { + d := Dependencies{ + deps: map[string]int{}, + } + + if maxDepth == 0 { + return d, nil + } + + err := d.seedWithDepsForPackageAtPath(path) + if err != nil { + return d, err + } + + for depth := 1; depth < maxDepth; depth++ { + n := len(d.deps) + d.addDepsForDepth(depth) + if n == len(d.deps) { + break + } + } + + return d, nil +} + +func (d Dependencies) Dependencies() map[string]int { + return d.deps +} + +func (d Dependencies) seedWithDepsForPackageAtPath(path string) error { + pkg, err := build.ImportDir(path, 0) + if err != nil { + return err + } + + d.resolveAndAdd(pkg.Imports, 1) + d.resolveAndAdd(pkg.TestImports, 1) + d.resolveAndAdd(pkg.XTestImports, 1) + + delete(d.deps, pkg.Dir) + return nil +} + +func (d Dependencies) addDepsForDepth(depth int) { + for dep, depDepth := range d.deps { + if depDepth == depth { + 
d.addDepsForDep(dep, depth+1) + } + } +} + +func (d Dependencies) addDepsForDep(dep string, depth int) { + pkg, err := build.ImportDir(dep, 0) + if err != nil { + println(err.Error()) + return + } + d.resolveAndAdd(pkg.Imports, depth) +} + +func (d Dependencies) resolveAndAdd(deps []string, depth int) { + for _, dep := range deps { + pkg, err := build.Import(dep, ".", 0) + if err != nil { + continue + } + if !pkg.Goroot && (!ginkgoAndGomegaFilter.Match([]byte(pkg.Dir)) || ginkgoIntegrationTestFilter.Match([]byte(pkg.Dir))) { + d.addDepIfNotPresent(pkg.Dir, depth) + } + } +} + +func (d Dependencies) addDepIfNotPresent(dep string, depth int) { + _, ok := d.deps[dep] + if !ok { + d.deps[dep] = depth + } +} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go new file mode 100644 index 000000000..e9f7ec0cb --- /dev/null +++ b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go @@ -0,0 +1,108 @@ +package watch + +import ( + "fmt" + "os" + "regexp" + "time" +) + +var goTestRegExp = regexp.MustCompile(`_test\.go$`) + +type PackageHash struct { + CodeModifiedTime time.Time + TestModifiedTime time.Time + Deleted bool + + path string + codeHash string + testHash string + watchRegExp *regexp.Regexp +} + +func NewPackageHash(path string, watchRegExp *regexp.Regexp) *PackageHash { + p := &PackageHash{ + path: path, + watchRegExp: watchRegExp, + } + + p.codeHash, _, p.testHash, _, p.Deleted = p.computeHashes() + + return p +} + +func (p *PackageHash) CheckForChanges() bool { + codeHash, codeModifiedTime, testHash, testModifiedTime, deleted := p.computeHashes() + + if deleted { + if !p.Deleted { + t := time.Now() + p.CodeModifiedTime = t + p.TestModifiedTime = t + } + p.Deleted = true + return true + } + + modified := false + p.Deleted = false + + if p.codeHash != codeHash { + p.CodeModifiedTime = codeModifiedTime + modified = true + } + if p.testHash != testHash { + p.TestModifiedTime = testModifiedTime + modified = true + } + + p.codeHash = codeHash + p.testHash = testHash + return modified +} + +func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Time, testHash string, testModifiedTime time.Time, deleted bool) { + entries, err := os.ReadDir(p.path) + + if err != nil { + deleted = true + return + } + + for _, entry := range entries { + if entry.IsDir() { + continue + } + + info, err := entry.Info() + if err != nil { + continue + } + + if goTestRegExp.Match([]byte(info.Name())) { + testHash += p.hashForFileInfo(info) + if info.ModTime().After(testModifiedTime) { + testModifiedTime = info.ModTime() + } + continue + } + + if p.watchRegExp.Match([]byte(info.Name())) { + codeHash += p.hashForFileInfo(info) + if info.ModTime().After(codeModifiedTime) { + codeModifiedTime = info.ModTime() + } + } + } + + testHash += codeHash + if codeModifiedTime.After(testModifiedTime) { + testModifiedTime = codeModifiedTime + } + + return +} + +func (p *PackageHash) hashForFileInfo(info os.FileInfo) string { + return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano()) +} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go new file mode 100644 index 000000000..b4892bebf --- /dev/null +++ b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go @@ -0,0 +1,85 @@ +package watch + +import ( + "path/filepath" + "regexp" + "sync" +) + +type PackageHashes struct { + 
PackageHashes map[string]*PackageHash + usedPaths map[string]bool + watchRegExp *regexp.Regexp + lock *sync.Mutex +} + +func NewPackageHashes(watchRegExp *regexp.Regexp) *PackageHashes { + return &PackageHashes{ + PackageHashes: map[string]*PackageHash{}, + usedPaths: nil, + watchRegExp: watchRegExp, + lock: &sync.Mutex{}, + } +} + +func (p *PackageHashes) CheckForChanges() []string { + p.lock.Lock() + defer p.lock.Unlock() + + modified := []string{} + + for _, packageHash := range p.PackageHashes { + if packageHash.CheckForChanges() { + modified = append(modified, packageHash.path) + } + } + + return modified +} + +func (p *PackageHashes) Add(path string) *PackageHash { + p.lock.Lock() + defer p.lock.Unlock() + + path, _ = filepath.Abs(path) + _, ok := p.PackageHashes[path] + if !ok { + p.PackageHashes[path] = NewPackageHash(path, p.watchRegExp) + } + + if p.usedPaths != nil { + p.usedPaths[path] = true + } + return p.PackageHashes[path] +} + +func (p *PackageHashes) Get(path string) *PackageHash { + p.lock.Lock() + defer p.lock.Unlock() + + path, _ = filepath.Abs(path) + if p.usedPaths != nil { + p.usedPaths[path] = true + } + return p.PackageHashes[path] +} + +func (p *PackageHashes) StartTrackingUsage() { + p.lock.Lock() + defer p.lock.Unlock() + + p.usedPaths = map[string]bool{} +} + +func (p *PackageHashes) StopTrackingUsageAndPrune() { + p.lock.Lock() + defer p.lock.Unlock() + + for path := range p.PackageHashes { + if !p.usedPaths[path] { + delete(p.PackageHashes, path) + } + } + + p.usedPaths = nil +} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go new file mode 100644 index 000000000..53272df7e --- /dev/null +++ b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go @@ -0,0 +1,87 @@ +package watch + +import ( + "fmt" + "math" + "time" + + "github.com/onsi/ginkgo/v2/ginkgo/internal" +) + +type Suite struct { + Suite internal.TestSuite + RunTime time.Time + Dependencies Dependencies + + sharedPackageHashes *PackageHashes +} + +func NewSuite(suite internal.TestSuite, maxDepth int, sharedPackageHashes *PackageHashes) (*Suite, error) { + deps, err := NewDependencies(suite.Path, maxDepth) + if err != nil { + return nil, err + } + + sharedPackageHashes.Add(suite.Path) + for dep := range deps.Dependencies() { + sharedPackageHashes.Add(dep) + } + + return &Suite{ + Suite: suite, + Dependencies: deps, + + sharedPackageHashes: sharedPackageHashes, + }, nil +} + +func (s *Suite) Delta() float64 { + delta := s.delta(s.Suite.Path, true, 0) * 1000 + for dep, depth := range s.Dependencies.Dependencies() { + delta += s.delta(dep, false, depth) + } + return delta +} + +func (s *Suite) MarkAsRunAndRecomputedDependencies(maxDepth int) error { + s.RunTime = time.Now() + + deps, err := NewDependencies(s.Suite.Path, maxDepth) + if err != nil { + return err + } + + s.sharedPackageHashes.Add(s.Suite.Path) + for dep := range deps.Dependencies() { + s.sharedPackageHashes.Add(dep) + } + + s.Dependencies = deps + + return nil +} + +func (s *Suite) Description() string { + numDeps := len(s.Dependencies.Dependencies()) + pluralizer := "ies" + if numDeps == 1 { + pluralizer = "y" + } + return fmt.Sprintf("%s [%d dependenc%s]", s.Suite.Path, numDeps, pluralizer) +} + +func (s *Suite) delta(packagePath string, includeTests bool, depth int) float64 { + return math.Max(float64(s.dt(packagePath, includeTests)), 0) / float64(depth+1) +} + +func (s *Suite) dt(packagePath string, includeTests bool) time.Duration { + packageHash 
:= s.sharedPackageHashes.Get(packagePath) + var modifiedTime time.Time + if includeTests { + modifiedTime = packageHash.TestModifiedTime + } else { + modifiedTime = packageHash.CodeModifiedTime + } + + return modifiedTime.Sub(s.RunTime) +} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go new file mode 100644 index 000000000..bde4193ce --- /dev/null +++ b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go @@ -0,0 +1,192 @@ +package watch + +import ( + "fmt" + "regexp" + "time" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/internal/interrupt_handler" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildWatchCommand() command.Command { + var suiteConfig = types.NewDefaultSuiteConfig() + var reporterConfig = types.NewDefaultReporterConfig() + var cliConfig = types.NewDefaultCLIConfig() + var goFlagsConfig = types.NewDefaultGoFlagsConfig() + + flags, err := types.BuildWatchCommandFlagSet(&suiteConfig, &reporterConfig, &cliConfig, &goFlagsConfig) + if err != nil { + panic(err) + } + interruptHandler := interrupt_handler.NewInterruptHandler(nil) + interrupt_handler.SwallowSigQuit() + + return command.Command{ + Name: "watch", + Flags: flags, + Usage: "ginkgo watch <FLAGS> <PACKAGES> -- <PASS-THROUGHS>", + ShortDoc: "Watch the passed in <PACKAGES> and run their tests whenever changes occur.", + Documentation: "Any arguments after -- will be passed to the test.", + DocLink: "watching-for-changes", + Command: func(args []string, additionalArgs []string) { + var errors []error + cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig) + command.AbortIfErrors("Ginkgo detected configuration issues:", errors) + + watcher := &SpecWatcher{ + cliConfig: cliConfig, + goFlagsConfig: goFlagsConfig, + suiteConfig: suiteConfig, + reporterConfig: reporterConfig, + flags: flags, + + interruptHandler: interruptHandler, + } + + watcher.WatchSpecs(args, additionalArgs) + }, + } +} + +type SpecWatcher struct { + suiteConfig types.SuiteConfig + reporterConfig types.ReporterConfig + cliConfig types.CLIConfig + goFlagsConfig types.GoFlagsConfig + flags types.GinkgoFlagSet + + interruptHandler *interrupt_handler.InterruptHandler +} + +func (w *SpecWatcher) WatchSpecs(args []string, additionalArgs []string) { + suites := internal.FindSuites(args, w.cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter) + + internal.VerifyCLIAndFrameworkVersion(suites) + + if len(suites) == 0 { + command.AbortWith("Found no test suites") + } + + fmt.Printf("Identified %d test %s.
Locating dependencies to a depth of %d (this may take a while)...\n", len(suites), internal.PluralizedWord("suite", "suites", len(suites)), w.cliConfig.Depth) + deltaTracker := NewDeltaTracker(w.cliConfig.Depth, regexp.MustCompile(w.cliConfig.WatchRegExp)) + delta, errors := deltaTracker.Delta(suites) + + fmt.Printf("Watching %d %s:\n", len(delta.NewSuites), internal.PluralizedWord("suite", "suites", len(delta.NewSuites))) + for _, suite := range delta.NewSuites { + fmt.Println(" " + suite.Description()) + } + + for suite, err := range errors { + fmt.Printf("Failed to watch %s: %s\n", suite.PackageName, err) + } + + if len(suites) == 1 { + w.updateSeed() + w.compileAndRun(suites[0], additionalArgs) + } + + ticker := time.NewTicker(time.Second) + + for { + select { + case <-ticker.C: + suites := internal.FindSuites(args, w.cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter) + delta, _ := deltaTracker.Delta(suites) + coloredStream := formatter.ColorableStdOut + + suites = internal.TestSuites{} + + if len(delta.NewSuites) > 0 { + fmt.Fprintln(coloredStream, formatter.F("{{green}}Detected %d new %s:{{/}}", len(delta.NewSuites), internal.PluralizedWord("suite", "suites", len(delta.NewSuites)))) + for _, suite := range delta.NewSuites { + suites = append(suites, suite.Suite) + fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", suite.Description())) + } + } + + modifiedSuites := delta.ModifiedSuites() + if len(modifiedSuites) > 0 { + fmt.Fprintln(coloredStream, formatter.F("{{green}}Detected changes in:{{/}}")) + for _, pkg := range delta.ModifiedPackages { + fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", pkg)) + } + fmt.Fprintln(coloredStream, formatter.F("{{green}}Will run %d %s:{{/}}", len(modifiedSuites), internal.PluralizedWord("suite", "suites", len(modifiedSuites)))) + for _, suite := range modifiedSuites { + suites = append(suites, suite.Suite) + fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", suite.Description())) + } + fmt.Fprintln(coloredStream, "") + } + + if len(suites) == 0 { + break + } + + w.updateSeed() + w.computeSuccinctMode(len(suites)) + for idx := range suites { + if w.interruptHandler.Status().Interrupted() { + return + } + deltaTracker.WillRun(suites[idx]) + suites[idx] = w.compileAndRun(suites[idx], additionalArgs) + } + color := "{{green}}" + if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 { + color = "{{red}}" + } + fmt.Fprintln(coloredStream, formatter.F(color+"\nDone. 
Resuming watch...{{/}}")) + + messages, err := internal.FinalizeProfilesAndReportsForSuites(suites, w.cliConfig, w.suiteConfig, w.reporterConfig, w.goFlagsConfig) + command.AbortIfError("could not finalize profiles:", err) + for _, message := range messages { + fmt.Println(message) + } + case <-w.interruptHandler.Status().Channel: + return + } + } +} + +func (w *SpecWatcher) compileAndRun(suite internal.TestSuite, additionalArgs []string) internal.TestSuite { + suite = internal.CompileSuite(suite, w.goFlagsConfig) + if suite.State.Is(internal.TestSuiteStateFailedToCompile) { + fmt.Println(suite.CompilationError.Error()) + return suite + } + if w.interruptHandler.Status().Interrupted() { + return suite + } + suite = internal.RunCompiledSuite(suite, w.suiteConfig, w.reporterConfig, w.cliConfig, w.goFlagsConfig, additionalArgs) + internal.Cleanup(w.goFlagsConfig, suite) + return suite +} + +func (w *SpecWatcher) computeSuccinctMode(numSuites int) { + if w.reporterConfig.Verbosity().GTE(types.VerbosityLevelVerbose) { + w.reporterConfig.Succinct = false + return + } + + if w.flags.WasSet("succinct") { + return + } + + if numSuites == 1 { + w.reporterConfig.Succinct = false + } + + if numSuites > 1 { + w.reporterConfig.Succinct = true + } +} + +func (w *SpecWatcher) updateSeed() { + if !w.flags.WasSet("seed") { + w.suiteConfig.RandomSeed = time.Now().Unix() + } +} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/ginkgo_cli_dependencies.go b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo_cli_dependencies.go new file mode 100644 index 000000000..85162720f --- /dev/null +++ b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo_cli_dependencies.go @@ -0,0 +1,8 @@ +//go:build ginkgoclidependencies +// +build ginkgoclidependencies + +package ginkgo + +import ( + _ "github.com/onsi/ginkgo/v2/ginkgo" +) diff --git a/api/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go index 1beeb1144..28447ffdd 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go +++ b/api/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go @@ -1,26 +1,42 @@ package ginkgo -import "github.com/onsi/ginkgo/v2/internal/testingtproxy" +import ( + "github.com/onsi/ginkgo/v2/internal/testingtproxy" +) /* -GinkgoT() implements an interface analogous to *testing.T and can be used with -third-party libraries that accept *testing.T through an interface. +GinkgoT() implements an interface that allows third party libraries to integrate with and build on top of Ginkgo. + +GinkgoT() is analogous to *testing.T and implements the majority of *testing.T's methods. It can typically be used as a drop-in replacement with third-party libraries that accept *testing.T through an interface. GinkgoT() takes an optional offset argument that can be used to get the +correct line number associated with the failure - though you do not need to use this if you call GinkgoHelper() or GinkgoT().Helper() appropriately. You can learn more here: https://onsi.github.io/ginkgo/#using-third-party-libraries */ -func GinkgoT(optionalOffset ...int) GinkgoTInterface { +func GinkgoT(optionalOffset ...int) FullGinkgoTInterface { offset := 3 if len(optionalOffset) > 0 { offset = optionalOffset[0] } - return testingtproxy.New(GinkgoWriter, Fail, Skip, DeferCleanup, CurrentSpecReport, offset) + return testingtproxy.New( + GinkgoWriter, + Fail, + Skip, + DeferCleanup, + CurrentSpecReport, + AddReportEntry, + GinkgoRecover, + AttachProgressReporter, + suiteConfig.RandomSeed, + suiteConfig.ParallelProcess, + suiteConfig.ParallelTotal, + reporterConfig.NoColor, + offset) }
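To make the integration concrete, here is a minimal sketch of handing GinkgoT() to a library that accepts *testing.T through an interface (testify is used purely as an illustration and is not part of this change):

    import (
        . "github.com/onsi/ginkgo/v2"
        "github.com/stretchr/testify/assert"
    )

    var _ = Describe("third-party assertions", func() {
        It("routes testify failures through Ginkgo", func() {
            // assert.Equal only needs testify's TestingT interface, which
            // GinkgoT() satisfies; failures are reported via Ginkgo's Fail.
            assert.Equal(GinkgoT(), 4, 2+2)
        })
    })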
/* -The interface returned by GinkgoT(). This covers most of the methods in the testing package's T. +The portion of the interface returned by GinkgoT() that maps onto methods in the testing package's T. */ type GinkgoTInterface interface { Cleanup(func()) @@ -43,3 +59,36 @@ type GinkgoTInterface interface { Skipped() bool TempDir() string } + +/* +Additional methods returned by GinkgoT() that provide deeper integration points into Ginkgo +*/ +type FullGinkgoTInterface interface { + GinkgoTInterface + + AddReportEntryVisibilityAlways(name string, args ...any) + AddReportEntryVisibilityFailureOrVerbose(name string, args ...any) + AddReportEntryVisibilityNever(name string, args ...any) + + //Prints to the GinkgoWriter + Print(a ...interface{}) + Printf(format string, a ...interface{}) + Println(a ...interface{}) + + //Provides access to Ginkgo's color formatting, correctly configured to match the color settings specified in the invocation of ginkgo + F(format string, args ...any) string + Fi(indentation uint, format string, args ...any) string + Fiw(indentation uint, maxWidth uint, format string, args ...any) string + + //Generates a formatted string version of the current spec's timeline + RenderTimeline() string + + GinkgoRecover() + DeferCleanup(args ...any) + + RandomSeed() int64 + ParallelProcess() int + ParallelTotal() int + + AttachProgressReporter(func() string) func() +} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/internal/group.go b/api/vendor/github.com/onsi/ginkgo/v2/internal/group.go index c6546bba4..ae1b7b011 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/internal/group.go +++ b/api/vendor/github.com/onsi/ginkgo/v2/internal/group.go @@ -94,15 +94,19 @@ type group struct { runOncePairs map[uint]runOncePairs runOnceTracker map[runOncePair]types.SpecState - succeeded bool + succeeded bool + failedInARunOnceBefore bool + continueOnFailure bool } func newGroup(suite *Suite) *group { return &group{ - suite: suite, - runOncePairs: map[uint]runOncePairs{}, - runOnceTracker: map[runOncePair]types.SpecState{}, - succeeded: true, + suite: suite, + runOncePairs: map[uint]runOncePairs{}, + runOnceTracker: map[runOncePair]types.SpecState{}, + succeeded: true, + failedInARunOnceBefore: false, + continueOnFailure: false, } } @@ -116,8 +120,11 @@ func (g *group) initialReportForSpec(spec Spec) types.SpecReport { LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text, LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels), ParallelProcess: g.suite.config.ParallelProcess, + RunningInParallel: g.suite.isRunningInParallel(), IsSerial: spec.Nodes.HasNodeMarkedSerial(), IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(), + MaxFlakeAttempts: 
spec.Nodes.GetMaxFlakeAttempts(), + MaxMustPassRepeatedly: spec.Nodes.GetMaxMustPassRepeatedly(), } } @@ -128,13 +135,20 @@ func (g *group) evaluateSkipStatus(spec Spec) (types.SpecState, types.Failure) { if spec.Skip { return types.SpecStateSkipped, types.Failure{} } - if g.suite.interruptHandler.Status().Interrupted || g.suite.skipAll { + if g.suite.interruptHandler.Status().Interrupted() || g.suite.skipAll { return types.SpecStateSkipped, types.Failure{} } - if !g.succeeded { + if !g.suite.deadline.IsZero() && g.suite.deadline.Before(time.Now()) { + return types.SpecStateSkipped, types.Failure{} + } + if !g.succeeded && !g.continueOnFailure { return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt), "Spec skipped because an earlier spec in an ordered container failed") } + if g.failedInARunOnceBefore && g.continueOnFailure { + return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt), + "Spec skipped because a BeforeAll node failed") + } beforeOncePairs := g.runOncePairs[spec.SubjectID()].withType(types.NodeTypeBeforeAll | types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach) for _, pair := range beforeOncePairs { if g.runOnceTracker[pair].Is(types.SpecStateSkipped) { @@ -162,9 +176,8 @@ func (g *group) isLastSpecWithPair(specID uint, pair runOncePair) bool { return lastSpecID == specID } -func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) { - interruptStatus := g.suite.interruptHandler.Status() - +func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) bool { + failedInARunOnceBefore := false pairs := g.runOncePairs[spec.SubjectID()] nodes := spec.Nodes.WithType(types.NodeTypeBeforeAll) @@ -173,18 +186,24 @@ func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) { nodes = append(nodes, spec.Nodes.FirstNodeWithType(types.NodeTypeIt)) terminatingNode, terminatingPair := Node{}, runOncePair{} + deadline := time.Time{} + if spec.SpecTimeout() > 0 { + deadline = time.Now().Add(spec.SpecTimeout()) + } + for _, node := range nodes { oncePair := pairs.runOncePairFor(node.ID) if !oncePair.isZero() && g.runOnceTracker[oncePair].Is(types.SpecStatePassed) { continue } - g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure = g.suite.runNode(node, interruptStatus.Channel, spec.Nodes.BestTextFor(node)) + g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure = g.suite.runNode(node, deadline, spec.Nodes.BestTextFor(node)) g.suite.currentSpecReport.RunTime = time.Since(g.suite.currentSpecReport.StartTime) if !oncePair.isZero() { g.runOnceTracker[oncePair] = g.suite.currentSpecReport.State } if g.suite.currentSpecReport.State != types.SpecStatePassed { terminatingNode, terminatingPair = node, oncePair + failedInARunOnceBefore = !terminatingPair.isZero() break } } @@ -207,7 +226,7 @@ func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) { //this node has already been run on this attempt, don't rerun it return false } - pair := runOncePair{} + var pair runOncePair switch node.NodeType { case types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll: // check if we were generated in an AfterNode that has already run @@ -237,9 +256,13 @@ func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) { if !terminatingPair.isZero() && terminatingNode.NestingLevel == node.NestingLevel { return true //...or, a run-once node at our nesting level was skipped which means this is our last chance to run } - case types.SpecStateFailed, types.SpecStatePanicked: 
// the spec has failed... + case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateTimedout: // the spec has failed... if isFinalAttempt { - return true //...if this was the last attempt then we're the last spec to run and so the AfterNode should run + if g.continueOnFailure { + return isLastSpecWithPair || failedInARunOnceBefore //...we're configured to continue on failures - so we should only run if we're the last spec for this pair or if we failed in a runOnceBefore (which means we _are_ the last spec to run) + } else { + return true //...this was the last attempt and continueOnFailure is false therefore we are the last spec to run and so the AfterNode should run + } } if !terminatingPair.isZero() { // ...and it failed in a run-once. which will be running again if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll) { @@ -260,26 +283,33 @@ func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) { for _, node := range nodes { afterNodeWasRun[node.ID] = true - state, failure := g.suite.runNode(node, g.suite.interruptHandler.Status().Channel, spec.Nodes.BestTextFor(node)) + state, failure := g.suite.runNode(node, deadline, spec.Nodes.BestTextFor(node)) g.suite.currentSpecReport.RunTime = time.Since(g.suite.currentSpecReport.StartTime) if g.suite.currentSpecReport.State == types.SpecStatePassed || state == types.SpecStateAborted { g.suite.currentSpecReport.State = state g.suite.currentSpecReport.Failure = failure + } else if state.Is(types.SpecStateFailureStates) { + g.suite.currentSpecReport.AdditionalFailures = append(g.suite.currentSpecReport.AdditionalFailures, types.AdditionalFailure{State: state, Failure: failure}) } } includeDeferCleanups = true } + return failedInARunOnceBefore } func (g *group) run(specs Specs) { g.specs = specs + g.continueOnFailure = specs[0].Nodes.FirstNodeMarkedOrdered().MarkedContinueOnFailure for _, spec := range g.specs { g.runOncePairs[spec.SubjectID()] = runOncePairsForSpec(spec) } for _, spec := range g.specs { + g.suite.selectiveLock.Lock() g.suite.currentSpecReport = g.initialReportForSpec(spec) + g.suite.selectiveLock.Unlock() + g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure = g.evaluateSkipStatus(spec) g.suite.reporter.WillRun(g.suite.currentSpecReport) g.suite.reportEach(spec, types.NodeTypeReportBeforeEach) @@ -287,28 +317,52 @@ func (g *group) run(specs Specs) { skip := g.suite.config.DryRun || g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates|types.SpecStateSkipped|types.SpecStatePending) g.suite.currentSpecReport.StartTime = time.Now() + failedInARunOnceBefore := false if !skip { - maxAttempts := max(1, spec.FlakeAttempts()) - if g.suite.config.FlakeAttempts > 0 { + var maxAttempts = 1 + + if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 { + maxAttempts = max(1, spec.MustPassRepeatedly()) + } else if g.suite.config.FlakeAttempts > 0 { maxAttempts = g.suite.config.FlakeAttempts + g.suite.currentSpecReport.MaxFlakeAttempts = maxAttempts + } else if g.suite.currentSpecReport.MaxFlakeAttempts > 0 { + maxAttempts = max(1, spec.FlakeAttempts()) } + for attempt := 0; attempt < maxAttempts; attempt++ { g.suite.currentSpecReport.NumAttempts = attempt + 1 g.suite.writer.Truncate() g.suite.outputInterceptor.StartInterceptingOutput() if attempt > 0 { - fmt.Fprintf(g.suite.writer, "\nGinkgo: Attempt #%d Failed. 
Retrying...\n", attempt) + if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 { + g.suite.handleSpecEvent(types.SpecEvent{SpecEventType: types.SpecEventSpecRepeat, Attempt: attempt}) + } + if g.suite.currentSpecReport.MaxFlakeAttempts > 0 { + g.suite.handleSpecEvent(types.SpecEvent{SpecEventType: types.SpecEventSpecRetry, Attempt: attempt}) + } } - g.attemptSpec(attempt == maxAttempts-1, spec) + failedInARunOnceBefore = g.attemptSpec(attempt == maxAttempts-1, spec) g.suite.currentSpecReport.EndTime = time.Now() g.suite.currentSpecReport.RunTime = g.suite.currentSpecReport.EndTime.Sub(g.suite.currentSpecReport.StartTime) g.suite.currentSpecReport.CapturedGinkgoWriterOutput += string(g.suite.writer.Bytes()) g.suite.currentSpecReport.CapturedStdOutErr += g.suite.outputInterceptor.StopInterceptingAndReturnOutput() - if g.suite.currentSpecReport.State.Is(types.SpecStatePassed | types.SpecStateSkipped | types.SpecStateAborted | types.SpecStateInterrupted) { - break + if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 { + if g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates | types.SpecStateSkipped) { + break + } + } + if g.suite.currentSpecReport.MaxFlakeAttempts > 0 { + if g.suite.currentSpecReport.State.Is(types.SpecStatePassed | types.SpecStateSkipped | types.SpecStateAborted | types.SpecStateInterrupted) { + break + } else if attempt < maxAttempts-1 { + af := types.AdditionalFailure{State: g.suite.currentSpecReport.State, Failure: g.suite.currentSpecReport.Failure} + af.Failure.Message = fmt.Sprintf("Failure recorded during attempt %d:\n%s", attempt+1, af.Failure.Message) + g.suite.currentSpecReport.AdditionalFailures = append(g.suite.currentSpecReport.AdditionalFailures, af) + } } } } @@ -317,228 +371,10 @@ func (g *group) run(specs Specs) { g.suite.processCurrentSpecReport() if g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates) { g.succeeded = false + g.failedInARunOnceBefore = g.failedInARunOnceBefore || failedInARunOnceBefore } + g.suite.selectiveLock.Lock() g.suite.currentSpecReport = types.SpecReport{} - } -} - -func (g *group) oldRun(specs Specs) { - var suite = g.suite - nodeState := map[uint]types.SpecState{} - groupSucceeded := true - - indexOfLastSpecContainingNodeID := func(id uint) int { - lastIdx := -1 - for idx := range specs { - if specs[idx].Nodes.ContainsNodeID(id) && !specs[idx].Skip { - lastIdx = idx - } - } - return lastIdx - } - - for i, spec := range specs { - suite.currentSpecReport = types.SpecReport{ - ContainerHierarchyTexts: spec.Nodes.WithType(types.NodeTypeContainer).Texts(), - ContainerHierarchyLocations: spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(), - ContainerHierarchyLabels: spec.Nodes.WithType(types.NodeTypeContainer).Labels(), - LeafNodeLocation: spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation, - LeafNodeType: types.NodeTypeIt, - LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text, - LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels), - ParallelProcess: suite.config.ParallelProcess, - IsSerial: spec.Nodes.HasNodeMarkedSerial(), - IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(), - } - - skip := spec.Skip - if spec.Nodes.HasNodeMarkedPending() { - skip = true - suite.currentSpecReport.State = types.SpecStatePending - } else { - if suite.interruptHandler.Status().Interrupted || suite.skipAll { - skip = true - } - if !groupSucceeded { - skip = true - suite.currentSpecReport.Failure = 
suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt), - "Spec skipped because an earlier spec in an ordered container failed") - } - for _, node := range spec.Nodes.WithType(types.NodeTypeBeforeAll) { - if nodeState[node.ID] == types.SpecStateSkipped { - skip = true - suite.currentSpecReport.Failure = suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt), - "Spec skipped because Skip() was called in BeforeAll") - break - } - } - if skip { - suite.currentSpecReport.State = types.SpecStateSkipped - } - } - - if suite.config.DryRun && !skip { - skip = true - suite.currentSpecReport.State = types.SpecStatePassed - } - - suite.reporter.WillRun(suite.currentSpecReport) - //send the spec report to any attached ReportBeforeEach blocks - this will update suite.currentSpecReport if failures occur in these blocks - suite.reportEach(spec, types.NodeTypeReportBeforeEach) - if suite.currentSpecReport.State.Is(types.SpecStateFailureStates) { - //the reportEach failed, skip this spec - skip = true - } - - suite.currentSpecReport.StartTime = time.Now() - maxAttempts := max(1, spec.FlakeAttempts()) - if suite.config.FlakeAttempts > 0 { - maxAttempts = suite.config.FlakeAttempts - } - - for attempt := 0; !skip && (attempt < maxAttempts); attempt++ { - suite.currentSpecReport.NumAttempts = attempt + 1 - suite.writer.Truncate() - suite.outputInterceptor.StartInterceptingOutput() - if attempt > 0 { - fmt.Fprintf(suite.writer, "\nGinkgo: Attempt #%d Failed. Retrying...\n", attempt) - } - isFinalAttempt := (attempt == maxAttempts-1) - - interruptStatus := suite.interruptHandler.Status() - deepestNestingLevelAttained := -1 - var nodes = spec.Nodes.WithType(types.NodeTypeBeforeAll).Filter(func(n Node) bool { - return nodeState[n.ID] != types.SpecStatePassed - }) - nodes = nodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeBeforeEach)...).SortedByAscendingNestingLevel() - nodes = nodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeJustBeforeEach).SortedByAscendingNestingLevel()...) - nodes = nodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeIt)...) - - var terminatingNode Node - for j := range nodes { - deepestNestingLevelAttained = max(deepestNestingLevelAttained, nodes[j].NestingLevel) - suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(nodes[j], interruptStatus.Channel, spec.Nodes.BestTextFor(nodes[j])) - suite.currentSpecReport.RunTime = time.Since(suite.currentSpecReport.StartTime) - nodeState[nodes[j].ID] = suite.currentSpecReport.State - if suite.currentSpecReport.State != types.SpecStatePassed { - terminatingNode = nodes[j] - break - } - } - - afterAllNodesThatRan := map[uint]bool{} - // pull out some shared code so we aren't repeating ourselves down below. 
this just runs after and cleanup nodes - runAfterAndCleanupNodes := func(nodes Nodes) { - for j := range nodes { - state, failure := suite.runNode(nodes[j], suite.interruptHandler.Status().Channel, spec.Nodes.BestTextFor(nodes[j])) - suite.currentSpecReport.RunTime = time.Since(suite.currentSpecReport.StartTime) - nodeState[nodes[j].ID] = state - if suite.currentSpecReport.State == types.SpecStatePassed || state == types.SpecStateAborted { - suite.currentSpecReport.State = state - suite.currentSpecReport.Failure = failure - if state != types.SpecStatePassed { - terminatingNode = nodes[j] - } - } - if nodes[j].NodeType.Is(types.NodeTypeAfterAll) { - afterAllNodesThatRan[nodes[j].ID] = true - } - } - } - - // pull out a helper that captures the logic of whether or not we should run a given After node. - // there is complexity here stemming from the fact that we allow nested ordered contexts and flakey retries - shouldRunAfterNode := func(n Node) bool { - if n.NodeType.Is(types.NodeTypeAfterEach | types.NodeTypeJustAfterEach) { - return true - } - var id uint - if n.NodeType.Is(types.NodeTypeAfterAll) { - id = n.ID - if afterAllNodesThatRan[id] { //we've already run on this attempt. don't run again. - return false - } - } - if n.NodeType.Is(types.NodeTypeCleanupAfterAll) { - id = n.NodeIDWhereCleanupWasGenerated - } - isLastSpecWithNode := indexOfLastSpecContainingNodeID(id) == i - - switch suite.currentSpecReport.State { - case types.SpecStatePassed: //we've passed so far... - return isLastSpecWithNode //... and we're the last spec with this AfterNode, so we should run it - case types.SpecStateSkipped: //the spec was skipped by the user... - if isLastSpecWithNode { - return true //...we're the last spec, so we should run the AfterNode - } - if terminatingNode.NodeType.Is(types.NodeTypeBeforeAll) && terminatingNode.NestingLevel == n.NestingLevel { - return true //...or, a BeforeAll was skipped and it's at our nesting level, so our subgroup is going to skip - } - case types.SpecStateFailed, types.SpecStatePanicked: // the spec has failed... - if isFinalAttempt { - return true //...if this was the last attempt then we're the last spec to run and so the AfterNode should run - } - if terminatingNode.NodeType.Is(types.NodeTypeBeforeAll) { - //...we'll be rerunning a BeforeAll so we should cleanup after it if... - if n.NodeType.Is(types.NodeTypeAfterAll) && terminatingNode.NestingLevel == n.NestingLevel { - return true //we're at the same nesting level - } - if n.NodeType.Is(types.NodeTypeCleanupAfterAll) && terminatingNode.ID == n.NodeIDWhereCleanupWasGenerated { - return true //we're a DeferCleanup generated by it - } - } - if terminatingNode.NodeType.Is(types.NodeTypeAfterAll) { - //...we'll be rerunning an AfterAll so we should cleanup after it if... - if n.NodeType.Is(types.NodeTypeCleanupAfterAll) && terminatingNode.ID == n.NodeIDWhereCleanupWasGenerated { - return true //we're a DeferCleanup generated by it - } - } - case types.SpecStateInterrupted, types.SpecStateAborted: // ...we've been interrupted and/or aborted - return true //...that means the test run is over and we should clean up the stack. Run the AfterNode - } - return false - } - - // first pass - run all the JustAfterEach, Aftereach, and AfterAlls. Our shoudlRunAfterNode filter function will clean up the AfterAlls for us. 
- afterNodes := spec.Nodes.WithType(types.NodeTypeJustAfterEach).SortedByDescendingNestingLevel() - afterNodes = afterNodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeAfterEach).CopyAppend(spec.Nodes.WithType(types.NodeTypeAfterAll)...).SortedByDescendingNestingLevel()...) - afterNodes = afterNodes.WithinNestingLevel(deepestNestingLevelAttained) - afterNodes = afterNodes.Filter(shouldRunAfterNode) - runAfterAndCleanupNodes(afterNodes) - - // second-pass perhaps we didn't run the AfterAlls but a state change due to an AfterEach now requires us to run the AfterAlls: - afterNodes = spec.Nodes.WithType(types.NodeTypeAfterAll).WithinNestingLevel(deepestNestingLevelAttained).Filter(shouldRunAfterNode) - runAfterAndCleanupNodes(afterNodes) - - // now we run any DeferCleanups - afterNodes = suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterEach).Reverse() - afterNodes = append(afterNodes, suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterAll).Filter(shouldRunAfterNode).Reverse()...) - runAfterAndCleanupNodes(afterNodes) - - // third-pass, perhaps a DeferCleanup failed and now we need to run the AfterAlls. - afterNodes = spec.Nodes.WithType(types.NodeTypeAfterAll).WithinNestingLevel(deepestNestingLevelAttained).Filter(shouldRunAfterNode) - runAfterAndCleanupNodes(afterNodes) - - // and finally - running AfterAlls may have generated some new DeferCleanup nodes, let's run them to finish up - afterNodes = suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterAll).Reverse().Filter(shouldRunAfterNode) - runAfterAndCleanupNodes(afterNodes) - - suite.currentSpecReport.EndTime = time.Now() - suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime) - suite.currentSpecReport.CapturedGinkgoWriterOutput += string(suite.writer.Bytes()) - suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput() - - if suite.currentSpecReport.State.Is(types.SpecStatePassed | types.SpecStateSkipped | types.SpecStateAborted | types.SpecStateInterrupted) { - break - } - } - - //send the spec report to any attached ReportAfterEach blocks - this will update suite.currentSpecReport if failures occur in these blocks - suite.reportEach(spec, types.NodeTypeReportAfterEach) - suite.processCurrentSpecReport() - if suite.currentSpecReport.State.Is(types.SpecStateFailureStates) { - groupSucceeded = false - } - suite.currentSpecReport = types.SpecReport{} + g.suite.selectiveLock.Unlock() } } diff --git a/api/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go b/api/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go index aca7d1c43..8ed86111f 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go +++ b/api/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go @@ -1,39 +1,38 @@ package interrupt_handler import ( - "fmt" "os" "os/signal" - "runtime" "sync" "syscall" "time" - "github.com/onsi/ginkgo/v2/formatter" "github.com/onsi/ginkgo/v2/internal/parallel_support" ) -const TIMEOUT_REPEAT_INTERRUPT_MAXIMUM_DURATION = 30 * time.Second -const TIMEOUT_REPEAT_INTERRUPT_FRACTION_OF_TIMEOUT = 10 -const ABORT_POLLING_INTERVAL = 500 * time.Millisecond -const ABORT_REPEAT_INTERRUPT_DURATION = 30 * time.Second +var ABORT_POLLING_INTERVAL = 500 * time.Millisecond type InterruptCause uint const ( InterruptCauseInvalid InterruptCause = iota - InterruptCauseSignal - InterruptCauseTimeout InterruptCauseAbortByOtherProcess ) +type 
InterruptLevel uint + +const ( + InterruptLevelUninterrupted InterruptLevel = iota + InterruptLevelCleanupAndReport + InterruptLevelReportOnly + InterruptLevelBailOut +) + func (ic InterruptCause) String() string { switch ic { case InterruptCauseSignal: return "Interrupted by User" - case InterruptCauseTimeout: - return "Interrupted by Timeout" case InterruptCauseAbortByOtherProcess: return "Interrupted by Other Ginkgo Process" } @@ -41,37 +40,51 @@ func (ic InterruptCause) String() string { } type InterruptStatus struct { - Interrupted bool - Channel chan interface{} - Cause InterruptCause + Channel chan interface{} + Level InterruptLevel + Cause InterruptCause +} + +func (s InterruptStatus) Interrupted() bool { + return s.Level != InterruptLevelUninterrupted +} + +func (s InterruptStatus) Message() string { + return s.Cause.String() +} + +func (s InterruptStatus) ShouldIncludeProgressReport() bool { + return s.Cause != InterruptCauseAbortByOtherProcess } type InterruptHandlerInterface interface { Status() InterruptStatus - SetInterruptPlaceholderMessage(string) - ClearInterruptPlaceholderMessage() - InterruptMessageWithStackTraces() string } type InterruptHandler struct { - c chan interface{} - lock *sync.Mutex - interrupted bool - interruptPlaceholderMessage string - interruptCause InterruptCause - client parallel_support.Client - stop chan interface{} + c chan interface{} + lock *sync.Mutex + level InterruptLevel + cause InterruptCause + client parallel_support.Client + stop chan interface{} + signals []os.Signal + requestAbortCheck chan interface{} } -func NewInterruptHandler(timeout time.Duration, client parallel_support.Client) *InterruptHandler { +func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) *InterruptHandler { + if len(signals) == 0 { + signals = []os.Signal{os.Interrupt, syscall.SIGTERM} + } handler := &InterruptHandler{ - c: make(chan interface{}), - lock: &sync.Mutex{}, - interrupted: false, - stop: make(chan interface{}), - client: client, + c: make(chan interface{}), + lock: &sync.Mutex{}, + stop: make(chan interface{}), + requestAbortCheck: make(chan interface{}), + client: client, + signals: signals, } - handler.registerForInterrupts(timeout) + handler.registerForInterrupts() return handler } @@ -79,30 +92,28 @@ func (handler *InterruptHandler) Stop() { close(handler.stop) } -func (handler *InterruptHandler) registerForInterrupts(timeout time.Duration) { +func (handler *InterruptHandler) registerForInterrupts() { // os signal handling signalChannel := make(chan os.Signal, 1) - signal.Notify(signalChannel, os.Interrupt, syscall.SIGTERM) - - // timeout handling - var timeoutChannel <-chan time.Time - var timeoutTimer *time.Timer - if timeout > 0 { - timeoutTimer = time.NewTimer(timeout) - timeoutChannel = timeoutTimer.C - } + signal.Notify(signalChannel, handler.signals...) 
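An aside on the constructor used above: when the variadic signals argument is left empty it defaults to os.Interrupt and syscall.SIGTERM, and a caller can opt into additional signals explicitly. A sketch (the SIGUSR1 choice is illustrative only):

    handler := interrupt_handler.NewInterruptHandler(nil, os.Interrupt, syscall.SIGTERM, syscall.SIGUSR1)
    defer handler.Stop()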
// cross-process abort handling - var abortChannel chan bool + var abortChannel chan interface{} if handler.client != nil { - abortChannel = make(chan bool) + abortChannel = make(chan interface{}) go func() { pollTicker := time.NewTicker(ABORT_POLLING_INTERVAL) for { select { case <-pollTicker.C: if handler.client.ShouldAbort() { - abortChannel <- true + close(abortChannel) + pollTicker.Stop() + return + } + case <-handler.requestAbortCheck: + if handler.client.ShouldAbort() { + close(abortChannel) pollTicker.Stop() return } @@ -114,99 +125,53 @@ func (handler *InterruptHandler) registerForInterrupts(timeout time.Duration) { }() } - // listen for any interrupt signals - // note that some (timeouts, cross-process aborts) will only trigger once - // for these we set up a ticker to keep interrupting the suite until it ends - // this ensures any `AfterEach` or `AfterSuite`s that get stuck cleaning up - // get interrupted eventually - go func() { + go func(abortChannel chan interface{}) { var interruptCause InterruptCause - var repeatChannel <-chan time.Time - var repeatTicker *time.Ticker for { select { case <-signalChannel: interruptCause = InterruptCauseSignal - case <-timeoutChannel: - interruptCause = InterruptCauseTimeout - repeatInterruptTimeout := timeout / time.Duration(TIMEOUT_REPEAT_INTERRUPT_FRACTION_OF_TIMEOUT) - if repeatInterruptTimeout > TIMEOUT_REPEAT_INTERRUPT_MAXIMUM_DURATION { - repeatInterruptTimeout = TIMEOUT_REPEAT_INTERRUPT_MAXIMUM_DURATION - } - timeoutTimer.Stop() - repeatTicker = time.NewTicker(repeatInterruptTimeout) - repeatChannel = repeatTicker.C case <-abortChannel: interruptCause = InterruptCauseAbortByOtherProcess - repeatTicker = time.NewTicker(ABORT_REPEAT_INTERRUPT_DURATION) - repeatChannel = repeatTicker.C - case <-repeatChannel: - //do nothing, just interrupt again using the same interruptCause case <-handler.stop: - if timeoutTimer != nil { - timeoutTimer.Stop() - } - if repeatTicker != nil { - repeatTicker.Stop() - } signal.Stop(signalChannel) return } + abortChannel = nil + handler.lock.Lock() - handler.interruptCause = interruptCause - if handler.interruptPlaceholderMessage != "" { - fmt.Println(handler.interruptPlaceholderMessage) + oldLevel := handler.level + handler.cause = interruptCause + if handler.level == InterruptLevelUninterrupted { + handler.level = InterruptLevelCleanupAndReport + } else if handler.level == InterruptLevelCleanupAndReport { + handler.level = InterruptLevelReportOnly + } else if handler.level == InterruptLevelReportOnly { + handler.level = InterruptLevelBailOut + } + if handler.level != oldLevel { + close(handler.c) + handler.c = make(chan interface{}) } - handler.interrupted = true - close(handler.c) - handler.c = make(chan interface{}) handler.lock.Unlock() } - }() + }(abortChannel) } func (handler *InterruptHandler) Status() InterruptStatus { handler.lock.Lock() - defer handler.lock.Unlock() - - return InterruptStatus{ - Interrupted: handler.interrupted, - Channel: handler.c, - Cause: handler.interruptCause, + status := InterruptStatus{ + Level: handler.level, + Channel: handler.c, + Cause: handler.cause, } -} - -func (handler *InterruptHandler) SetInterruptPlaceholderMessage(message string) { - handler.lock.Lock() - defer handler.lock.Unlock() - - handler.interruptPlaceholderMessage = message -} - -func (handler *InterruptHandler) ClearInterruptPlaceholderMessage() { - handler.lock.Lock() - defer handler.lock.Unlock() - - handler.interruptPlaceholderMessage = "" -} + handler.lock.Unlock() -func (handler *InterruptHandler) 
InterruptMessageWithStackTraces() string { - handler.lock.Lock() - out := fmt.Sprintf("%s\n\n", handler.interruptCause.String()) - defer handler.lock.Unlock() - if handler.interruptCause == InterruptCauseAbortByOtherProcess { - return out - } - out += "Here's a stack trace of all running goroutines:\n" - buf := make([]byte, 8192) - for { - n := runtime.Stack(buf, true) - if n < len(buf) { - buf = buf[:n] - break - } - buf = make([]byte, 2*len(buf)) + if handler.client != nil && handler.client.ShouldAbort() && !status.Interrupted() { + close(handler.requestAbortCheck) + <-status.Channel + return handler.Status() } - out += formatter.Fi(1, "%s", string(buf)) - return out + + return status } diff --git a/api/vendor/github.com/onsi/ginkgo/v2/internal/node.go b/api/vendor/github.com/onsi/ginkgo/v2/internal/node.go index 289e4dde2..14c7cf54e 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/internal/node.go +++ b/api/vendor/github.com/onsi/ginkgo/v2/internal/node.go @@ -1,9 +1,11 @@ package internal import ( + "context" "fmt" "reflect" "sort" + "time" "sync" @@ -27,26 +29,38 @@ type Node struct { NodeType types.NodeType Text string - Body func() + Body func(SpecContext) CodeLocation types.CodeLocation NestingLevel int - - SynchronizedBeforeSuiteProc1Body func() []byte - SynchronizedBeforeSuiteAllProcsBody func([]byte) - - SynchronizedAfterSuiteAllProcsBody func() - SynchronizedAfterSuiteProc1Body func() - - ReportEachBody func(types.SpecReport) - ReportAfterSuiteBody func(types.Report) - - MarkedFocus bool - MarkedPending bool - MarkedSerial bool - MarkedOrdered bool - MarkedOncePerOrdered bool - FlakeAttempts int - Labels Labels + HasContext bool + + SynchronizedBeforeSuiteProc1Body func(SpecContext) []byte + SynchronizedBeforeSuiteProc1BodyHasContext bool + SynchronizedBeforeSuiteAllProcsBody func(SpecContext, []byte) + SynchronizedBeforeSuiteAllProcsBodyHasContext bool + + SynchronizedAfterSuiteAllProcsBody func(SpecContext) + SynchronizedAfterSuiteAllProcsBodyHasContext bool + SynchronizedAfterSuiteProc1Body func(SpecContext) + SynchronizedAfterSuiteProc1BodyHasContext bool + + ReportEachBody func(types.SpecReport) + ReportSuiteBody func(types.Report) + + MarkedFocus bool + MarkedPending bool + MarkedSerial bool + MarkedOrdered bool + MarkedContinueOnFailure bool + MarkedOncePerOrdered bool + FlakeAttempts int + MustPassRepeatedly int + Labels Labels + PollProgressAfter time.Duration + PollProgressInterval time.Duration + NodeTimeout time.Duration + SpecTimeout time.Duration + GracePeriod time.Duration NodeIDWhereCleanupWasGenerated uint } @@ -56,18 +70,32 @@ type focusType bool type pendingType bool type serialType bool type orderedType bool +type continueOnFailureType bool type honorsOrderedType bool +type suppressProgressReporting bool const Focus = focusType(true) const Pending = pendingType(true) const Serial = serialType(true) const Ordered = orderedType(true) +const ContinueOnFailure = continueOnFailureType(true) const OncePerOrdered = honorsOrderedType(true) +const SuppressProgressReporting = suppressProgressReporting(true) type FlakeAttempts uint +type MustPassRepeatedly uint type Offset uint type Done chan<- interface{} // Deprecated Done Channel for asynchronous testing type Labels []string +type PollProgressInterval time.Duration +type PollProgressAfter time.Duration +type NodeTimeout time.Duration +type SpecTimeout time.Duration +type GracePeriod time.Duration + +func (l Labels) MatchesLabelFilter(query string) bool { + return types.MustParseLabelFilter(query)(l) +} func 
UnionOfLabels(labels ...Labels) Labels { out := Labels{} @@ -112,12 +140,28 @@ func isDecoration(arg interface{}) bool { return true case t == reflect.TypeOf(Ordered): return true + case t == reflect.TypeOf(ContinueOnFailure): + return true case t == reflect.TypeOf(OncePerOrdered): return true + case t == reflect.TypeOf(SuppressProgressReporting): + return true case t == reflect.TypeOf(FlakeAttempts(0)): return true + case t == reflect.TypeOf(MustPassRepeatedly(0)): + return true case t == reflect.TypeOf(Labels{}): return true + case t == reflect.TypeOf(PollProgressInterval(0)): + return true + case t == reflect.TypeOf(PollProgressAfter(0)): + return true + case t == reflect.TypeOf(NodeTimeout(0)): + return true + case t == reflect.TypeOf(SpecTimeout(0)): + return true + case t == reflect.TypeOf(GracePeriod(0)): + return true case t.Kind() == reflect.Slice && isSliceOfDecorations(arg): return true default: @@ -138,16 +182,23 @@ func isSliceOfDecorations(slice interface{}) bool { return true } +var contextType = reflect.TypeOf(new(context.Context)).Elem() +var specContextType = reflect.TypeOf(new(SpecContext)).Elem() + func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeType, text string, args ...interface{}) (Node, []error) { baseOffset := 2 node := Node{ - ID: UniqueNodeID(), - NodeType: nodeType, - Text: text, - Labels: Labels{}, - CodeLocation: types.NewCodeLocation(baseOffset), - NestingLevel: -1, + ID: UniqueNodeID(), + NodeType: nodeType, + Text: text, + Labels: Labels{}, + CodeLocation: types.NewCodeLocation(baseOffset), + NestingLevel: -1, + PollProgressAfter: -1, + PollProgressInterval: -1, + GracePeriod: -1, } + errors := []error{} appendError := func(err error) { if err != nil { @@ -176,7 +227,6 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy remainingArgs = []interface{}{} //now process the rest of the args for _, arg := range args { - switch t := reflect.TypeOf(arg); { case t == reflect.TypeOf(float64(0)): break //ignore deprecated timeouts @@ -200,16 +250,53 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy if !nodeType.Is(types.NodeTypeContainer) { appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Ordered")) } + case t == reflect.TypeOf(ContinueOnFailure): + node.MarkedContinueOnFailure = bool(arg.(continueOnFailureType)) + if !nodeType.Is(types.NodeTypeContainer) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "ContinueOnFailure")) + } case t == reflect.TypeOf(OncePerOrdered): node.MarkedOncePerOrdered = bool(arg.(honorsOrderedType)) if !nodeType.Is(types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach | types.NodeTypeAfterEach | types.NodeTypeJustAfterEach) { appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "OncePerOrdered")) } + case t == reflect.TypeOf(SuppressProgressReporting): + deprecationTracker.TrackDeprecation(types.Deprecations.SuppressProgressReporting()) case t == reflect.TypeOf(FlakeAttempts(0)): node.FlakeAttempts = int(arg.(FlakeAttempts)) if !nodeType.Is(types.NodeTypesForContainerAndIt) { appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "FlakeAttempts")) } + case t == reflect.TypeOf(MustPassRepeatedly(0)): + node.MustPassRepeatedly = int(arg.(MustPassRepeatedly)) + if !nodeType.Is(types.NodeTypesForContainerAndIt) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, 
nodeType, "MustPassRepeatedly")) + } + case t == reflect.TypeOf(PollProgressAfter(0)): + node.PollProgressAfter = time.Duration(arg.(PollProgressAfter)) + if nodeType.Is(types.NodeTypeContainer) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "PollProgressAfter")) + } + case t == reflect.TypeOf(PollProgressInterval(0)): + node.PollProgressInterval = time.Duration(arg.(PollProgressInterval)) + if nodeType.Is(types.NodeTypeContainer) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "PollProgressInterval")) + } + case t == reflect.TypeOf(NodeTimeout(0)): + node.NodeTimeout = time.Duration(arg.(NodeTimeout)) + if nodeType.Is(types.NodeTypeContainer) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "NodeTimeout")) + } + case t == reflect.TypeOf(SpecTimeout(0)): + node.SpecTimeout = time.Duration(arg.(SpecTimeout)) + if !nodeType.Is(types.NodeTypeIt) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "SpecTimeout")) + } + case t == reflect.TypeOf(GracePeriod(0)): + node.GracePeriod = time.Duration(arg.(GracePeriod)) + if nodeType.Is(types.NodeTypeContainer) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "GracePeriod")) + } case t == reflect.TypeOf(Labels{}): if !nodeType.Is(types.NodeTypesForContainerAndIt) { appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Label")) @@ -223,23 +310,85 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy } } case t.Kind() == reflect.Func: - if node.Body != nil { - appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) - trackedFunctionError = true - break - } - isValid := (t.NumOut() == 0) && (t.NumIn() <= 1) && (t.NumIn() == 0 || t.In(0) == reflect.TypeOf(make(Done))) - if !isValid { - appendError(types.GinkgoErrors.InvalidBodyType(t, node.CodeLocation, nodeType)) - trackedFunctionError = true - break - } - if t.NumIn() == 0 { - node.Body = arg.(func()) + if nodeType.Is(types.NodeTypeContainer) { + if node.Body != nil { + appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } + if t.NumOut() > 0 || t.NumIn() > 0 { + appendError(types.GinkgoErrors.InvalidBodyTypeForContainer(t, node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } + body := arg.(func()) + node.Body = func(SpecContext) { body() } + } else if nodeType.Is(types.NodeTypeReportBeforeEach | types.NodeTypeReportAfterEach) { + if node.ReportEachBody == nil { + node.ReportEachBody = arg.(func(types.SpecReport)) + } else { + appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } + } else if nodeType.Is(types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) { + if node.ReportSuiteBody == nil { + node.ReportSuiteBody = arg.(func(types.Report)) + } else { + appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } + } else if nodeType.Is(types.NodeTypeSynchronizedBeforeSuite) { + if node.SynchronizedBeforeSuiteProc1Body != nil && node.SynchronizedBeforeSuiteAllProcsBody != nil { + appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } + if node.SynchronizedBeforeSuiteProc1Body == nil { + body, 
hasContext := extractSynchronizedBeforeSuiteProc1Body(arg) + if body == nil { + appendError(types.GinkgoErrors.InvalidBodyTypeForSynchronizedBeforeSuiteProc1(t, node.CodeLocation)) + trackedFunctionError = true + } + node.SynchronizedBeforeSuiteProc1Body, node.SynchronizedBeforeSuiteProc1BodyHasContext = body, hasContext + } else if node.SynchronizedBeforeSuiteAllProcsBody == nil { + body, hasContext := extractSynchronizedBeforeSuiteAllProcsBody(arg) + if body == nil { + appendError(types.GinkgoErrors.InvalidBodyTypeForSynchronizedBeforeSuiteAllProcs(t, node.CodeLocation)) + trackedFunctionError = true + } + node.SynchronizedBeforeSuiteAllProcsBody, node.SynchronizedBeforeSuiteAllProcsBodyHasContext = body, hasContext + } + } else if nodeType.Is(types.NodeTypeSynchronizedAfterSuite) { + if node.SynchronizedAfterSuiteAllProcsBody != nil && node.SynchronizedAfterSuiteProc1Body != nil { + appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } + body, hasContext := extractBodyFunction(deprecationTracker, node.CodeLocation, arg) + if body == nil { + appendError(types.GinkgoErrors.InvalidBodyType(t, node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } + if node.SynchronizedAfterSuiteAllProcsBody == nil { + node.SynchronizedAfterSuiteAllProcsBody, node.SynchronizedAfterSuiteAllProcsBodyHasContext = body, hasContext + } else if node.SynchronizedAfterSuiteProc1Body == nil { + node.SynchronizedAfterSuiteProc1Body, node.SynchronizedAfterSuiteProc1BodyHasContext = body, hasContext + } } else { - deprecationTracker.TrackDeprecation(types.Deprecations.Async(), node.CodeLocation) - deprecatedAsyncBody := arg.(func(Done)) - node.Body = func() { deprecatedAsyncBody(make(Done)) } + if node.Body != nil { + appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } + node.Body, node.HasContext = extractBodyFunction(deprecationTracker, node.CodeLocation, arg) + if node.Body == nil { + appendError(types.GinkgoErrors.InvalidBodyType(t, node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } } default: remainingArgs = append(remainingArgs, arg) @@ -251,13 +400,36 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy appendError(types.GinkgoErrors.InvalidDeclarationOfFocusedAndPending(node.CodeLocation, nodeType)) } - if node.Body == nil && !node.MarkedPending && !trackedFunctionError { + if node.MarkedContinueOnFailure && !node.MarkedOrdered { + appendError(types.GinkgoErrors.InvalidContinueOnFailureDecoration(node.CodeLocation)) + } + + hasContext := node.HasContext || node.SynchronizedAfterSuiteProc1BodyHasContext || node.SynchronizedAfterSuiteAllProcsBodyHasContext || node.SynchronizedBeforeSuiteProc1BodyHasContext || node.SynchronizedBeforeSuiteAllProcsBodyHasContext + + if !hasContext && (node.NodeTimeout > 0 || node.SpecTimeout > 0 || node.GracePeriod > 0) && len(errors) == 0 { + appendError(types.GinkgoErrors.InvalidTimeoutOrGracePeriodForNonContextNode(node.CodeLocation, nodeType)) + } + + if !node.NodeType.Is(types.NodeTypeReportBeforeEach|types.NodeTypeReportAfterEach|types.NodeTypeSynchronizedBeforeSuite|types.NodeTypeSynchronizedAfterSuite|types.NodeTypeReportBeforeSuite|types.NodeTypeReportAfterSuite) && node.Body == nil && !node.MarkedPending && !trackedFunctionError { appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType)) } + + if 
node.NodeType.Is(types.NodeTypeSynchronizedBeforeSuite) && !trackedFunctionError && (node.SynchronizedBeforeSuiteProc1Body == nil || node.SynchronizedBeforeSuiteAllProcsBody == nil) { + appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType)) + } + + if node.NodeType.Is(types.NodeTypeSynchronizedAfterSuite) && !trackedFunctionError && (node.SynchronizedAfterSuiteProc1Body == nil || node.SynchronizedAfterSuiteAllProcsBody == nil) { + appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType)) + } + for _, arg := range remainingArgs { appendError(types.GinkgoErrors.UnknownDecorator(node.CodeLocation, nodeType, arg)) } + if node.FlakeAttempts > 0 && node.MustPassRepeatedly > 0 { + appendError(types.GinkgoErrors.InvalidDeclarationOfFlakeAttemptsAndMustPassRepeatedly(node.CodeLocation, nodeType)) + } + if len(errors) > 0 { return Node{}, errors } @@ -265,96 +437,157 @@ return node, errors } -func NewSynchronizedBeforeSuiteNode(proc1Body func() []byte, allProcsBody func([]byte), codeLocation types.CodeLocation) (Node, []error) { - return Node{ - ID: UniqueNodeID(), - NodeType: types.NodeTypeSynchronizedBeforeSuite, - SynchronizedBeforeSuiteProc1Body: proc1Body, - SynchronizedBeforeSuiteAllProcsBody: allProcsBody, - CodeLocation: codeLocation, - }, nil -} - -func NewSynchronizedAfterSuiteNode(allProcsBody func(), proc1Body func(), codeLocation types.CodeLocation) (Node, []error) { - return Node{ - ID: UniqueNodeID(), - NodeType: types.NodeTypeSynchronizedAfterSuite, - SynchronizedAfterSuiteAllProcsBody: allProcsBody, - SynchronizedAfterSuiteProc1Body: proc1Body, - CodeLocation: codeLocation, - }, nil -} - -func NewReportBeforeEachNode(body func(types.SpecReport), codeLocation types.CodeLocation) (Node, []error) { - return Node{ - ID: UniqueNodeID(), - NodeType: types.NodeTypeReportBeforeEach, - ReportEachBody: body, - CodeLocation: codeLocation, - NestingLevel: -1, - }, nil -} - -func NewReportAfterEachNode(body func(types.SpecReport), codeLocation types.CodeLocation) (Node, []error) { - return Node{ - ID: UniqueNodeID(), - NodeType: types.NodeTypeReportAfterEach, - ReportEachBody: body, - CodeLocation: codeLocation, - NestingLevel: -1, - }, nil -} - -func NewReportAfterSuiteNode(text string, body func(types.Report), codeLocation types.CodeLocation) (Node, []error) { - return Node{ - ID: UniqueNodeID(), - Text: text, - NodeType: types.NodeTypeReportAfterSuite, - ReportAfterSuiteBody: body, - CodeLocation: codeLocation, - }, nil +var doneType = reflect.TypeOf(make(Done)) + +func extractBodyFunction(deprecationTracker *types.DeprecationTracker, cl types.CodeLocation, arg interface{}) (func(SpecContext), bool) { + t := reflect.TypeOf(arg) + if t.NumOut() > 0 || t.NumIn() > 1 { + return nil, false + } + if t.NumIn() == 1 { + if t.In(0) == doneType { + deprecationTracker.TrackDeprecation(types.Deprecations.Async(), cl) + deprecatedAsyncBody := arg.(func(Done)) + return func(SpecContext) { deprecatedAsyncBody(make(Done)) }, false + } else if t.In(0).Implements(specContextType) { + return arg.(func(SpecContext)), true + } else if t.In(0).Implements(contextType) { + body := arg.(func(context.Context)) + return func(c SpecContext) { body(c) }, true + } + + return nil, false + } + + body := arg.(func()) + return func(SpecContext) { body() }, false }
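extractBodyFunction above is what lets a single node accept several body shapes: func(), the deprecated func(Done), func(SpecContext), and func(context.Context). A hedged sketch of the resulting call-site forms (spec names and durations are invented):

    var _ = Describe("body signatures", func() {
        It("runs a plain body", func() {})

        It("runs an interruptible body", func(ctx SpecContext) {
            select {
            case <-time.After(100 * time.Millisecond): // simulated work
            case <-ctx.Done(): // node timed out or the suite was interrupted
            }
        }, NodeTimeout(time.Second))

        It("accepts a standard context too", func(ctx context.Context) {
            // identical semantics; the reflection detects context.Context
        }, SpecTimeout(time.Second))
    })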
-func NewCleanupNode(fail func(string, types.CodeLocation), args ...interface{}) (Node, []error) { - baseOffset := 2 - node := Node{ - ID: UniqueNodeID(), - NodeType: types.NodeTypeCleanupInvalid, - CodeLocation: types.NewCodeLocation(baseOffset), - NestingLevel: -1, +var byteType = reflect.TypeOf([]byte{}) + +func extractSynchronizedBeforeSuiteProc1Body(arg interface{}) (func(SpecContext) []byte, bool) { + t := reflect.TypeOf(arg) + v := reflect.ValueOf(arg) + + if t.NumOut() > 1 || t.NumIn() > 1 { + return nil, false + } else if t.NumOut() == 1 && t.Out(0) != byteType { + return nil, false + } else if t.NumIn() == 1 && !t.In(0).Implements(contextType) { + return nil, false } - remainingArgs := []interface{}{} - for _, arg := range args { + hasContext := t.NumIn() == 1 + + return func(c SpecContext) []byte { + var out []reflect.Value + if hasContext { + out = v.Call([]reflect.Value{reflect.ValueOf(c)}) + } else { + out = v.Call([]reflect.Value{}) + } + if len(out) == 1 { + return (out[0].Interface()).([]byte) + } else { + return []byte{} + } + }, hasContext +} + +func extractSynchronizedBeforeSuiteAllProcsBody(arg interface{}) (func(SpecContext, []byte), bool) { + t := reflect.TypeOf(arg) + v := reflect.ValueOf(arg) + hasContext, hasByte := false, false + + if t.NumOut() > 0 || t.NumIn() > 2 { + return nil, false + } else if t.NumIn() == 2 && t.In(0).Implements(contextType) && t.In(1) == byteType { + hasContext, hasByte = true, true + } else if t.NumIn() == 1 && t.In(0).Implements(contextType) { + hasContext = true + } else if t.NumIn() == 1 && t.In(0) == byteType { + hasByte = true + } else if t.NumIn() != 0 { + return nil, false + } + + return func(c SpecContext, b []byte) { + in := []reflect.Value{} + if hasContext { + in = append(in, reflect.ValueOf(c)) + } + if hasByte { + in = append(in, reflect.ValueOf(b)) + } + v.Call(in) + }, hasContext +} + +var errInterface = reflect.TypeOf((*error)(nil)).Elem() + +func NewCleanupNode(deprecationTracker *types.DeprecationTracker, fail func(string, types.CodeLocation), args ...interface{}) (Node, []error) { + decorations, remainingArgs := PartitionDecorations(args...) 
+ baseOffset := 2 + cl := types.NewCodeLocation(baseOffset) + finalArgs := []interface{}{} + for _, arg := range decorations { switch t := reflect.TypeOf(arg); { case t == reflect.TypeOf(Offset(0)): - node.CodeLocation = types.NewCodeLocation(baseOffset + int(arg.(Offset))) + cl = types.NewCodeLocation(baseOffset + int(arg.(Offset))) case t == reflect.TypeOf(types.CodeLocation{}): - node.CodeLocation = arg.(types.CodeLocation) + cl = arg.(types.CodeLocation) default: - remainingArgs = append(remainingArgs, arg) + finalArgs = append(finalArgs, arg) } } + finalArgs = append(finalArgs, cl) if len(remainingArgs) == 0 { - return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(node.CodeLocation)} + return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(cl)} } + callback := reflect.ValueOf(remainingArgs[0]) - if !(callback.Kind() == reflect.Func && callback.Type().NumOut() <= 1) { - return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(node.CodeLocation)} + if !(callback.Kind() == reflect.Func) { + return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(cl)} } + callArgs := []reflect.Value{} for _, arg := range remainingArgs[1:] { callArgs = append(callArgs, reflect.ValueOf(arg)) } - cl := node.CodeLocation - node.Body = func() { - out := callback.Call(callArgs) - if len(out) == 1 && !out[0].IsNil() { - fail(fmt.Sprintf("DeferCleanup callback returned error: %v", out[0]), cl) + + hasContext := false + t := callback.Type() + if t.NumIn() > 0 { + if t.In(0).Implements(specContextType) { + hasContext = true + } else if t.In(0).Implements(contextType) && (len(callArgs) == 0 || !callArgs[0].Type().Implements(contextType)) { + hasContext = true } } - return node, nil + handleFailure := func(out []reflect.Value) { + if len(out) == 0 { + return + } + last := out[len(out)-1] + if last.Type().Implements(errInterface) && !last.IsNil() { + fail(fmt.Sprintf("DeferCleanup callback returned error: %v", last), cl) + } + } + + if hasContext { + finalArgs = append(finalArgs, func(c SpecContext) { + out := callback.Call(append([]reflect.Value{reflect.ValueOf(c)}, callArgs...)) + handleFailure(out) + }) + } else { + finalArgs = append(finalArgs, func() { + out := callback.Call(callArgs) + handleFailure(out) + }) + } + + return NewNode(deprecationTracker, types.NodeTypeCleanupInvalid, "", finalArgs...)
}
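Given the handleFailure wiring above, a DeferCleanup callback may return an error and may accept a context; a short sketch (the temp-file details are invented):

    var _ = It("cleans up with error reporting", func() {
        f, err := os.CreateTemp("", "scratch")
        if err != nil {
            Fail(err.Error())
        }
        // A returned error fails the spec via the fail callback wired above.
        DeferCleanup(f.Close)
        // A context-accepting callback becomes an interruptible cleanup node.
        DeferCleanup(func(ctx SpecContext) error {
            return os.Remove(f.Name())
        }, NodeTimeout(time.Second))
    })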
} func (n Node) IsZero() bool { @@ -642,6 +875,35 @@ func (n Nodes) FirstNodeMarkedOrdered() Node { return Node{} } +func (n Nodes) IndexOfFirstNodeMarkedOrdered() int { + for i := range n { + if n[i].MarkedOrdered { + return i + } + } + return -1 +} + +func (n Nodes) GetMaxFlakeAttempts() int { + maxFlakeAttempts := 0 + for i := range n { + if n[i].FlakeAttempts > 0 { + maxFlakeAttempts = n[i].FlakeAttempts + } + } + return maxFlakeAttempts +} + +func (n Nodes) GetMaxMustPassRepeatedly() int { + maxMustPassRepeatedly := 0 + for i := range n { + if n[i].MustPassRepeatedly > 0 { + maxMustPassRepeatedly = n[i].MustPassRepeatedly + } + } + return maxMustPassRepeatedly +} + func unrollInterfaceSlice(args interface{}) []interface{} { v := reflect.ValueOf(args) if v.Kind() != reflect.Slice { diff --git a/api/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go b/api/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go index 161be820c..84eea0a59 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go +++ b/api/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go @@ -7,6 +7,65 @@ import ( "github.com/onsi/ginkgo/v2/types" ) +type SortableSpecs struct { + Specs Specs + Indexes []int +} + +func NewSortableSpecs(specs Specs) *SortableSpecs { + indexes := make([]int, len(specs)) + for i := range specs { + indexes[i] = i + } + return &SortableSpecs{ + Specs: specs, + Indexes: indexes, + } +} +func (s *SortableSpecs) Len() int { return len(s.Indexes) } +func (s *SortableSpecs) Swap(i, j int) { s.Indexes[i], s.Indexes[j] = s.Indexes[j], s.Indexes[i] } +func (s *SortableSpecs) Less(i, j int) bool { + a, b := s.Specs[s.Indexes[i]], s.Specs[s.Indexes[j]] + + aNodes, bNodes := a.Nodes.WithType(types.NodeTypesForContainerAndIt), b.Nodes.WithType(types.NodeTypesForContainerAndIt) + + firstOrderedAIdx, firstOrderedBIdx := aNodes.IndexOfFirstNodeMarkedOrdered(), bNodes.IndexOfFirstNodeMarkedOrdered() + if firstOrderedAIdx > -1 && firstOrderedBIdx > -1 && aNodes[firstOrderedAIdx].ID == bNodes[firstOrderedBIdx].ID { + // strictly preserve order within an ordered container. ID will track this as IDs are generated monotonically + return aNodes.FirstNodeWithType(types.NodeTypeIt).ID < bNodes.FirstNodeWithType(types.NodeTypeIt).ID + } + + // if either spec is in an ordered container - only use the nodes up to the outermost ordered container + if firstOrderedAIdx > -1 { + aNodes = aNodes[:firstOrderedAIdx+1] + } + if firstOrderedBIdx > -1 { + bNodes = bNodes[:firstOrderedBIdx+1] + } + + for i := 0; i < len(aNodes) && i < len(bNodes); i++ { + aCL, bCL := aNodes[i].CodeLocation, bNodes[i].CodeLocation + if aCL.FileName != bCL.FileName { + return aCL.FileName < bCL.FileName + } + if aCL.LineNumber != bCL.LineNumber { + return aCL.LineNumber < bCL.LineNumber + } + } + // either everything is equal or we have different lengths of CLs + if len(aNodes) != len(bNodes) { + return len(aNodes) < len(bNodes) + } + // ok, now we are sure everything was equal. so we use the spec text to break ties + for i := 0; i < len(aNodes); i++ { + if aNodes[i].Text != bNodes[i].Text { + return aNodes[i].Text < bNodes[i].Text + } + } + // ok, all those texts were equal. we'll use the ID of the most deeply nested node as a last resort + return aNodes[len(aNodes)-1].ID < bNodes[len(bNodes)-1].ID +} + type GroupedSpecIndices []SpecIndices type SpecIndices []int @@ -28,12 +87,17 @@ func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices, // Seed a new random source based on the configured random seed.
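// a rand.Source seeded with the same value always yields the same sequence, so every parallel process computes an identical shuffle from the shared RandomSeed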
r := rand.New(rand.NewSource(suiteConfig.RandomSeed)) - // first break things into execution groups + // first, we sort the entire suite to ensure a deterministic order. the sort is performed by filename, then line number, and then spec text. this ensures every parallel process has the exact same spec order and is only necessary to cover the edge case where the user iterates over a map to generate specs. + sortableSpecs := NewSortableSpecs(specs) + sort.Sort(sortableSpecs) + + // then we break things into execution groups // a group represents a single unit of execution and is a collection of SpecIndices // usually a group is just a single spec, however ordered containers must be preserved as a single group executionGroupIDs := []uint{} executionGroups := map[uint]SpecIndices{} - for idx, spec := range specs { + for _, idx := range sortableSpecs.Indexes { + spec := specs[idx] groupNode := spec.Nodes.FirstNodeMarkedOrdered() if groupNode.IsZero() { groupNode = spec.Nodes.FirstNodeWithType(types.NodeTypeIt) @@ -48,7 +112,6 @@ func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices, // we shuffle outermost containers. so we need to form shufflable groupings of GroupIDs shufflableGroupingIDs := []uint{} shufflableGroupingIDToGroupIDs := map[uint][]uint{} - shufflableGroupingsIDToSortKeys := map[uint]string{} // for each execution group we're going to have to pick a node to represent how the // execution group is grouped for shuffling: @@ -57,7 +120,7 @@ func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices, nodeTypesToShuffle = types.NodeTypeIt } - //so, fo reach execution group: + //so, for each execution group: for _, groupID := range executionGroupIDs { // pick out a representative spec representativeSpec := specs[executionGroups[groupID][0]] @@ -72,22 +135,9 @@ func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices, if len(shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID]) == 1 { // record the shuffleable group ID shufflableGroupingIDs = append(shufflableGroupingIDs, shufflableGroupingNode.ID) - // and record the sort key to use - shufflableGroupingsIDToSortKeys[shufflableGroupingNode.ID] = shufflableGroupingNode.CodeLocation.String() } } - // now we sort the shufflable groups by the sort key. 
We use the shufflable group nodes code location and break ties using its node id - sort.SliceStable(shufflableGroupingIDs, func(i, j int) bool { - keyA := shufflableGroupingsIDToSortKeys[shufflableGroupingIDs[i]] - keyB := shufflableGroupingsIDToSortKeys[shufflableGroupingIDs[j]] - if keyA == keyB { - return shufflableGroupingIDs[i] < shufflableGroupingIDs[j] - } else { - return keyA < keyB - } - }) - // now we permute the sorted shufflable grouping IDs and build the ordered Groups orderedGroups := GroupedSpecIndices{} permutation := r.Perm(len(shufflableGroupingIDs)) diff --git a/api/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go b/api/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go index b59918a81..4a1c09461 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go +++ b/api/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go @@ -143,7 +143,7 @@ func (interceptor *genericOutputInterceptor) ResumeIntercepting() { go startPipeFactory(interceptor.pipeChannel, interceptor.shutdown) } - // Now we make a pipe, we'll use this to redirect the input to the 1 and 2 file descriptors (this is how everything else in the world is tring to log to stdout and stderr) + // Now we make a pipe, we'll use this to redirect the input to the 1 and 2 file descriptors (this is how everything else in the world is trying to log to stdout and stderr) // we get the pipe from our pipe factory. it runs in the background so we can request the next pipe while the spec being intercepted is running interceptor.pipe = <-interceptor.pipeChannel diff --git a/api/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go b/api/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go index e875001c2..8a237f446 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go +++ b/api/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go @@ -26,9 +26,20 @@ func (impl *dupSyscallOutputInterceptorImpl) CreateStdoutStderrClones() (*os.Fil stdoutCloneFD, _ := unix.Dup(1) stderrCloneFD, _ := unix.Dup(2) + // Important: set the fds to FD_CLOEXEC to prevent them from leaking into child processes + // https://github.com/onsi/ginkgo/issues/1191 + flags, err := unix.FcntlInt(uintptr(stdoutCloneFD), unix.F_GETFD, 0) + if err == nil { + unix.FcntlInt(uintptr(stdoutCloneFD), unix.F_SETFD, flags|unix.FD_CLOEXEC) + } + flags, err = unix.FcntlInt(uintptr(stderrCloneFD), unix.F_GETFD, 0) + if err == nil { + unix.FcntlInt(uintptr(stderrCloneFD), unix.F_SETFD, flags|unix.FD_CLOEXEC) + } + // And then wrap the clone file descriptors in files.
// One benefit of this (that we don't use yet) is that we can actually write - // to these files to emit output to the console evne though we're intercepting output + // to these files to emit output to the console even though we're intercepting output stdoutClone := os.NewFile(uintptr(stdoutCloneFD), "stdout-clone") stderrClone := os.NewFile(uintptr(stderrCloneFD), "stderr-clone") diff --git a/api/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go b/api/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go index 7d5cb0b63..b3cd64292 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go +++ b/api/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go @@ -42,6 +42,8 @@ type Client interface { PostSuiteWillBegin(report types.Report) error PostDidRun(report types.SpecReport) error PostSuiteDidEnd(report types.Report) error + PostReportBeforeSuiteCompleted(state types.SpecState) error + BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) BlockUntilNonprimaryProcsHaveFinished() error @@ -49,6 +51,7 @@ type Client interface { FetchNextCounter() (int, error) PostAbort() error ShouldAbort() bool + PostEmitProgressReport(report types.ProgressReport) error Write(p []byte) (int, error) } diff --git a/api/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go b/api/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go index d076d5d1c..6547c7a66 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go +++ b/api/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go @@ -94,6 +94,23 @@ func (client *httpClient) PostSuiteDidEnd(report types.Report) error { return client.post("/suite-did-end", report) } +func (client *httpClient) PostEmitProgressReport(report types.ProgressReport) error { + return client.post("/progress-report", report) +} + +func (client *httpClient) PostReportBeforeSuiteCompleted(state types.SpecState) error { + return client.post("/report-before-suite-completed", state) +} + +func (client *httpClient) BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) { + var state types.SpecState + err := client.poll("/report-before-suite-state", &state) + if err == ErrorGone { + return types.SpecStateFailed, nil + } + return state, err +} + func (client *httpClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error { beforeSuiteState := BeforeSuiteState{ State: state, diff --git a/api/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go b/api/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go index ca1dcdca5..d2c71ab1b 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go +++ b/api/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go @@ -26,7 +26,7 @@ type httpServer struct { handler *ServerHandler } -//Create a new server, automatically selecting a port +// Create a new server, automatically selecting a port func newHttpServer(parallelTotal int, reporter reporters.Reporter) (*httpServer, error) { listener, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { @@ -38,7 +38,7 @@ func newHttpServer(parallelTotal int, reporter reporters.Reporter) (*httpServer, }, nil } -//Start the server. 
You don't need to `go s.Start()`, just `s.Start()` +// Start the server. You don't need to `go s.Start()`, just `s.Start()` func (server *httpServer) Start() { httpServer := &http.Server{} mux := http.NewServeMux() @@ -49,8 +49,11 @@ func (server *httpServer) Start() { mux.HandleFunc("/did-run", server.didRun) mux.HandleFunc("/suite-did-end", server.specSuiteDidEnd) mux.HandleFunc("/emit-output", server.emitOutput) + mux.HandleFunc("/progress-report", server.emitProgressReport) //synchronization endpoints + mux.HandleFunc("/report-before-suite-completed", server.handleReportBeforeSuiteCompleted) + mux.HandleFunc("/report-before-suite-state", server.handleReportBeforeSuiteState) mux.HandleFunc("/before-suite-completed", server.handleBeforeSuiteCompleted) mux.HandleFunc("/before-suite-state", server.handleBeforeSuiteState) mux.HandleFunc("/have-nonprimary-procs-finished", server.handleHaveNonprimaryProcsFinished) @@ -62,12 +65,12 @@ func (server *httpServer) Start() { go httpServer.Serve(server.listener) } -//Stop the server +// Stop the server func (server *httpServer) Close() { server.listener.Close() } -//The address the server can be reached it. Pass this into the `ForwardingReporter`. +// The address the server can be reached at. Pass this into the `ForwardingReporter`. func (server *httpServer) Address() string { return "http://" + server.listener.Addr().String() } @@ -92,7 +95,7 @@ func (server *httpServer) RegisterAlive(node int, alive func() bool) { // Streaming Endpoints // -//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters` +// The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters` func (server *httpServer) decode(writer http.ResponseWriter, request *http.Request, object interface{}) bool { defer request.Body.Close() if json.NewDecoder(request.Body).Decode(object) != nil { @@ -155,6 +158,31 @@ func (server *httpServer) emitOutput(writer http.ResponseWriter, request *http.R server.handleError(server.handler.EmitOutput(output, &n), writer) } +func (server *httpServer) emitProgressReport(writer http.ResponseWriter, request *http.Request) { + var report types.ProgressReport + if !server.decode(writer, request, &report) { + return + } + server.handleError(server.handler.EmitProgressReport(report, voidReceiver), writer) +} + +func (server *httpServer) handleReportBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) { + var state types.SpecState + if !server.decode(writer, request, &state) { + return + } + + server.handleError(server.handler.ReportBeforeSuiteCompleted(state, voidReceiver), writer) +} + +func (server *httpServer) handleReportBeforeSuiteState(writer http.ResponseWriter, request *http.Request) { + var state types.SpecState + if server.handleError(server.handler.ReportBeforeSuiteState(voidSender, &state), writer) { + return + } + json.NewEncoder(writer).Encode(state) +} + func (server *httpServer) handleBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) { var beforeSuiteState BeforeSuiteState if !server.decode(writer, request, &beforeSuiteState) { diff --git a/api/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go b/api/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go index 4e83b0970..59e8e6fd0 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go +++ b/api/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go @@ -72,6 +72,23 @@ func
(client *rpcClient) Write(p []byte) (int, error) { return n, err } +func (client *rpcClient) PostEmitProgressReport(report types.ProgressReport) error { + return client.client.Call("Server.EmitProgressReport", report, voidReceiver) +} + +func (client *rpcClient) PostReportBeforeSuiteCompleted(state types.SpecState) error { + return client.client.Call("Server.ReportBeforeSuiteCompleted", state, voidReceiver) +} + +func (client *rpcClient) BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) { + var state types.SpecState + err := client.poll("Server.ReportBeforeSuiteState", &state) + if err == ErrorGone { + return types.SpecStateFailed, nil + } + return state, err +} + func (client *rpcClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error { beforeSuiteState := BeforeSuiteState{ State: state, diff --git a/api/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go b/api/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go index ca471cf39..a6d98793e 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go +++ b/api/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go @@ -18,16 +18,17 @@ var voidSender Void // It handles all the business logic to avoid duplication between the two servers type ServerHandler struct { - done chan interface{} - outputDestination io.Writer - reporter reporters.Reporter - alives []func() bool - lock *sync.Mutex - beforeSuiteState BeforeSuiteState - parallelTotal int - counter int - counterLock *sync.Mutex - shouldAbort bool + done chan interface{} + outputDestination io.Writer + reporter reporters.Reporter + alives []func() bool + lock *sync.Mutex + beforeSuiteState BeforeSuiteState + reportBeforeSuiteState types.SpecState + parallelTotal int + counter int + counterLock *sync.Mutex + shouldAbort bool numSuiteDidBegins int numSuiteDidEnds int @@ -37,11 +38,12 @@ type ServerHandler struct { func newServerHandler(parallelTotal int, reporter reporters.Reporter) *ServerHandler { return &ServerHandler{ - reporter: reporter, - lock: &sync.Mutex{}, - counterLock: &sync.Mutex{}, - alives: make([]func() bool, parallelTotal), - beforeSuiteState: BeforeSuiteState{Data: nil, State: types.SpecStateInvalid}, + reporter: reporter, + lock: &sync.Mutex{}, + counterLock: &sync.Mutex{}, + alives: make([]func() bool, parallelTotal), + beforeSuiteState: BeforeSuiteState{Data: nil, State: types.SpecStateInvalid}, + parallelTotal: parallelTotal, outputDestination: os.Stdout, done: make(chan interface{}), @@ -108,6 +110,13 @@ func (handler *ServerHandler) EmitOutput(output []byte, n *int) error { return err } +func (handler *ServerHandler) EmitProgressReport(report types.ProgressReport, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + handler.reporter.EmitProgressReport(report) + return nil +} + func (handler *ServerHandler) registerAlive(proc int, alive func() bool) { handler.lock.Lock() defer handler.lock.Unlock() @@ -133,6 +142,29 @@ func (handler *ServerHandler) haveNonprimaryProcsFinished() bool { return true } +func (handler *ServerHandler) ReportBeforeSuiteCompleted(reportBeforeSuiteState types.SpecState, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + handler.reportBeforeSuiteState = reportBeforeSuiteState + + return nil +} + +func (handler *ServerHandler) ReportBeforeSuiteState(_ Void, reportBeforeSuiteState *types.SpecState) error { + proc1IsAlive := handler.procIsAlive(1) + 
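// capture proc 1's liveness before taking the lock: if no state has been posted yet, a live proc 1 means ErrorEarly (keep polling) while a dead proc 1 means ErrorGone (it exited without reporting)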
handler.lock.Lock() + defer handler.lock.Unlock() + if handler.reportBeforeSuiteState == types.SpecStateInvalid { + if proc1IsAlive { + return ErrorEarly + } else { + return ErrorGone + } + } + *reportBeforeSuiteState = handler.reportBeforeSuiteState + return nil +} + func (handler *ServerHandler) BeforeSuiteCompleted(beforeSuiteState BeforeSuiteState, _ *Void) error { handler.lock.Lock() defer handler.lock.Unlock() diff --git a/api/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go b/api/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go new file mode 100644 index 000000000..11269cf1f --- /dev/null +++ b/api/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go @@ -0,0 +1,287 @@ +package internal + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io" + "os" + "os/signal" + "path/filepath" + "runtime" + "strconv" + "strings" + "time" + + "github.com/onsi/ginkgo/v2/types" +) + +var _SOURCE_CACHE = map[string][]string{} + +type ProgressSignalRegistrar func(func()) context.CancelFunc + +func RegisterForProgressSignal(handler func()) context.CancelFunc { + signalChannel := make(chan os.Signal, 1) + if len(PROGRESS_SIGNALS) > 0 { + signal.Notify(signalChannel, PROGRESS_SIGNALS...) + } + ctx, cancel := context.WithCancel(context.Background()) + go func() { + for { + select { + case <-signalChannel: + handler() + case <-ctx.Done(): + signal.Stop(signalChannel) + return + } + } + }() + + return cancel +} + +type ProgressStepCursor struct { + Text string + CodeLocation types.CodeLocation + StartTime time.Time +} + +func NewProgressReport(isRunningInParallel bool, report types.SpecReport, currentNode Node, currentNodeStartTime time.Time, currentStep types.SpecEvent, gwOutput string, timelineLocation types.TimelineLocation, additionalReports []string, sourceRoots []string, includeAll bool) (types.ProgressReport, error) { + pr := types.ProgressReport{ + ParallelProcess: report.ParallelProcess, + RunningInParallel: isRunningInParallel, + ContainerHierarchyTexts: report.ContainerHierarchyTexts, + LeafNodeText: report.LeafNodeText, + LeafNodeLocation: report.LeafNodeLocation, + SpecStartTime: report.StartTime, + + CurrentNodeType: currentNode.NodeType, + CurrentNodeText: currentNode.Text, + CurrentNodeLocation: currentNode.CodeLocation, + CurrentNodeStartTime: currentNodeStartTime, + + CurrentStepText: currentStep.Message, + CurrentStepLocation: currentStep.CodeLocation, + CurrentStepStartTime: currentStep.TimelineLocation.Time, + + AdditionalReports: additionalReports, + + CapturedGinkgoWriterOutput: gwOutput, + TimelineLocation: timelineLocation, + } + + goroutines, err := extractRunningGoroutines() + if err != nil { + return pr, err + } + pr.Goroutines = goroutines + + // now we want to try to find goroutines of interest. 
these will be goroutines that have any function calls with code in packagesOfInterest: + packagesOfInterest := map[string]bool{} + packageFromFilename := func(filename string) string { + return filepath.Dir(filename) + } + addPackageFor := func(filename string) { + if filename != "" { + packagesOfInterest[packageFromFilename(filename)] = true + } + } + isPackageOfInterest := func(filename string) bool { + stackPackage := packageFromFilename(filename) + for packageOfInterest := range packagesOfInterest { + if strings.HasPrefix(stackPackage, packageOfInterest) { + return true + } + } + return false + } + for _, location := range report.ContainerHierarchyLocations { + addPackageFor(location.FileName) + } + addPackageFor(report.LeafNodeLocation.FileName) + addPackageFor(currentNode.CodeLocation.FileName) + addPackageFor(currentStep.CodeLocation.FileName) + + //First, we find the SpecGoroutine - this will be the goroutine that includes `runNode` + specGoRoutineIdx := -1 + runNodeFunctionCallIdx := -1 +OUTER: + for goroutineIdx, goroutine := range pr.Goroutines { + for functionCallIdx, functionCall := range goroutine.Stack { + if strings.Contains(functionCall.Function, "ginkgo/v2/internal.(*Suite).runNode.func") { + specGoRoutineIdx = goroutineIdx + runNodeFunctionCallIdx = functionCallIdx + break OUTER + } + } + } + + //Now, we find the first non-Ginkgo function call + if specGoRoutineIdx > -1 { + for runNodeFunctionCallIdx >= 0 { + fn := goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Function + file := goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Filename + // these are all things that could potentially happen from within ginkgo + if strings.Contains(fn, "ginkgo/v2/internal") || strings.Contains(fn, "reflect.Value") || strings.Contains(file, "ginkgo/table_dsl") || strings.Contains(file, "ginkgo/core_dsl") { + runNodeFunctionCallIdx-- + continue + } + //found it! let's add its package of interest + addPackageFor(goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Filename) + break + } + } + + ginkgoEntryPointIdx := -1 +OUTER_GINKGO_ENTRY_POINT: + for goroutineIdx, goroutine := range pr.Goroutines { + for _, functionCall := range goroutine.Stack { + if strings.Contains(functionCall.Function, "ginkgo/v2.RunSpecs") { + ginkgoEntryPointIdx = goroutineIdx + break OUTER_GINKGO_ENTRY_POINT + } + } + } + + // Now we go through all goroutines and highlight any lines with packages in `packagesOfInterest` + // Any goroutines with highlighted lines end up in the HighlightedGoroutines + for goroutineIdx, goroutine := range pr.Goroutines { + if goroutineIdx == ginkgoEntryPointIdx { + continue + } + if goroutineIdx == specGoRoutineIdx { + pr.Goroutines[goroutineIdx].IsSpecGoroutine = true + } + for functionCallIdx, functionCall := range goroutine.Stack { + if isPackageOfInterest(functionCall.Filename) { + goroutine.Stack[functionCallIdx].Highlight = true + goroutine.Stack[functionCallIdx].Source, goroutine.Stack[functionCallIdx].SourceHighlight = fetchSource(functionCall.Filename, functionCall.Line, 2, sourceRoots) + } + } + } + + if !includeAll { + goroutines := []types.Goroutine{pr.SpecGoroutine()} + goroutines = append(goroutines, pr.HighlightedGoroutines()...)
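+		// when includeAll is false only the spec goroutine and the goroutines with highlighted frames are kept in the report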
+ pr.Goroutines = goroutines + } + + return pr, nil +} + +func extractRunningGoroutines() ([]types.Goroutine, error) { + var stack []byte + for size := 64 * 1024; ; size *= 2 { + stack = make([]byte, size) + if n := runtime.Stack(stack, true); n < size { + stack = stack[:n] + break + } + } + r := bufio.NewReader(bytes.NewReader(stack)) + out := []types.Goroutine{} + idx := -1 + for { + line, err := r.ReadString('\n') + if err == io.EOF { + break + } + + line = strings.TrimSuffix(line, "\n") + + //skip blank lines + if line == "" { + continue + } + + //parse headers for new goroutine frames + if strings.HasPrefix(line, "goroutine") { + out = append(out, types.Goroutine{}) + idx = len(out) - 1 + + line = strings.TrimPrefix(line, "goroutine ") + line = strings.TrimSuffix(line, ":") + fields := strings.SplitN(line, " ", 2) + if len(fields) != 2 { + return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid goroutine frame header: %s", line)) + } + out[idx].ID, err = strconv.ParseUint(fields[0], 10, 64) + if err != nil { + return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid goroutine ID: %s", fields[1])) + } + + out[idx].State = strings.TrimSuffix(strings.TrimPrefix(fields[1], "["), "]") + continue + } + + //if we are here we must be at a function call entry in the stack + functionCall := types.FunctionCall{ + Function: strings.TrimPrefix(line, "created by "), // no need to track 'created by' + } + + line, err = r.ReadString('\n') + line = strings.TrimSuffix(line, "\n") + if err == io.EOF { + return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid function call: %s -- missing file name and line number", functionCall.Function)) + } + line = strings.TrimLeft(line, " \t") + delimiterIdx := strings.LastIndex(line, ":") + if delimiterIdx == -1 { + return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid filename and line number: %s", line)) + } + functionCall.Filename = line[:delimiterIdx] + line = strings.Split(line[delimiterIdx+1:], " ")[0] + lineNumber, err := strconv.ParseInt(line, 10, 64) + functionCall.Line = int(lineNumber) + if err != nil { + return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid function call line number: %s\n%s", line, err.Error())) + } + out[idx].Stack = append(out[idx].Stack, functionCall) + } + + return out, nil +} + +func fetchSource(filename string, lineNumber int, span int, configuredSourceRoots []string) ([]string, int) { + if filename == "" { + return []string{}, 0 + } + + var lines []string + var ok bool + if lines, ok = _SOURCE_CACHE[filename]; !ok { + sourceRoots := []string{""} + sourceRoots = append(sourceRoots, configuredSourceRoots...) 
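+		// the empty root tries the filename as-is; each configured source root is then joined to the filename and tried in turn until one can be read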
+ var data []byte + var err error + var found bool + for _, root := range sourceRoots { + data, err = os.ReadFile(filepath.Join(root, filename)) + if err == nil { + found = true + break + } + } + if !found { + return []string{}, 0 + } + lines = strings.Split(string(data), "\n") + _SOURCE_CACHE[filename] = lines + } + + startIndex := lineNumber - span - 1 + endIndex := startIndex + span + span + 1 + if startIndex < 0 { + startIndex = 0 + } + if endIndex > len(lines) { + endIndex = len(lines) + } + highlightIndex := lineNumber - 1 - startIndex + return lines[startIndex:endIndex], highlightIndex +} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_bsd.go b/api/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_bsd.go new file mode 100644 index 000000000..61e0ed306 --- /dev/null +++ b/api/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_bsd.go @@ -0,0 +1,11 @@ +//go:build freebsd || openbsd || netbsd || darwin || dragonfly +// +build freebsd openbsd netbsd darwin dragonfly + +package internal + +import ( + "os" + "syscall" +) + +var PROGRESS_SIGNALS = []os.Signal{syscall.SIGINFO, syscall.SIGUSR1} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_unix.go b/api/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_unix.go new file mode 100644 index 000000000..ad30de459 --- /dev/null +++ b/api/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_unix.go @@ -0,0 +1,11 @@ +//go:build linux || solaris +// +build linux solaris + +package internal + +import ( + "os" + "syscall" +) + +var PROGRESS_SIGNALS = []os.Signal{syscall.SIGUSR1} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_win.go b/api/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_win.go new file mode 100644 index 000000000..0eca2516a --- /dev/null +++ b/api/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_win.go @@ -0,0 +1,8 @@ +//go:build windows +// +build windows + +package internal + +import "os" + +var PROGRESS_SIGNALS = []os.Signal{} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/internal/progress_reporter_manager.go b/api/vendor/github.com/onsi/ginkgo/v2/internal/progress_reporter_manager.go new file mode 100644 index 000000000..2c6e260f7 --- /dev/null +++ b/api/vendor/github.com/onsi/ginkgo/v2/internal/progress_reporter_manager.go @@ -0,0 +1,79 @@ +package internal + +import ( + "context" + "sort" + "strings" + "sync" + + "github.com/onsi/ginkgo/v2/types" +) + +type ProgressReporterManager struct { + lock *sync.Mutex + progressReporters map[int]func() string + prCounter int +} + +func NewProgressReporterManager() *ProgressReporterManager { + return &ProgressReporterManager{ + progressReporters: map[int]func() string{}, + lock: &sync.Mutex{}, + } +} + +func (prm *ProgressReporterManager) AttachProgressReporter(reporter func() string) func() { + prm.lock.Lock() + defer prm.lock.Unlock() + prm.prCounter += 1 + prCounter := prm.prCounter + prm.progressReporters[prCounter] = reporter + + return func() { + prm.lock.Lock() + defer prm.lock.Unlock() + delete(prm.progressReporters, prCounter) + } +} + +func (prm *ProgressReporterManager) QueryProgressReporters(ctx context.Context, failer *Failer) []string { + prm.lock.Lock() + keys := []int{} + for key := range prm.progressReporters { + keys = append(keys, key) + } + sort.Ints(keys) + reporters := []func() string{} + for _, key := range keys { + reporters = append(reporters, prm.progressReporters[key]) + } + prm.lock.Unlock() + + if len(reporters) == 0 { + return nil + 
} + out := []string{} + for _, reporter := range reporters { + reportC := make(chan string, 1) + go func() { + defer func() { + e := recover() + if e != nil { + failer.Panic(types.NewCodeLocationWithStackTrace(1), e) + reportC <- "failed to query attached progress reporter" + } + }() + reportC <- reporter() + }() + var report string + select { + case report = <-reportC: + case <-ctx.Done(): + return out + } + if strings.TrimSpace(report) != "" { + out = append(out, report) + } + } + return out +} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go b/api/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go index 74199f395..cc351a39b 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go +++ b/api/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go @@ -1,7 +1,6 @@ package internal import ( - "reflect" "time" "github.com/onsi/ginkgo/v2/types" @@ -13,20 +12,20 @@ func NewReportEntry(name string, cl types.CodeLocation, args ...interface{}) (Re out := ReportEntry{ Visibility: types.ReportEntryVisibilityAlways, Name: name, - Time: time.Now(), Location: cl, + Time: time.Now(), } var didSetValue = false for _, arg := range args { - switch reflect.TypeOf(arg) { - case reflect.TypeOf(types.ReportEntryVisibilityAlways): - out.Visibility = arg.(types.ReportEntryVisibility) - case reflect.TypeOf(types.CodeLocation{}): - out.Location = arg.(types.CodeLocation) - case reflect.TypeOf(Offset(0)): - out.Location = types.NewCodeLocation(2 + int(arg.(Offset))) - case reflect.TypeOf(out.Time): - out.Time = arg.(time.Time) + switch x := arg.(type) { + case types.ReportEntryVisibility: + out.Visibility = x + case types.CodeLocation: + out.Location = x + case Offset: + out.Location = types.NewCodeLocation(2 + int(x)) + case time.Time: + out.Time = x default: if didSetValue { return ReportEntry{}, types.GinkgoErrors.TooManyReportEntryValues(out.Location, arg) diff --git a/api/vendor/github.com/onsi/ginkgo/v2/internal/spec.go b/api/vendor/github.com/onsi/ginkgo/v2/internal/spec.go index 92072edd4..7c4ee5bb7 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/internal/spec.go +++ b/api/vendor/github.com/onsi/ginkgo/v2/internal/spec.go @@ -2,6 +2,7 @@ package internal import ( "strings" + "time" "github.com/onsi/ginkgo/v2/types" ) @@ -40,6 +41,21 @@ func (s Spec) FlakeAttempts() int { return flakeAttempts } +func (s Spec) MustPassRepeatedly() int { + mustPassRepeatedly := 0 + for i := range s.Nodes { + if s.Nodes[i].MustPassRepeatedly > 0 { + mustPassRepeatedly = s.Nodes[i].MustPassRepeatedly + } + } + + return mustPassRepeatedly +} + +func (s Spec) SpecTimeout() time.Duration { + return s.FirstNodeWithType(types.NodeTypeIt).SpecTimeout +} + type Specs []Spec func (s Specs) HasAnySpecsMarkedPending() bool { diff --git a/api/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go b/api/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go new file mode 100644 index 000000000..2515b84a1 --- /dev/null +++ b/api/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go @@ -0,0 +1,47 @@ +package internal + +import ( + "context" + + "github.com/onsi/ginkgo/v2/types" +) + +type SpecContext interface { + context.Context + + SpecReport() types.SpecReport + AttachProgressReporter(func() string) func() +} + +type specContext struct { + context.Context + *ProgressReporterManager + + cancel context.CancelFunc + + suite *Suite +} + +/* +SpecContext includes a reference to `suite` and embeds itself in itself as a "GINKGO_SPEC_CONTEXT" value. 
This allows users to create child Contexts without having downstream consumers (e.g. Gomega) lose access to the SpecContext and its methods. This allows us to build extensions on top of Ginkgo that simply take an all-encompassing context. + +Note that while SpecContext is used to enforce deadlines by Ginkgo it is not configured as a context.WithDeadline. Instead, Ginkgo owns responsibility for cancelling the context when the deadline elapses. + +This is because Ginkgo needs finer control over when the context is canceled. Specifically, Ginkgo needs to generate a ProgressReport before it cancels the context to ensure progress is captured where the spec is currently running. The only way to avoid a race here is to manually control the cancellation. +*/ +func NewSpecContext(suite *Suite) *specContext { + ctx, cancel := context.WithCancel(context.Background()) + sc := &specContext{ + cancel: cancel, + suite: suite, + ProgressReporterManager: NewProgressReporterManager(), + } + ctx = context.WithValue(ctx, "GINKGO_SPEC_CONTEXT", sc) //yes, yes, the go docs say don't use a string for a key... but we'd rather avoid a circular dependency between Gomega and Ginkgo + sc.Context = ctx //thank goodness for garbage collectors that can handle circular dependencies + + return sc +} + +func (sc *specContext) SpecReport() types.SpecReport { + return sc.suite.CurrentSpecReport() +} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/api/vendor/github.com/onsi/ginkgo/v2/internal/suite.go index 8797895b7..ea0d259d9 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/internal/suite.go +++ b/api/vendor/github.com/onsi/ginkgo/v2/internal/suite.go @@ -2,13 +2,14 @@ package internal import ( "fmt" + "sync" "time" - "github.com/onsi/ginkgo/v2/formatter" "github.com/onsi/ginkgo/v2/internal/interrupt_handler" "github.com/onsi/ginkgo/v2/internal/parallel_support" "github.com/onsi/ginkgo/v2/reporters" "github.com/onsi/ginkgo/v2/types" + "golang.org/x/net/context" ) type Phase uint @@ -19,10 +20,14 @@ const ( PhaseRun ) +var PROGRESS_REPORTER_DEADLING = 5 * time.Second + type Suite struct { tree *TreeNode topLevelContainers Nodes + *ProgressReporterManager + phase Phase suiteNodes Nodes @@ -34,19 +39,41 @@ type Suite struct { outputInterceptor OutputInterceptor interruptHandler interrupt_handler.InterruptHandlerInterface config types.SuiteConfig + deadline time.Time + + skipAll bool + report types.Report + currentSpecReport types.SpecReport + currentNode Node + currentNodeStartTime time.Time + + currentSpecContext *specContext + + currentByStep types.SpecEvent + timelineOrder int - skipAll bool - report types.Report - currentSpecReport types.SpecReport - currentNode Node + /* + We don't need to lock around all operations. Just those that *could* happen concurrently. + + Suite, generally, only runs one node at a time - and so the possibility for races is small. In fact, the presence of a race usually indicates the user has launched a goroutine that has leaked past the node it was launched in. + + However, there are some operations that can happen concurrently: + + - AddReportEntry and CurrentSpecReport can be accessed at any point by the user - including in goroutines that outlive the node intentionally (see, e.g. #1020). They both form a self-contained read-write pair and so a lock in them is sufficient. + - generateProgressReport can be invoked at any point in time by an interrupt or a progress poll.
Moreover, it requires access to currentSpecReport, currentNode, currentNodeStartTime, and currentByStep. To make it thread-safe we need to lock around generateProgressReport when we read those variables _and_ everywhere those variables are *written*. In general we don't need to worry about all possible field writes to these variables as what `generateProgressReport` does with these variables is fairly selective (hence the name of the lock). Specifically, we don't need to lock around state and failure message changes on `currentSpecReport` - just the setting of the variable itself. + */ + selectiveLock *sync.Mutex client parallel_support.Client } func NewSuite() *Suite { return &Suite{ - tree: &TreeNode{}, - phase: PhaseBuildTopLevel, + tree: &TreeNode{}, + phase: PhaseBuildTopLevel, + ProgressReporterManager: NewProgressReporterManager(), + + selectiveLock: &sync.Mutex{}, } } @@ -63,7 +90,7 @@ func (suite *Suite) BuildTree() error { return nil } -func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, suiteConfig types.SuiteConfig) (bool, bool) { +func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, progressSignalRegistrar ProgressSignalRegistrar, suiteConfig types.SuiteConfig) (bool, bool) { if suite.phase != PhaseBuildTree { panic("cannot run before building the tree = call suite.BuildTree() first") } @@ -80,8 +107,16 @@ func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string suite.interruptHandler = interruptHandler suite.config = suiteConfig + if suite.config.Timeout > 0 { + suite.deadline = time.Now().Add(suite.config.Timeout) + } + + cancelProgressHandler := progressSignalRegistrar(suite.handleProgressSignal) + success := suite.runSpecs(description, suiteLabels, suitePath, hasProgrammaticFocus, specs) + cancelProgressHandler() + return success, hasProgrammaticFocus } @@ -100,7 +135,7 @@ func (suite *Suite) PushNode(node Node) error { return suite.pushCleanupNode(node) } - if node.NodeType.Is(types.NodeTypeBeforeSuite | types.NodeTypeAfterSuite | types.NodeTypeSynchronizedBeforeSuite | types.NodeTypeSynchronizedAfterSuite | types.NodeTypeReportAfterSuite) { + if node.NodeType.Is(types.NodeTypeBeforeSuite | types.NodeTypeAfterSuite | types.NodeTypeSynchronizedBeforeSuite | types.NodeTypeSynchronizedAfterSuite | types.NodeTypeBeforeSuite | types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) { return suite.pushSuiteNode(node) } @@ -122,6 +157,13 @@ func (suite *Suite) PushNode(node Node) error { } } + if node.MarkedContinueOnFailure { + firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered() + if !firstOrderedNode.IsZero() { + return types.GinkgoErrors.InvalidContinueOnFailureDecoration(node.CodeLocation) + } + } + if node.NodeType == types.NodeTypeContainer { // During PhaseBuildTopLevel we only track the top level containers without entering them // We only enter the top level container nodes during PhaseBuildTree @@ -143,7 +185,7 @@ func (suite *Suite) PushNode(node Node) error { err = types.GinkgoErrors.CaughtPanicDuringABuildPhase(e, node.CodeLocation) } }() - node.Body() +
node.Body(nil) return err }() suite.tree = parentTree @@ -193,7 +235,7 @@ func (suite *Suite) pushCleanupNode(node Node) error { node.NodeType = types.NodeTypeCleanupAfterSuite case types.NodeTypeBeforeAll, types.NodeTypeAfterAll: node.NodeType = types.NodeTypeCleanupAfterAll - case types.NodeTypeReportBeforeEach, types.NodeTypeReportAfterEach, types.NodeTypeReportAfterSuite: + case types.NodeTypeReportBeforeEach, types.NodeTypeReportAfterEach, types.NodeTypeReportBeforeSuite, types.NodeTypeReportAfterSuite: return types.GinkgoErrors.PushingCleanupInReportingNode(node.CodeLocation, suite.currentNode.NodeType) case types.NodeTypeCleanupInvalid, types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll, types.NodeTypeCleanupAfterSuite: return types.GinkgoErrors.PushingCleanupInCleanupNode(node.CodeLocation) @@ -203,19 +245,86 @@ func (suite *Suite) pushCleanupNode(node Node) error { node.NodeIDWhereCleanupWasGenerated = suite.currentNode.ID node.NestingLevel = suite.currentNode.NestingLevel + suite.selectiveLock.Lock() suite.cleanupNodes = append(suite.cleanupNodes, node) + suite.selectiveLock.Unlock() return nil } +func (suite *Suite) generateTimelineLocation() types.TimelineLocation { + suite.selectiveLock.Lock() + defer suite.selectiveLock.Unlock() + + suite.timelineOrder += 1 + return types.TimelineLocation{ + Offset: len(suite.currentSpecReport.CapturedGinkgoWriterOutput) + suite.writer.Len(), + Order: suite.timelineOrder, + Time: time.Now(), + } +} + +func (suite *Suite) handleSpecEvent(event types.SpecEvent) types.SpecEvent { + event.TimelineLocation = suite.generateTimelineLocation() + suite.selectiveLock.Lock() + suite.currentSpecReport.SpecEvents = append(suite.currentSpecReport.SpecEvents, event) + suite.selectiveLock.Unlock() + suite.reporter.EmitSpecEvent(event) + return event +} + +func (suite *Suite) handleSpecEventEnd(eventType types.SpecEventType, startEvent types.SpecEvent) { + event := startEvent + event.SpecEventType = eventType + event.TimelineLocation = suite.generateTimelineLocation() + event.Duration = event.TimelineLocation.Time.Sub(startEvent.TimelineLocation.Time) + suite.selectiveLock.Lock() + suite.currentSpecReport.SpecEvents = append(suite.currentSpecReport.SpecEvents, event) + suite.selectiveLock.Unlock() + suite.reporter.EmitSpecEvent(event) +} + +func (suite *Suite) By(text string, callback ...func()) error { + cl := types.NewCodeLocation(2) + if suite.phase != PhaseRun { + return types.GinkgoErrors.ByNotDuringRunPhase(cl) + } + + event := suite.handleSpecEvent(types.SpecEvent{ + SpecEventType: types.SpecEventByStart, + CodeLocation: cl, + Message: text, + }) + suite.selectiveLock.Lock() + suite.currentByStep = event + suite.selectiveLock.Unlock() + + if len(callback) == 1 { + defer func() { + suite.selectiveLock.Lock() + suite.currentByStep = types.SpecEvent{} + suite.selectiveLock.Unlock() + suite.handleSpecEventEnd(types.SpecEventByEnd, event) + }() + callback[0]() + } else if len(callback) > 1 { + panic("just one callback per By, please") + } + return nil +} + /* - Spec Running methods - used during PhaseRun +Spec Running methods - used during PhaseRun */ func (suite *Suite) CurrentSpecReport() types.SpecReport { + suite.selectiveLock.Lock() + defer suite.selectiveLock.Unlock() report := suite.currentSpecReport if suite.writer != nil { report.CapturedGinkgoWriterOutput = string(suite.writer.Bytes()) } + report.ReportEntries = make([]ReportEntry, len(report.ReportEntries)) + copy(report.ReportEntries, suite.currentSpecReport.ReportEntries) return 
report } @@ -223,10 +332,56 @@ func (suite *Suite) AddReportEntry(entry ReportEntry) error { if suite.phase != PhaseRun { return types.GinkgoErrors.AddReportEntryNotDuringRunPhase(entry.Location) } + entry.TimelineLocation = suite.generateTimelineLocation() + entry.Time = entry.TimelineLocation.Time + suite.selectiveLock.Lock() suite.currentSpecReport.ReportEntries = append(suite.currentSpecReport.ReportEntries, entry) + suite.selectiveLock.Unlock() + suite.reporter.EmitReportEntry(entry) return nil } +func (suite *Suite) generateProgressReport(fullReport bool) types.ProgressReport { + timelineLocation := suite.generateTimelineLocation() + suite.selectiveLock.Lock() + defer suite.selectiveLock.Unlock() + + deadline, cancel := context.WithTimeout(context.Background(), PROGRESS_REPORTER_DEADLING) + defer cancel() + var additionalReports []string + if suite.currentSpecContext != nil { + additionalReports = append(additionalReports, suite.currentSpecContext.QueryProgressReporters(deadline, suite.failer)...) + } + additionalReports = append(additionalReports, suite.QueryProgressReporters(deadline, suite.failer)...) + gwOutput := suite.currentSpecReport.CapturedGinkgoWriterOutput + string(suite.writer.Bytes()) + pr, err := NewProgressReport(suite.isRunningInParallel(), suite.currentSpecReport, suite.currentNode, suite.currentNodeStartTime, suite.currentByStep, gwOutput, timelineLocation, additionalReports, suite.config.SourceRoots, fullReport) + + if err != nil { + fmt.Printf("{{red}}Failed to generate progress report:{{/}}\n%s\n", err.Error()) + } + return pr +} + +func (suite *Suite) handleProgressSignal() { + report := suite.generateProgressReport(false) + report.Message = "{{bold}}You've requested a progress report:{{/}}" + suite.emitProgressReport(report) +} + +func (suite *Suite) emitProgressReport(report types.ProgressReport) { + suite.selectiveLock.Lock() + suite.currentSpecReport.ProgressReports = append(suite.currentSpecReport.ProgressReports, report.WithoutCapturedGinkgoWriterOutput()) + suite.selectiveLock.Unlock() + + suite.reporter.EmitProgressReport(report) + if suite.isRunningInParallel() { + err := suite.client.PostEmitProgressReport(report) + if err != nil { + fmt.Println(err.Error()) + } + } +} + func (suite *Suite) isRunningInParallel() bool { return suite.config.ParallelTotal > 1 } @@ -271,7 +426,13 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath s } suite.report.SuiteSucceeded = true - suite.runBeforeSuite(numSpecsThatWillBeRun) + + suite.runReportSuiteNodesIfNeedBe(types.NodeTypeReportBeforeSuite) + + ranBeforeSuite := suite.report.SuiteSucceeded + if suite.report.SuiteSucceeded { + suite.runBeforeSuite(numSpecsThatWillBeRun) + } if suite.report.SuiteSucceeded { groupedSpecIndices, serialGroupedSpecIndices := OrderSpecs(specs, suite.config) @@ -310,19 +471,23 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath s } } - suite.runAfterSuiteCleanup(numSpecsThatWillBeRun) + if ranBeforeSuite { + suite.runAfterSuiteCleanup(numSpecsThatWillBeRun) + } interruptStatus := suite.interruptHandler.Status() - if interruptStatus.Interrupted { + if interruptStatus.Interrupted() { suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, interruptStatus.Cause.String()) suite.report.SuiteSucceeded = false } suite.report.EndTime = time.Now() suite.report.RunTime = suite.report.EndTime.Sub(suite.report.StartTime) - - if suite.config.ParallelProcess == 1 { - suite.runReportAfterSuite() + if 
!suite.deadline.IsZero() && suite.report.EndTime.After(suite.deadline) { + suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Suite Timeout Elapsed") + suite.report.SuiteSucceeded = false } + + suite.runReportSuiteNodesIfNeedBe(types.NodeTypeReportAfterSuite) suite.reporter.SuiteDidEnd(suite.report) if suite.isRunningInParallel() { suite.client.PostSuiteDidEnd(suite.report) @@ -332,16 +497,19 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath s } func (suite *Suite) runBeforeSuite(numSpecsThatWillBeRun int) { - interruptStatus := suite.interruptHandler.Status() beforeSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeBeforeSuite | types.NodeTypeSynchronizedBeforeSuite) - if !beforeSuiteNode.IsZero() && !interruptStatus.Interrupted && numSpecsThatWillBeRun > 0 { + if !beforeSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 { + suite.selectiveLock.Lock() suite.currentSpecReport = types.SpecReport{ - LeafNodeType: beforeSuiteNode.NodeType, - LeafNodeLocation: beforeSuiteNode.CodeLocation, - ParallelProcess: suite.config.ParallelProcess, + LeafNodeType: beforeSuiteNode.NodeType, + LeafNodeLocation: beforeSuiteNode.CodeLocation, + ParallelProcess: suite.config.ParallelProcess, + RunningInParallel: suite.isRunningInParallel(), } + suite.selectiveLock.Unlock() + suite.reporter.WillRun(suite.currentSpecReport) - suite.runSuiteNode(beforeSuiteNode, interruptStatus.Channel) + suite.runSuiteNode(beforeSuiteNode) if suite.currentSpecReport.State.Is(types.SpecStateSkipped) { suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Suite skipped in BeforeSuite") suite.skipAll = true @@ -353,50 +521,40 @@ func (suite *Suite) runBeforeSuite(numSpecsThatWillBeRun int) { func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) { afterSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeAfterSuite | types.NodeTypeSynchronizedAfterSuite) if !afterSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 { + suite.selectiveLock.Lock() suite.currentSpecReport = types.SpecReport{ - LeafNodeType: afterSuiteNode.NodeType, - LeafNodeLocation: afterSuiteNode.CodeLocation, - ParallelProcess: suite.config.ParallelProcess, + LeafNodeType: afterSuiteNode.NodeType, + LeafNodeLocation: afterSuiteNode.CodeLocation, + ParallelProcess: suite.config.ParallelProcess, + RunningInParallel: suite.isRunningInParallel(), } + suite.selectiveLock.Unlock() + suite.reporter.WillRun(suite.currentSpecReport) - suite.runSuiteNode(afterSuiteNode, suite.interruptHandler.Status().Channel) + suite.runSuiteNode(afterSuiteNode) suite.processCurrentSpecReport() } afterSuiteCleanup := suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterSuite).Reverse() if len(afterSuiteCleanup) > 0 { for _, cleanupNode := range afterSuiteCleanup { + suite.selectiveLock.Lock() suite.currentSpecReport = types.SpecReport{ - LeafNodeType: cleanupNode.NodeType, - LeafNodeLocation: cleanupNode.CodeLocation, - ParallelProcess: suite.config.ParallelProcess, + LeafNodeType: cleanupNode.NodeType, + LeafNodeLocation: cleanupNode.CodeLocation, + ParallelProcess: suite.config.ParallelProcess, + RunningInParallel: suite.isRunningInParallel(), } + suite.selectiveLock.Unlock() + suite.reporter.WillRun(suite.currentSpecReport) - suite.runSuiteNode(cleanupNode, suite.interruptHandler.Status().Channel) + suite.runSuiteNode(cleanupNode) suite.processCurrentSpecReport() } } } -func (suite *Suite) runReportAfterSuite() { - for _, node := range 
suite.suiteNodes.WithType(types.NodeTypeReportAfterSuite) { - suite.currentSpecReport = types.SpecReport{ - LeafNodeType: node.NodeType, - LeafNodeLocation: node.CodeLocation, - LeafNodeText: node.Text, - ParallelProcess: suite.config.ParallelProcess, - } - suite.reporter.WillRun(suite.currentSpecReport) - suite.runReportAfterSuiteNode(node, suite.report) - suite.processCurrentSpecReport() - } -} - func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) { - if suite.config.DryRun { - return - } - nodes := spec.Nodes.WithType(nodeType) if nodeType == types.NodeTypeReportAfterEach { nodes = nodes.SortedByDescendingNestingLevel() @@ -412,16 +570,11 @@ func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) { suite.writer.Truncate() suite.outputInterceptor.StartInterceptingOutput() report := suite.currentSpecReport - nodes[i].Body = func() { + nodes[i].Body = func(SpecContext) { nodes[i].ReportEachBody(report) } - suite.interruptHandler.SetInterruptPlaceholderMessage(formatter.Fiw(0, formatter.COLS, - "{{yellow}}Ginkgo received an interrupt signal but is currently running a %s node. To avoid an invalid report the %s node will not be interrupted however subsequent tests will be skipped.{{/}}\n\n{{bold}}The running %s node is at:\n%s.{{/}}", - nodeType, nodeType, nodeType, - nodes[i].CodeLocation, - )) - state, failure := suite.runNode(nodes[i], nil, spec.Nodes.BestTextFor(nodes[i])) - suite.interruptHandler.ClearInterruptPlaceholderMessage() + state, failure := suite.runNode(nodes[i], time.Time{}, spec.Nodes.BestTextFor(nodes[i])) + + // If the spec is not in a failure state (i.e. it's Passed/Skipped/Pending) and the reporter has failed, override the state. // Also, if the reporter is ever aborted - always override the state to propagate the abort if (!suite.currentSpecReport.State.Is(types.SpecStateFailureStates) && state.Is(types.SpecStateFailureStates)) || state.Is(types.SpecStateAborted) { @@ -433,7 +586,7 @@ } } -func (suite *Suite) runSuiteNode(node Node, interruptChannel chan interface{}) { +func (suite *Suite) runSuiteNode(node Node) { if suite.config.DryRun { suite.currentSpecReport.State = types.SpecStatePassed return @@ -446,13 +599,13 @@ func (suite *Suite) runSuiteNode(node Node, interruptChannel chan interface{}) { var err error switch node.NodeType { case types.NodeTypeBeforeSuite, types.NodeTypeAfterSuite: - suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "") + suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "") case types.NodeTypeCleanupAfterSuite: if suite.config.ParallelTotal > 1 && suite.config.ParallelProcess == 1 { err = suite.client.BlockUntilNonprimaryProcsHaveFinished() } if err == nil { - suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "") + suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "") } case types.NodeTypeSynchronizedBeforeSuite: var data []byte @@ -462,8 +615,9 @@ func (suite *Suite) runSuiteNode(node Node, interruptChannel chan interface{}) { suite.outputInterceptor.StopInterceptingAndReturnOutput() suite.outputInterceptor.StartInterceptingOutputAndForwardTo(suite.client) } - node.Body = func() { data = node.SynchronizedBeforeSuiteProc1Body() } - suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "") + node.Body = 
func(c SpecContext) { data = node.SynchronizedBeforeSuiteProc1Body(c) } + node.HasContext = node.SynchronizedBeforeSuiteProc1BodyHasContext + suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "") if suite.config.ParallelTotal > 1 { suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput() suite.outputInterceptor.StartInterceptingOutput() @@ -480,19 +634,21 @@ func (suite *Suite) runSuiteNode(node Node, interruptChannel chan interface{}) { switch proc1State { case types.SpecStatePassed: runAllProcs = true - case types.SpecStateFailed, types.SpecStatePanicked: + case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateTimedout: err = types.GinkgoErrors.SynchronizedBeforeSuiteFailedOnProc1() case types.SpecStateInterrupted, types.SpecStateAborted, types.SpecStateSkipped: suite.currentSpecReport.State = proc1State } } if runAllProcs { - node.Body = func() { node.SynchronizedBeforeSuiteAllProcsBody(data) } - suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "") + node.Body = func(c SpecContext) { node.SynchronizedBeforeSuiteAllProcsBody(c, data) } + node.HasContext = node.SynchronizedBeforeSuiteAllProcsBodyHasContext + suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "") } case types.NodeTypeSynchronizedAfterSuite: node.Body = node.SynchronizedAfterSuiteAllProcsBody - suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "") + node.HasContext = node.SynchronizedAfterSuiteAllProcsBodyHasContext + suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "") if suite.config.ParallelProcess == 1 { if suite.config.ParallelTotal > 1 { err = suite.client.BlockUntilNonprimaryProcsHaveFinished() @@ -504,7 +660,8 @@ func (suite *Suite) runSuiteNode(node Node, interruptChannel chan interface{}) { } node.Body = node.SynchronizedAfterSuiteProc1Body - state, failure := suite.runNode(node, interruptChannel, "") + node.HasContext = node.SynchronizedAfterSuiteProc1BodyHasContext + state, failure := suite.runNode(node, time.Time{}, "") if suite.currentSpecReport.State.Is(types.SpecStatePassed) { suite.currentSpecReport.State, suite.currentSpecReport.Failure = state, failure } @@ -514,68 +671,122 @@ func (suite *Suite) runSuiteNode(node Node, interruptChannel chan interface{}) { if err != nil && !suite.currentSpecReport.State.Is(types.SpecStateFailureStates) { suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error()) + suite.reporter.EmitFailure(suite.currentSpecReport.State, suite.currentSpecReport.Failure) } suite.currentSpecReport.EndTime = time.Now() suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime) suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes()) suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput() - - return } -func (suite *Suite) runReportAfterSuiteNode(node Node, report types.Report) { - if suite.config.DryRun { - suite.currentSpecReport.State = types.SpecStatePassed +func (suite *Suite) runReportSuiteNodesIfNeedBe(nodeType types.NodeType) { + nodes := suite.suiteNodes.WithType(nodeType) + // only run ReportAfterSuite on proc 1 + if nodeType.Is(types.NodeTypeReportAfterSuite) && 
suite.config.ParallelProcess != 1 { return } + // if we're running ReportBeforeSuite on proc > 1 - we should wait until proc 1 has completed + if nodeType.Is(types.NodeTypeReportBeforeSuite) && suite.config.ParallelProcess != 1 && len(nodes) > 0 { + state, err := suite.client.BlockUntilReportBeforeSuiteCompleted() + if err != nil || state.Is(types.SpecStateFailed) { + suite.report.SuiteSucceeded = false + } + return + } + + for _, node := range nodes { + suite.selectiveLock.Lock() + suite.currentSpecReport = types.SpecReport{ + LeafNodeType: node.NodeType, + LeafNodeLocation: node.CodeLocation, + LeafNodeText: node.Text, + ParallelProcess: suite.config.ParallelProcess, + RunningInParallel: suite.isRunningInParallel(), + } + suite.selectiveLock.Unlock() + + suite.reporter.WillRun(suite.currentSpecReport) + suite.runReportSuiteNode(node, suite.report) + suite.processCurrentSpecReport() + } + // if we're running ReportBeforeSuite and we're running in parallel - we should tell the other procs that we're done + if nodeType.Is(types.NodeTypeReportBeforeSuite) && suite.isRunningInParallel() && len(nodes) > 0 { + if suite.report.SuiteSucceeded { + suite.client.PostReportBeforeSuiteCompleted(types.SpecStatePassed) + } else { + suite.client.PostReportBeforeSuiteCompleted(types.SpecStateFailed) + } + } +} + +func (suite *Suite) runReportSuiteNode(node Node, report types.Report) { suite.writer.Truncate() suite.outputInterceptor.StartInterceptingOutput() suite.currentSpecReport.StartTime = time.Now() - if suite.config.ParallelTotal > 1 { + // if we're running a ReportAfterSuite in parallel (on proc 1) we (a) wait until other procs have exited and + // (b) always fetch the latest report as prior ReportAfterSuites will contribute to it + if node.NodeType.Is(types.NodeTypeReportAfterSuite) && suite.isRunningInParallel() { aggregatedReport, err := suite.client.BlockUntilAggregatedNonprimaryProcsReport() if err != nil { suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error()) + suite.reporter.EmitFailure(suite.currentSpecReport.State, suite.currentSpecReport.Failure) return } report = report.Add(aggregatedReport) } - node.Body = func() { node.ReportAfterSuiteBody(report) } - suite.interruptHandler.SetInterruptPlaceholderMessage(formatter.Fiw(0, formatter.COLS, - "{{yellow}}Ginkgo received an interrupt signal but is currently running a ReportAfterSuite node. 
To avoid an invalid report the ReportAfterSuite node will not be interrupted.{{/}}\n\n{{bold}}The running ReportAfterSuite node is at:\n%s.{{/}}", - node.CodeLocation, - )) - suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, nil, "") - suite.interruptHandler.ClearInterruptPlaceholderMessage() + node.Body = func(SpecContext) { node.ReportSuiteBody(report) } + suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "") suite.currentSpecReport.EndTime = time.Now() suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime) suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes()) suite.currentSpecReport.CapturedStdOutErr = suite.outputInterceptor.StopInterceptingAndReturnOutput() - - return } -func (suite *Suite) runNode(node Node, interruptChannel chan interface{}, text string) (types.SpecState, types.Failure) { +func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (types.SpecState, types.Failure) { if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll | types.NodeTypeCleanupAfterSuite) { suite.cleanupNodes = suite.cleanupNodes.WithoutNode(node) } + interruptStatus := suite.interruptHandler.Status() + if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut { + return types.SpecStateSkipped, types.Failure{} + } + if interruptStatus.Level == interrupt_handler.InterruptLevelReportOnly && !node.NodeType.Is(types.NodeTypesAllowedDuringReportInterrupt) { + return types.SpecStateSkipped, types.Failure{} + } + if interruptStatus.Level == interrupt_handler.InterruptLevelCleanupAndReport && !node.NodeType.Is(types.NodeTypesAllowedDuringReportInterrupt|types.NodeTypesAllowedDuringCleanupInterrupt) { + return types.SpecStateSkipped, types.Failure{} + } + + suite.selectiveLock.Lock() suite.currentNode = node + suite.currentNodeStartTime = time.Now() + suite.currentByStep = types.SpecEvent{} + suite.selectiveLock.Unlock() defer func() { + suite.selectiveLock.Lock() suite.currentNode = Node{} + suite.currentNodeStartTime = time.Time{} + suite.selectiveLock.Unlock() }() - if suite.config.EmitSpecProgress { - if text == "" { - text = "TOP-LEVEL" - } - s := fmt.Sprintf("[%s] %s\n %s\n", node.NodeType.String(), text, node.CodeLocation.String()) - suite.writer.Write([]byte(s)) + if text == "" { + text = "TOP-LEVEL" } + event := suite.handleSpecEvent(types.SpecEvent{ + SpecEventType: types.SpecEventNodeStart, + NodeType: node.NodeType, + Message: text, + CodeLocation: node.CodeLocation, + }) + defer func() { + suite.handleSpecEventEnd(types.SpecEventNodeEnd, event) + }() var failure types.Failure failure.FailureNodeType, failure.FailureNodeLocation = node.NodeType, node.CodeLocation @@ -586,6 +797,54 @@ func (suite *Suite) runNode(node Node, interruptChannel chan interface{}, text s } else { failure.FailureNodeContext, failure.FailureNodeContainerIndex = types.FailureNodeInContainer, node.NestingLevel-1 } + var outcome types.SpecState + + gracePeriod := suite.config.GracePeriod + if node.GracePeriod >= 0 { + gracePeriod = node.GracePeriod + } + + now := time.Now() + deadline := suite.deadline + timeoutInPlay := "suite" + if deadline.IsZero() || (!specDeadline.IsZero() && specDeadline.Before(deadline)) { + deadline = specDeadline + timeoutInPlay = "spec" + } + if node.NodeTimeout > 0 && (deadline.IsZero() || deadline.Sub(now) > node.NodeTimeout) { + deadline = now.Add(node.NodeTimeout) + timeoutInPlay = 
"node" + } + if (!deadline.IsZero() && deadline.Before(now)) || interruptStatus.Interrupted() { + //we're out of time already. let's wait for a NodeTimeout if we have it, or GracePeriod if we don't + if node.NodeTimeout > 0 { + deadline = now.Add(node.NodeTimeout) + timeoutInPlay = "node" + } else { + deadline = now.Add(gracePeriod) + timeoutInPlay = "grace period" + } + } + + if !node.HasContext { + // this maps onto the pre-context behavior: + // - an interrupted node exits immediately. with this, context-less nodes that are in a spec with a SpecTimeout and/or are interrupted by other means will simply exit immediately after the timeout/interrupt + // - clean up nodes have up to GracePeriod (formerly hard-coded at 30s) to complete before they are interrupted + gracePeriod = 0 + } + + sc := NewSpecContext(suite) + defer sc.cancel() + + suite.selectiveLock.Lock() + suite.currentSpecContext = sc + suite.selectiveLock.Unlock() + + var deadlineChannel <-chan time.Time + if !deadline.IsZero() { + deadlineChannel = time.After(deadline.Sub(now)) + } + var gracePeriodChannel <-chan time.Time outcomeC := make(chan types.SpecState) failureC := make(chan types.Failure) @@ -597,33 +856,153 @@ func (suite *Suite) runNode(node Node, interruptChannel chan interface{}, text s suite.failer.Panic(types.NewCodeLocationWithStackTrace(2), e) } - outcome, failureFromRun := suite.failer.Drain() - outcomeC <- outcome + outcomeFromRun, failureFromRun := suite.failer.Drain() + failureFromRun.TimelineLocation = suite.generateTimelineLocation() + outcomeC <- outcomeFromRun failureC <- failureFromRun }() - node.Body() + node.Body(sc) finished = true }() - select { - case outcome := <-outcomeC: - failureFromRun := <-failureC - if outcome == types.SpecStatePassed { - return outcome, types.Failure{} + // progress polling timer and channel + var emitProgressNow <-chan time.Time + var progressPoller *time.Timer + var pollProgressAfter, pollProgressInterval = suite.config.PollProgressAfter, suite.config.PollProgressInterval + if node.PollProgressAfter >= 0 { + pollProgressAfter = node.PollProgressAfter + } + if node.PollProgressInterval >= 0 { + pollProgressInterval = node.PollProgressInterval + } + if pollProgressAfter > 0 { + progressPoller = time.NewTimer(pollProgressAfter) + emitProgressNow = progressPoller.C + defer progressPoller.Stop() + } + + // now we wait for an outcome, an interrupt, a timeout, or a progress poll + for { + select { + case outcomeFromRun := <-outcomeC: + failureFromRun := <-failureC + if outcome.Is(types.SpecStateInterrupted | types.SpecStateTimedout) { + // we've already been interrupted/timed out. we just managed to actually exit + // before the grace period elapsed + // if we have a failure message we attach it as an additional failure + if outcomeFromRun != types.SpecStatePassed { + additionalFailure := types.AdditionalFailure{ + State: outcomeFromRun, + Failure: failure, //we make a copy - this will include all the configuration set up above... 
+ } + //...and then we update the failure with the details from failureFromRun + additionalFailure.Failure.Location, additionalFailure.Failure.ForwardedPanic, additionalFailure.Failure.TimelineLocation = failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation + additionalFailure.Failure.ProgressReport = types.ProgressReport{} + if outcome == types.SpecStateTimedout { + additionalFailure.Failure.Message = fmt.Sprintf("A %s timeout occurred and then the following failure was recorded in the timedout node before it exited:\n%s", timeoutInPlay, failureFromRun.Message) + } else { + additionalFailure.Failure.Message = fmt.Sprintf("An interrupt occurred and then the following failure was recorded in the interrupted node before it exited:\n%s", failureFromRun.Message) + } + suite.reporter.EmitFailure(additionalFailure.State, additionalFailure.Failure) + failure.AdditionalFailure = &additionalFailure + } + return outcome, failure + } + if outcomeFromRun.Is(types.SpecStatePassed) { + return outcomeFromRun, types.Failure{} + } else { + failure.Message, failure.Location, failure.ForwardedPanic, failure.TimelineLocation = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation + suite.reporter.EmitFailure(outcomeFromRun, failure) + return outcomeFromRun, failure + } + case <-gracePeriodChannel: + if node.HasContext && outcome.Is(types.SpecStateTimedout) { + report := suite.generateProgressReport(false) + report.Message = "{{bold}}{{orange}}A running node failed to exit in time{{/}}\nGinkgo is moving on but a node has timed out and failed to exit before its grace period elapsed. The node has now leaked and is running in the background.\nHere's a current progress report:" + suite.emitProgressReport(report) + } + return outcome, failure + case <-deadlineChannel: + // we're out of time - the outcome is a timeout and we capture the failure and progress report + outcome = types.SpecStateTimedout + failure.Message, failure.Location, failure.TimelineLocation = fmt.Sprintf("A %s timeout occurred", timeoutInPlay), node.CodeLocation, suite.generateTimelineLocation() + failure.ProgressReport = suite.generateProgressReport(false).WithoutCapturedGinkgoWriterOutput() + failure.ProgressReport.Message = fmt.Sprintf("{{bold}}This is the Progress Report generated when the %s timeout occurred:{{/}}", timeoutInPlay) + deadlineChannel = nil + suite.reporter.EmitFailure(outcome, failure) + + // tell the spec to stop. 
it's important we generate the progress report first to make sure we capture where + // the spec is actually stuck + sc.cancel() + //and now we wait for the grace period + gracePeriodChannel = time.After(gracePeriod) + case <-interruptStatus.Channel: + interruptStatus = suite.interruptHandler.Status() + // ignore interruption from other process if we are cleaning up or reporting + if interruptStatus.Cause == interrupt_handler.InterruptCauseAbortByOtherProcess && + node.NodeType.Is(types.NodeTypesAllowedDuringReportInterrupt|types.NodeTypesAllowedDuringCleanupInterrupt) { + continue + } + + deadlineChannel = nil // don't worry about deadlines, time's up now + + failureTimelineLocation := suite.generateTimelineLocation() + progressReport := suite.generateProgressReport(true) + + if outcome == types.SpecStateInvalid { + outcome = types.SpecStateInterrupted + failure.Message, failure.Location, failure.TimelineLocation = interruptStatus.Message(), node.CodeLocation, failureTimelineLocation + if interruptStatus.ShouldIncludeProgressReport() { + failure.ProgressReport = progressReport.WithoutCapturedGinkgoWriterOutput() + failure.ProgressReport.Message = "{{bold}}This is the Progress Report generated when the interrupt was received:{{/}}" + } + suite.reporter.EmitFailure(outcome, failure) + } + + progressReport = progressReport.WithoutOtherGoroutines() + sc.cancel() + + if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut { + if interruptStatus.ShouldIncludeProgressReport() { + progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\n{{bold}}{{red}}Final interrupt received{{/}}; Ginkgo will not run any cleanup or reporting nodes and will terminate as soon as possible.\nHere's a current progress report:", interruptStatus.Message()) + suite.emitProgressReport(progressReport) + } + return outcome, failure + } + if interruptStatus.ShouldIncludeProgressReport() { + if interruptStatus.Level == interrupt_handler.InterruptLevelCleanupAndReport { + progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nFirst interrupt received; Ginkgo will run any cleanup and reporting nodes but will skip all remaining specs. {{bold}}Interrupt again to skip cleanup{{/}}.\nHere's a current progress report:", interruptStatus.Message()) + } else if interruptStatus.Level == interrupt_handler.InterruptLevelReportOnly { + progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nSecond interrupt received; Ginkgo will run any reporting nodes but will skip all remaining specs and cleanup nodes. {{bold}}Interrupt again to bail immediately{{/}}.\nHere's a current progress report:", interruptStatus.Message()) + } + suite.emitProgressReport(progressReport) + } + + if gracePeriodChannel == nil { + // we haven't given grace yet... so let's + gracePeriodChannel = time.After(gracePeriod) + } else { + // we've already given grace. time's up. now. 
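The select loop above implements a two-phase shutdown: when the deadline fires the outcome becomes timed-out, the SpecContext is cancelled, and a grace-period timer is armed; only if the body still has not returned when that timer fires is the node declared leaked. A minimal, self-contained sketch of the same pattern, with hypothetical names rather than Ginkgo's internals:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// runWithGrace races body against a deadline. On timeout it cancels the
// context, then allows one grace period for the body to unwind before
// declaring the goroutine leaked.
func runWithGrace(body func(context.Context), timeout, grace time.Duration) string {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	done := make(chan struct{})
	go func() { body(ctx); close(done) }()

	select {
	case <-done:
		return "completed"
	case <-time.After(timeout):
		cancel() // phase one: ask the body to stop
	}
	select {
	case <-done:
		return "timed out, then exited within the grace period"
	case <-time.After(grace): // phase two: grace period elapsed
		return "timed out and leaked"
	}
}

func main() {
	outcome := runWithGrace(func(ctx context.Context) {
		<-ctx.Done() // a well-behaved body honors cancellation
	}, 50*time.Millisecond, time.Second)
	fmt.Println(outcome)
}
```

A body that ignores its context falls through to the second case, which is the situation the progress report above flags as a leaked node.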
+ return outcome, failure + } + case <-emitProgressNow: + report := suite.generateProgressReport(false) + report.Message = "{{bold}}Automatically polling progress:{{/}}" + suite.emitProgressReport(report) + if pollProgressInterval > 0 { + progressPoller.Reset(pollProgressInterval) + } } - failure.Message, failure.Location, failure.ForwardedPanic = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic - return outcome, failure - case <-interruptChannel: - failure.Message, failure.Location = suite.interruptHandler.InterruptMessageWithStackTraces(), node.CodeLocation - return types.SpecStateInterrupted, failure } } +// TODO: search for usages and consider if reporter.EmitFailure() is necessary func (suite *Suite) failureForLeafNodeWithMessage(node Node, message string) types.Failure { return types.Failure{ Message: message, Location: node.CodeLocation, + TimelineLocation: suite.generateTimelineLocation(), FailureNodeContext: types.FailureNodeIsLeafNode, FailureNodeType: node.NodeType, FailureNodeLocation: node.CodeLocation, diff --git a/api/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go b/api/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go index 2f42b2642..73e265565 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go +++ b/api/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go @@ -5,34 +5,62 @@ import ( "io" "os" + "github.com/onsi/ginkgo/v2/formatter" "github.com/onsi/ginkgo/v2/internal" + "github.com/onsi/ginkgo/v2/reporters" "github.com/onsi/ginkgo/v2/types" ) type failFunc func(message string, callerSkip ...int) type skipFunc func(message string, callerSkip ...int) -type cleanupFunc func(args ...interface{}) +type cleanupFunc func(args ...any) type reportFunc func() types.SpecReport +type addReportEntryFunc func(names string, args ...any) +type ginkgoWriterInterface interface { + io.Writer -func New(writer io.Writer, fail failFunc, skip skipFunc, cleanup cleanupFunc, report reportFunc, offset int) *ginkgoTestingTProxy { + Print(a ...interface{}) + Printf(format string, a ...interface{}) + Println(a ...interface{}) +} +type ginkgoRecoverFunc func() +type attachProgressReporterFunc func(func() string) func() + +func New(writer ginkgoWriterInterface, fail failFunc, skip skipFunc, cleanup cleanupFunc, report reportFunc, addReportEntry addReportEntryFunc, ginkgoRecover ginkgoRecoverFunc, attachProgressReporter attachProgressReporterFunc, randomSeed int64, parallelProcess int, parallelTotal int, noColor bool, offset int) *ginkgoTestingTProxy { return &ginkgoTestingTProxy{ - fail: fail, - offset: offset, - writer: writer, - skip: skip, - cleanup: cleanup, - report: report, + fail: fail, + offset: offset, + writer: writer, + skip: skip, + cleanup: cleanup, + report: report, + addReportEntry: addReportEntry, + ginkgoRecover: ginkgoRecover, + attachProgressReporter: attachProgressReporter, + randomSeed: randomSeed, + parallelProcess: parallelProcess, + parallelTotal: parallelTotal, + f: formatter.NewWithNoColorBool(noColor), } } type ginkgoTestingTProxy struct { - fail failFunc - skip skipFunc - cleanup cleanupFunc - report reportFunc - offset int - writer io.Writer -} + fail failFunc + skip skipFunc + cleanup cleanupFunc + report reportFunc + offset int + writer ginkgoWriterInterface + addReportEntry addReportEntryFunc + ginkgoRecover ginkgoRecoverFunc + attachProgressReporter attachProgressReporterFunc + randomSeed int64 + parallelProcess int + 
parallelTotal int + f formatter.Formatter +} + +// basic testing.T support func (t *ginkgoTestingTProxy) Cleanup(f func()) { t.cleanup(f, internal.Offset(1)) @@ -81,7 +109,7 @@ func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) { } func (t *ginkgoTestingTProxy) Helper() { - // No-op + types.MarkAsHelper(1) } func (t *ginkgoTestingTProxy) Log(args ...interface{}) { @@ -126,3 +154,57 @@ func (t *ginkgoTestingTProxy) TempDir() string { return tmpDir } + +// FullGinkgoTInterface +func (t *ginkgoTestingTProxy) AddReportEntryVisibilityAlways(name string, args ...any) { + finalArgs := []any{internal.Offset(1), types.ReportEntryVisibilityAlways} + t.addReportEntry(name, append(finalArgs, args...)...) +} +func (t *ginkgoTestingTProxy) AddReportEntryVisibilityFailureOrVerbose(name string, args ...any) { + finalArgs := []any{internal.Offset(1), types.ReportEntryVisibilityFailureOrVerbose} + t.addReportEntry(name, append(finalArgs, args...)...) +} +func (t *ginkgoTestingTProxy) AddReportEntryVisibilityNever(name string, args ...any) { + finalArgs := []any{internal.Offset(1), types.ReportEntryVisibilityNever} + t.addReportEntry(name, append(finalArgs, args...)...) +} +func (t *ginkgoTestingTProxy) Print(a ...any) { + t.writer.Print(a...) +} +func (t *ginkgoTestingTProxy) Printf(format string, a ...any) { + t.writer.Printf(format, a...) +} +func (t *ginkgoTestingTProxy) Println(a ...any) { + t.writer.Println(a...) +} +func (t *ginkgoTestingTProxy) F(format string, args ...any) string { + return t.f.F(format, args...) +} +func (t *ginkgoTestingTProxy) Fi(indentation uint, format string, args ...any) string { + return t.f.Fi(indentation, format, args...) +} +func (t *ginkgoTestingTProxy) Fiw(indentation uint, maxWidth uint, format string, args ...any) string { + return t.f.Fiw(indentation, maxWidth, format, args...) +} +func (t *ginkgoTestingTProxy) RenderTimeline() string { + return reporters.RenderTimeline(t.report(), false) +} +func (t *ginkgoTestingTProxy) GinkgoRecover() { + t.ginkgoRecover() +} +func (t *ginkgoTestingTProxy) DeferCleanup(args ...any) { + finalArgs := []any{internal.Offset(1)} + t.cleanup(append(finalArgs, args...)...) 
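DeferCleanup on the proxy prepends internal.Offset(1) before forwarding its variadic arguments, so failure locations are attributed to the test that called the proxy rather than to the proxy itself. A reduced sketch of that prepend-and-forward idiom; Offset and deferCleanup here are illustrative stand-ins, not Ginkgo's exported API:

```go
package main

import "fmt"

// Offset plays the role of an internal frame-skip marker: it tells the
// receiver how many stack frames to skip when recording a call site.
type Offset int

// deferCleanup forwards args with an Offset(1) prepended, mirroring the
// proxy methods above.
func deferCleanup(cleanup func(args ...any), args ...any) {
	finalArgs := []any{Offset(1)}
	cleanup(append(finalArgs, args...)...)
}

func main() {
	deferCleanup(func(args ...any) {
		fmt.Println(args) // [1 close-file] - the offset travels first
	}, "close-file")
}
```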
+} +func (t *ginkgoTestingTProxy) RandomSeed() int64 { + return t.randomSeed +} +func (t *ginkgoTestingTProxy) ParallelProcess() int { + return t.parallelProcess +} +func (t *ginkgoTestingTProxy) ParallelTotal() int { + return t.parallelTotal +} +func (t *ginkgoTestingTProxy) AttachProgressReporter(f func() string) func() { + return t.attachProgressReporter(f) +} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/internal/writer.go b/api/vendor/github.com/onsi/ginkgo/v2/internal/writer.go index 70f0a41f6..574f172df 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/internal/writer.go +++ b/api/vendor/github.com/onsi/ginkgo/v2/internal/writer.go @@ -5,6 +5,9 @@ import ( "fmt" "io" "sync" + + "github.com/go-logr/logr" + "github.com/go-logr/logr/funcr" ) type WriterMode uint @@ -19,24 +22,30 @@ type WriterInterface interface { Truncate() Bytes() []byte + Len() int } -//Writer implements WriterInterface and GinkgoWriterInterface +// Writer implements WriterInterface and GinkgoWriterInterface type Writer struct { buffer *bytes.Buffer outWriter io.Writer lock *sync.Mutex mode WriterMode + streamIndent []byte + indentNext bool + teeWriters []io.Writer } func NewWriter(outWriter io.Writer) *Writer { return &Writer{ - buffer: &bytes.Buffer{}, - lock: &sync.Mutex{}, - outWriter: outWriter, - mode: WriterModeStreamAndBuffer, + buffer: &bytes.Buffer{}, + lock: &sync.Mutex{}, + outWriter: outWriter, + mode: WriterModeStreamAndBuffer, + streamIndent: []byte(" "), + indentNext: true, } } @@ -46,6 +55,14 @@ func (w *Writer) SetMode(mode WriterMode) { w.mode = mode } +func (w *Writer) Len() int { + w.lock.Lock() + defer w.lock.Unlock() + return w.buffer.Len() +} + +var newline = []byte("\n") + func (w *Writer) Write(b []byte) (n int, err error) { w.lock.Lock() defer w.lock.Unlock() @@ -55,7 +72,21 @@ func (w *Writer) Write(b []byte) (n int, err error) { } if w.mode == WriterModeStreamAndBuffer { - w.outWriter.Write(b) + line, remaining, found := []byte{}, b, false + for len(remaining) > 0 { + line, remaining, found = bytes.Cut(remaining, newline) + if len(line) > 0 { + if w.indentNext { + w.outWriter.Write(w.streamIndent) + w.indentNext = false + } + w.outWriter.Write(line) + } + if found { + w.outWriter.Write(newline) + w.indentNext = true + } + } } return w.buffer.Write(b) } @@ -75,7 +106,7 @@ func (w *Writer) Bytes() []byte { return copied } -//GinkgoWriterInterface +// GinkgoWriterInterface func (w *Writer) TeeTo(writer io.Writer) { w.lock.Lock() defer w.lock.Unlock() @@ -101,3 +132,9 @@ func (w *Writer) Printf(format string, a ...interface{}) { func (w *Writer) Println(a ...interface{}) { fmt.Fprintln(w, a...) 
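The reworked Write above splits each chunk on newlines with bytes.Cut and indents every fresh line, carrying mid-line state across calls so a line delivered in two writes is indented only once. A self-contained sketch of that stateful indenting writer under the same approach:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

var newline = []byte("\n")

// indentWriter prefixes each streamed line with indent; indentNext
// records whether the next byte starts a new line.
type indentWriter struct {
	out        io.Writer
	indent     []byte
	indentNext bool
}

func (w *indentWriter) Write(b []byte) (int, error) {
	remaining := b
	for len(remaining) > 0 {
		line, rest, found := bytes.Cut(remaining, newline)
		remaining = rest
		if len(line) > 0 {
			if w.indentNext {
				w.out.Write(w.indent)
				w.indentNext = false
			}
			w.out.Write(line)
		}
		if found {
			w.out.Write(newline)
			w.indentNext = true
		}
	}
	return len(b), nil
}

func main() {
	w := &indentWriter{out: os.Stdout, indent: []byte("  "), indentNext: true}
	fmt.Fprintf(w, "one\ntwo (contin")
	fmt.Fprintf(w, "ued)\n") // continues the line - no second indent
}
```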
} + +func GinkgoLogrFunc(writer *Writer) logr.Logger { + return funcr.New(func(prefix, args string) { + writer.Printf("%s\n", args) + }, funcr.Options{}) +} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/api/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go index f39802ffa..56b7be758 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go +++ b/api/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -12,6 +12,8 @@ import ( "io" "runtime" "strings" + "sync" + "time" "github.com/onsi/ginkgo/v2/formatter" "github.com/onsi/ginkgo/v2/types" @@ -22,13 +24,16 @@ type DefaultReporter struct { writer io.Writer // managing the emission stream - lastChar string + lastCharWasNewline bool lastEmissionWasDelimiter bool // rendering specDenoter string retryDenoter string formatter formatter.Formatter + + runningInParallel bool + lock *sync.Mutex } func NewDefaultReporterUnderTest(conf types.ReporterConfig, writer io.Writer) *DefaultReporter { @@ -43,12 +48,13 @@ func NewDefaultReporter(conf types.ReporterConfig, writer io.Writer) *DefaultRep conf: conf, writer: writer, - lastChar: "\n", + lastCharWasNewline: true, lastEmissionWasDelimiter: false, specDenoter: "•", retryDenoter: "↺", formatter: formatter.NewWithNoColorBool(conf.NoColor), + lock: &sync.Mutex{}, } if runtime.GOOS == "windows" { reporter.specDenoter = "+" @@ -96,247 +102,557 @@ func (r *DefaultReporter) SuiteWillBegin(report types.Report) { } } -func (r *DefaultReporter) WillRun(report types.SpecReport) { - if r.conf.Verbosity().LT(types.VerbosityLevelVerbose) || report.State.Is(types.SpecStatePending|types.SpecStateSkipped) { +func (r *DefaultReporter) SuiteDidEnd(report types.Report) { + failures := report.SpecReports.WithState(types.SpecStateFailureStates) + if len(failures) > 0 { + r.emitBlock("\n") + if len(failures) > 1 { + r.emitBlock(r.f("{{red}}{{bold}}Summarizing %d Failures:{{/}}", len(failures))) + } else { + r.emitBlock(r.f("{{red}}{{bold}}Summarizing 1 Failure:{{/}}")) + } + for _, specReport := range failures { + highlightColor, heading := "{{red}}", "[FAIL]" + switch specReport.State { + case types.SpecStatePanicked: + highlightColor, heading = "{{magenta}}", "[PANICKED!]" + case types.SpecStateAborted: + highlightColor, heading = "{{coral}}", "[ABORTED]" + case types.SpecStateTimedout: + highlightColor, heading = "{{orange}}", "[TIMEDOUT]" + case types.SpecStateInterrupted: + highlightColor, heading = "{{orange}}", "[INTERRUPTED]" + } + locationBlock := r.codeLocationBlock(specReport, highlightColor, false, true) + r.emitBlock(r.fi(1, highlightColor+"%s{{/}} %s", heading, locationBlock)) + } + } + + //summarize the suite + if r.conf.Verbosity().Is(types.VerbosityLevelSuccinct) && report.SuiteSucceeded { + r.emit(r.f(" {{green}}SUCCESS!{{/}} %s ", report.RunTime)) return } - r.emitDelimiter() - indentation := uint(0) - if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { - r.emitBlock(r.f("{{bold}}[%s] %s{{/}}", report.LeafNodeType.String(), report.LeafNodeText)) + r.emitBlock("\n") + color, status := "{{green}}{{bold}}", "SUCCESS!" + if !report.SuiteSucceeded { + color, status = "{{red}}{{bold}}", "FAIL!" 
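GinkgoLogrFunc above bridges logr into the buffered suite writer through funcr, which builds a complete logr.Logger from a single func(prefix, args string) sink. The same pattern against stdout (this sketch assumes the github.com/go-logr/logr module is on the module path):

```go
package main

import (
	"fmt"

	"github.com/go-logr/logr/funcr"
)

func main() {
	// funcr renders each log call into one formatted string and hands
	// it to the sink - stdout here, Ginkgo's writer in the code above.
	log := funcr.New(func(prefix, args string) {
		fmt.Printf("%s %s\n", prefix, args)
	}, funcr.Options{})

	log.Info("spec started", "attempt", 1)
	log.V(1).Info("suppressed unless Options.Verbosity is raised")
}
```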
+ } + + specs := report.SpecReports.WithLeafNodeType(types.NodeTypeIt) //exclude any suite setup nodes + r.emitBlock(r.f(color+"Ran %d of %d Specs in %.3f seconds{{/}}", + specs.CountWithState(types.SpecStatePassed)+specs.CountWithState(types.SpecStateFailureStates), + report.PreRunStats.TotalSpecs, + report.RunTime.Seconds()), + ) + + switch len(report.SpecialSuiteFailureReasons) { + case 0: + r.emit(r.f(color+"%s{{/}} -- ", status)) + case 1: + r.emit(r.f(color+"%s - %s{{/}} -- ", status, report.SpecialSuiteFailureReasons[0])) + default: + r.emitBlock(r.f(color+"%s - %s{{/}}\n", status, strings.Join(report.SpecialSuiteFailureReasons, ", "))) + } + + if len(specs) == 0 && report.SpecReports.WithLeafNodeType(types.NodeTypeBeforeSuite|types.NodeTypeSynchronizedBeforeSuite).CountWithState(types.SpecStateFailureStates) > 0 { + r.emit(r.f("{{cyan}}{{bold}}A BeforeSuite node failed so all tests were skipped.{{/}}\n")) } else { - if len(report.ContainerHierarchyTexts) > 0 { - r.emitBlock(r.cycleJoin(report.ContainerHierarchyTexts, " ")) - indentation = 1 + r.emit(r.f("{{green}}{{bold}}%d Passed{{/}} | ", specs.CountWithState(types.SpecStatePassed))) + r.emit(r.f("{{red}}{{bold}}%d Failed{{/}} | ", specs.CountWithState(types.SpecStateFailureStates))) + if specs.CountOfFlakedSpecs() > 0 { + r.emit(r.f("{{light-yellow}}{{bold}}%d Flaked{{/}} | ", specs.CountOfFlakedSpecs())) } - line := r.fi(indentation, "{{bold}}%s{{/}}", report.LeafNodeText) - labels := report.Labels() - if len(labels) > 0 { - line += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels, ", ")) + if specs.CountOfRepeatedSpecs() > 0 { + r.emit(r.f("{{light-yellow}}{{bold}}%d Repeated{{/}} | ", specs.CountOfRepeatedSpecs())) } - r.emitBlock(line) + r.emit(r.f("{{yellow}}{{bold}}%d Pending{{/}} | ", specs.CountWithState(types.SpecStatePending))) + r.emit(r.f("{{cyan}}{{bold}}%d Skipped{{/}}\n", specs.CountWithState(types.SpecStateSkipped))) } - r.emitBlock(r.fi(indentation, "{{gray}}%s{{/}}", report.LeafNodeLocation)) } -func (r *DefaultReporter) DidRun(report types.SpecReport) { +func (r *DefaultReporter) WillRun(report types.SpecReport) { v := r.conf.Verbosity() - var header, highlightColor string - includeRuntime, emitGinkgoWriterOutput, stream, denoter := true, true, false, r.specDenoter - succinctLocationBlock := v.Is(types.VerbosityLevelSuccinct) + if v.LT(types.VerbosityLevelVerbose) || report.State.Is(types.SpecStatePending|types.SpecStateSkipped) || report.RunningInParallel { + return + } - hasGW := report.CapturedGinkgoWriterOutput != "" - hasStd := report.CapturedStdOutErr != "" - hasEmittableReports := report.ReportEntries.HasVisibility(types.ReportEntryVisibilityAlways) || (report.ReportEntries.HasVisibility(types.ReportEntryVisibilityFailureOrVerbose) && (!report.Failure.IsZero() || v.GTE(types.VerbosityLevelVerbose))) + r.emitDelimiter(0) + r.emitBlock(r.f(r.codeLocationBlock(report, "{{/}}", v.Is(types.VerbosityLevelVeryVerbose), false))) +} +func (r *DefaultReporter) DidRun(report types.SpecReport) { + v := r.conf.Verbosity() + inParallel := report.RunningInParallel + + header := r.specDenoter if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { - denoter = fmt.Sprintf("[%s]", report.LeafNodeType) + header = fmt.Sprintf("[%s]", report.LeafNodeType) + } + highlightColor := r.highlightColorForState(report.State) + + // have we already been streaming the timeline? + timelineHasBeenStreaming := v.GTE(types.VerbosityLevelVerbose) && !inParallel + + // should we show the timeline? 
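WillRun and DidRun gate nearly every emission on an ordered verbosity level compared with LT/GTE/Is helpers. A sketch of that ordered-enum pattern; the level names are assumptions standing in for the ones in the types package:

```go
package main

import "fmt"

// Verbosity is an ordered scale: each level includes everything below it.
type Verbosity int

const (
	Succinct Verbosity = iota
	Normal
	Verbose     // ginkgo -v
	VeryVerbose // ginkgo -vv
)

func (v Verbosity) LT(o Verbosity) bool  { return v < o }
func (v Verbosity) GTE(o Verbosity) bool { return v >= o }
func (v Verbosity) Is(o Verbosity) bool  { return v == o }

func main() {
	v := Verbose
	streamTimeline := v.GTE(Verbose) // stream live at -v and above
	showNodeEvents := v.Is(VeryVerbose)
	fmt.Println(streamTimeline, showNodeEvents) // true false
}
```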
+ var timeline types.Timeline + showTimeline := !timelineHasBeenStreaming && (v.GTE(types.VerbosityLevelVerbose) || report.Failed()) + if showTimeline { + timeline = report.Timeline().WithoutHiddenReportEntries() + keepVeryVerboseSpecEvents := v.Is(types.VerbosityLevelVeryVerbose) || + (v.Is(types.VerbosityLevelVerbose) && r.conf.ShowNodeEvents) || + (report.Failed() && r.conf.ShowNodeEvents) + if !keepVeryVerboseSpecEvents { + timeline = timeline.WithoutVeryVerboseSpecEvents() + } + if len(timeline) == 0 && report.CapturedGinkgoWriterOutput == "" { + // the timeline is completely empty - don't show it + showTimeline = false + } + if v.LT(types.VerbosityLevelVeryVerbose) && report.CapturedGinkgoWriterOutput == "" && len(timeline) > 0 { + //if we aren't -vv and the timeline only has a single failure, don't show it as it will appear at the end of the report + failure, isFailure := timeline[0].(types.Failure) + if isFailure && (len(timeline) == 1 || (len(timeline) == 2 && failure.AdditionalFailure != nil)) { + showTimeline = false + } + } } + // should we have a separate section for always-visible reports? + showSeparateVisibilityAlwaysReportsSection := !timelineHasBeenStreaming && !showTimeline && report.ReportEntries.HasVisibility(types.ReportEntryVisibilityAlways) + + // should we have a separate section for captured stdout/stderr + showSeparateStdSection := inParallel && (report.CapturedStdOutErr != "") + + // given all that - do we have any actual content to show? or are we a single denoter in a stream? + reportHasContent := v.Is(types.VerbosityLevelVeryVerbose) || showTimeline || showSeparateVisibilityAlwaysReportsSection || showSeparateStdSection || report.Failed() || (v.Is(types.VerbosityLevelVerbose) && !report.State.Is(types.SpecStateSkipped)) + + // should we show a runtime? + includeRuntime := !report.State.Is(types.SpecStateSkipped|types.SpecStatePending) || (report.State.Is(types.SpecStateSkipped) && report.Failure.Message != "") + + // should we show the codelocation block? 
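Before printing a timeline, DidRun prunes entries the active verbosity should hide and drops the section entirely if nothing survives. A generic keep-if-visible sketch of that step; the event type and its flag are assumptions:

```go
package main

import "fmt"

type event struct {
	msg         string
	veryVerbose bool // only meaningful at the highest verbosity
}

// withoutVeryVerbose drops events reserved for -vv, mirroring the
// timeline-pruning step above.
func withoutVeryVerbose(timeline []event) []event {
	kept := make([]event, 0, len(timeline))
	for _, e := range timeline {
		if !e.veryVerbose {
			kept = append(kept, e)
		}
	}
	return kept
}

func main() {
	tl := []event{{"[FAILED] assertion", false}, {"> Enter [It]", true}}
	tl = withoutVeryVerbose(tl)
	if len(tl) == 0 {
		fmt.Println("timeline empty - skip the section")
		return
	}
	for _, e := range tl {
		fmt.Println(e.msg) // only the failure survives
	}
}
```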
+ showCodeLocation := !timelineHasBeenStreaming || !report.State.Is(types.SpecStatePassed) + switch report.State { case types.SpecStatePassed: - highlightColor, succinctLocationBlock = "{{green}}", v.LT(types.VerbosityLevelVerbose) - emitGinkgoWriterOutput = (r.conf.AlwaysEmitGinkgoWriter || v.GTE(types.VerbosityLevelVerbose)) && hasGW + if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) && !reportHasContent { + return + } if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { - if v.GTE(types.VerbosityLevelVerbose) || hasStd || hasEmittableReports { - header = fmt.Sprintf("%s PASSED", denoter) - } else { - return - } - } else { - header, stream = denoter, true - if report.NumAttempts > 1 { - header, stream = fmt.Sprintf("%s [FLAKEY TEST - TOOK %d ATTEMPTS TO PASS]", r.retryDenoter, report.NumAttempts), false - } - if report.RunTime > r.conf.SlowSpecThreshold { - header, stream = fmt.Sprintf("%s [SLOW TEST]", header), false - } + header = fmt.Sprintf("%s PASSED", header) } - if hasStd || emitGinkgoWriterOutput || hasEmittableReports { - stream = false + if report.NumAttempts > 1 && report.MaxFlakeAttempts > 1 { + header, reportHasContent = fmt.Sprintf("%s [FLAKEY TEST - TOOK %d ATTEMPTS TO PASS]", r.retryDenoter, report.NumAttempts), true } case types.SpecStatePending: - highlightColor = "{{yellow}}" - includeRuntime, emitGinkgoWriterOutput = false, false - if v.Is(types.VerbosityLevelSuccinct) { - header, stream = "P", true - } else { - header, succinctLocationBlock = "P [PENDING]", v.LT(types.VerbosityLevelVeryVerbose) + header = "P" + if v.GT(types.VerbosityLevelSuccinct) { + header, reportHasContent = "P [PENDING]", true } case types.SpecStateSkipped: - highlightColor = "{{cyan}}" - if report.Failure.Message != "" || v.Is(types.VerbosityLevelVeryVerbose) { - header = "S [SKIPPED]" - } else { - header, stream = "S", true + header = "S" + if v.Is(types.VerbosityLevelVeryVerbose) || (v.Is(types.VerbosityLevelVerbose) && report.Failure.Message != "") { + header, reportHasContent = "S [SKIPPED]", true + } + default: + header = fmt.Sprintf("%s [%s]", header, r.humanReadableState(report.State)) + if report.MaxMustPassRepeatedly > 1 { + header = fmt.Sprintf("%s DURING REPETITION #%d", header, report.NumAttempts) } - case types.SpecStateFailed: - highlightColor, header = "{{red}}", fmt.Sprintf("%s [FAILED]", denoter) - case types.SpecStatePanicked: - highlightColor, header = "{{magenta}}", fmt.Sprintf("%s! [PANICKED]", denoter) - case types.SpecStateInterrupted: - highlightColor, header = "{{orange}}", fmt.Sprintf("%s! [INTERRUPTED]", denoter) - case types.SpecStateAborted: - highlightColor, header = "{{coral}}", fmt.Sprintf("%s! 
[ABORTED]", denoter) } - // Emit stream and return - if stream { + // If we have no content to show, jsut emit the header and return + if !reportHasContent { r.emit(r.f(highlightColor + header + "{{/}}")) return } - // Emit header - r.emitDelimiter() if includeRuntime { header = r.f("%s [%.3f seconds]", header, report.RunTime.Seconds()) } - r.emitBlock(r.f(highlightColor + header + "{{/}}")) - // Emit Code Location Block - r.emitBlock(r.codeLocationBlock(report, highlightColor, succinctLocationBlock, false)) + // Emit header + if !timelineHasBeenStreaming { + r.emitDelimiter(0) + } + r.emitBlock(r.f(highlightColor + header + "{{/}}")) + if showCodeLocation { + r.emitBlock(r.codeLocationBlock(report, highlightColor, v.Is(types.VerbosityLevelVeryVerbose), false)) + } //Emit Stdout/Stderr Output - if hasStd { + if showSeparateStdSection { r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Begin Captured StdOut/StdErr Output >>{{/}}")) - r.emitBlock(r.fi(2, "%s", report.CapturedStdOutErr)) - r.emitBlock(r.fi(1, "{{gray}}<< End Captured StdOut/StdErr Output{{/}}")) + r.emitBlock(r.fi(1, "{{gray}}Captured StdOut/StdErr Output >>{{/}}")) + r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr)) + r.emitBlock(r.fi(1, "{{gray}}<< Captured StdOut/StdErr Output{{/}}")) } - //Emit Captured GinkgoWriter Output - if emitGinkgoWriterOutput && hasGW { + if showSeparateVisibilityAlwaysReportsSection { r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Begin Captured GinkgoWriter Output >>{{/}}")) - r.emitBlock(r.fi(2, "%s", report.CapturedGinkgoWriterOutput)) - r.emitBlock(r.fi(1, "{{gray}}<< End Captured GinkgoWriter Output{{/}}")) + r.emitBlock(r.fi(1, "{{gray}}Report Entries >>{{/}}")) + for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) { + r.emitReportEntry(1, entry) + } + r.emitBlock(r.fi(1, "{{gray}}<< Report Entries{{/}}")) } - if hasEmittableReports { + if showTimeline { r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Begin Report Entries >>{{/}}")) - reportEntries := report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) - if !report.Failure.IsZero() || v.GTE(types.VerbosityLevelVerbose) { - reportEntries = report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways, types.ReportEntryVisibilityFailureOrVerbose) - } - for _, entry := range reportEntries { - r.emitBlock(r.fi(2, "{{bold}}"+entry.Name+"{{gray}} - %s @ %s{{/}}", entry.Location, entry.Time.Format(types.GINKGO_TIME_FORMAT))) - if representation := entry.StringRepresentation(); representation != "" { - r.emitBlock(r.fi(3, representation)) - } - } - r.emitBlock(r.fi(1, "{{gray}}<< End Report Entries{{/}}")) + r.emitBlock(r.fi(1, "{{gray}}Timeline >>{{/}}")) + r.emitTimeline(1, report, timeline) + r.emitBlock(r.fi(1, "{{gray}}<< Timeline{{/}}")) } // Emit Failure Message - if !report.Failure.IsZero() { + if !report.Failure.IsZero() && !v.Is(types.VerbosityLevelVeryVerbose) { r.emitBlock("\n") - r.emitBlock(r.fi(1, highlightColor+"%s{{/}}", report.Failure.Message)) - r.emitBlock(r.fi(1, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}}\n", report.Failure.FailureNodeType, report.Failure.Location)) - if report.Failure.ForwardedPanic != "" { - r.emitBlock("\n") - r.emitBlock(r.fi(1, highlightColor+"%s{{/}}", report.Failure.ForwardedPanic)) + r.emitFailure(1, report.State, report.Failure, true) + if len(report.AdditionalFailures) > 0 { + r.emitBlock(r.fi(1, "\nThere were {{bold}}{{red}}additional failures{{/}} detected. 
To view them in detail run {{bold}}ginkgo -vv{{/}}")) } + } - if r.conf.FullTrace || report.Failure.ForwardedPanic != "" { - r.emitBlock("\n") - r.emitBlock(r.fi(1, highlightColor+"Full Stack Trace{{/}}")) - r.emitBlock(r.fi(2, "%s", report.Failure.Location.FullStackTrace)) - } + r.emitDelimiter(0) +} + +func (r *DefaultReporter) highlightColorForState(state types.SpecState) string { + switch state { + case types.SpecStatePassed: + return "{{green}}" + case types.SpecStatePending: + return "{{yellow}}" + case types.SpecStateSkipped: + return "{{cyan}}" + case types.SpecStateFailed: + return "{{red}}" + case types.SpecStateTimedout: + return "{{orange}}" + case types.SpecStatePanicked: + return "{{magenta}}" + case types.SpecStateInterrupted: + return "{{orange}}" + case types.SpecStateAborted: + return "{{coral}}" + default: + return "{{gray}}" } +} - r.emitDelimiter() +func (r *DefaultReporter) humanReadableState(state types.SpecState) string { + return strings.ToUpper(state.String()) } -func (r *DefaultReporter) SuiteDidEnd(report types.Report) { - failures := report.SpecReports.WithState(types.SpecStateFailureStates) - if len(failures) > 1 { - r.emitBlock("\n\n") - r.emitBlock(r.f("{{red}}{{bold}}Summarizing %d Failures:{{/}}", len(failures))) - for _, specReport := range failures { - highlightColor, heading := "{{red}}", "[FAIL]" - switch specReport.State { - case types.SpecStatePanicked: - highlightColor, heading = "{{magenta}}", "[PANICKED!]" - case types.SpecStateAborted: - highlightColor, heading = "{{coral}}", "[ABORTED]" - case types.SpecStateInterrupted: - highlightColor, heading = "{{orange}}", "[INTERRUPTED]" +func (r *DefaultReporter) emitTimeline(indent uint, report types.SpecReport, timeline types.Timeline) { + isVeryVerbose := r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose) + gw := report.CapturedGinkgoWriterOutput + cursor := 0 + for _, entry := range timeline { + tl := entry.GetTimelineLocation() + if tl.Offset < len(gw) { + r.emit(r.fi(indent, "%s", gw[cursor:tl.Offset])) + cursor = tl.Offset + } else if cursor < len(gw) { + r.emit(r.fi(indent, "%s", gw[cursor:])) + cursor = len(gw) + } + switch x := entry.(type) { + case types.Failure: + if isVeryVerbose { + r.emitFailure(indent, report.State, x, false) + } else { + r.emitShortFailure(indent, report.State, x) + } + case types.AdditionalFailure: + if isVeryVerbose { + r.emitFailure(indent, x.State, x.Failure, true) + } else { + r.emitShortFailure(indent, x.State, x.Failure) + } + case types.ReportEntry: + r.emitReportEntry(indent, x) + case types.ProgressReport: + r.emitProgressReport(indent, false, x) + case types.SpecEvent: + if isVeryVerbose || !x.IsOnlyVisibleAtVeryVerbose() || r.conf.ShowNodeEvents { + r.emitSpecEvent(indent, x, isVeryVerbose) } - locationBlock := r.codeLocationBlock(specReport, highlightColor, true, true) - r.emitBlock(r.fi(1, highlightColor+"%s{{/}} %s", heading, locationBlock)) } } + if cursor < len(gw) { + r.emit(r.fi(indent, "%s", gw[cursor:])) + } +} - //summarize the suite - if r.conf.Verbosity().Is(types.VerbosityLevelSuccinct) && report.SuiteSucceeded { - r.emit(r.f(" {{green}}SUCCESS!{{/}} %s ", report.RunTime)) - return +func (r *DefaultReporter) EmitFailure(state types.SpecState, failure types.Failure) { + if r.conf.Verbosity().Is(types.VerbosityLevelVerbose) { + r.emitShortFailure(1, state, failure) + } else if r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose) { + r.emitFailure(1, state, failure, true) } +} - r.emitBlock("\n") - color, status := "{{green}}{{bold}}", 
"SUCCESS!" - if !report.SuiteSucceeded { - color, status = "{{red}}{{bold}}", "FAIL!" +func (r *DefaultReporter) emitShortFailure(indent uint, state types.SpecState, failure types.Failure) { + r.emitBlock(r.fi(indent, r.highlightColorForState(state)+"[%s]{{/}} in [%s] - %s {{gray}}@ %s{{/}}", + r.humanReadableState(state), + failure.FailureNodeType, + failure.Location, + failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT), + )) +} + +func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failure types.Failure, includeAdditionalFailure bool) { + highlightColor := r.highlightColorForState(state) + r.emitBlock(r.fi(indent, highlightColor+"[%s] %s{{/}}", r.humanReadableState(state), failure.Message)) + r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + if failure.ForwardedPanic != "" { + r.emitBlock("\n") + r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic)) } - specs := report.SpecReports.WithLeafNodeType(types.NodeTypeIt) //exclude any suite setup nodes - r.emitBlock(r.f(color+"Ran %d of %d Specs in %.3f seconds{{/}}", - specs.CountWithState(types.SpecStatePassed)+specs.CountWithState(types.SpecStateFailureStates), - report.PreRunStats.TotalSpecs, - report.RunTime.Seconds()), - ) + if r.conf.FullTrace || failure.ForwardedPanic != "" { + r.emitBlock("\n") + r.emitBlock(r.fi(indent, highlightColor+"Full Stack Trace{{/}}")) + r.emitBlock(r.fi(indent+1, "%s", failure.Location.FullStackTrace)) + } - switch len(report.SpecialSuiteFailureReasons) { - case 0: - r.emit(r.f(color+"%s{{/}} -- ", status)) - case 1: - r.emit(r.f(color+"%s - %s{{/}} -- ", status, report.SpecialSuiteFailureReasons[0])) - default: - r.emitBlock(r.f(color+"%s - %s{{/}}\n", status, strings.Join(report.SpecialSuiteFailureReasons, ", "))) + if !failure.ProgressReport.IsZero() { + r.emitBlock("\n") + r.emitProgressReport(indent, false, failure.ProgressReport) } - if len(specs) == 0 && report.SpecReports.WithLeafNodeType(types.NodeTypeBeforeSuite|types.NodeTypeSynchronizedBeforeSuite).CountWithState(types.SpecStateFailureStates) > 0 { - r.emit(r.f("{{cyan}}{{bold}}A BeforeSuite node failed so all tests were skipped.{{/}}\n")) - } else { - r.emit(r.f("{{green}}{{bold}}%d Passed{{/}} | ", specs.CountWithState(types.SpecStatePassed))) - r.emit(r.f("{{red}}{{bold}}%d Failed{{/}} | ", specs.CountWithState(types.SpecStateFailureStates))) - if specs.CountOfFlakedSpecs() > 0 { - r.emit(r.f("{{light-yellow}}{{bold}}%d Flaked{{/}} | ", specs.CountOfFlakedSpecs())) + if failure.AdditionalFailure != nil && includeAdditionalFailure { + r.emitBlock("\n") + r.emitFailure(indent, failure.AdditionalFailure.State, failure.AdditionalFailure.Failure, true) + } +} + +func (r *DefaultReporter) EmitProgressReport(report types.ProgressReport) { + r.emitDelimiter(1) + + if report.RunningInParallel { + r.emit(r.fi(1, "{{coral}}Progress Report for Ginkgo Process #{{bold}}%d{{/}}\n", report.ParallelProcess)) + } + shouldEmitGW := report.RunningInParallel || r.conf.Verbosity().LT(types.VerbosityLevelVerbose) + r.emitProgressReport(1, shouldEmitGW, report) + r.emitDelimiter(1) +} + +func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput bool, report types.ProgressReport) { + if report.Message != "" { + r.emitBlock(r.fi(indent, report.Message+"\n")) + indent += 1 + } + if report.LeafNodeText != "" { + 
subjectIndent := indent + if len(report.ContainerHierarchyTexts) > 0 { + r.emit(r.fi(indent, r.cycleJoin(report.ContainerHierarchyTexts, " "))) + r.emit(" ") + subjectIndent = 0 } - r.emit(r.f("{{yellow}}{{bold}}%d Pending{{/}} | ", specs.CountWithState(types.SpecStatePending))) - r.emit(r.f("{{cyan}}{{bold}}%d Skipped{{/}}\n", specs.CountWithState(types.SpecStateSkipped))) + r.emit(r.fi(subjectIndent, "{{bold}}{{orange}}%s{{/}} (Spec Runtime: %s)\n", report.LeafNodeText, report.Time().Sub(report.SpecStartTime).Round(time.Millisecond))) + r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.LeafNodeLocation)) + indent += 1 + } + if report.CurrentNodeType != types.NodeTypeInvalid { + r.emit(r.fi(indent, "In {{bold}}{{orange}}[%s]{{/}}", report.CurrentNodeType)) + if report.CurrentNodeText != "" && !report.CurrentNodeType.Is(types.NodeTypeIt) { + r.emit(r.f(" {{bold}}{{orange}}%s{{/}}", report.CurrentNodeText)) + } + + r.emit(r.f(" (Node Runtime: %s)\n", report.Time().Sub(report.CurrentNodeStartTime).Round(time.Millisecond))) + r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentNodeLocation)) + indent += 1 + } + if report.CurrentStepText != "" { + r.emit(r.fi(indent, "At {{bold}}{{orange}}[By Step] %s{{/}} (Step Runtime: %s)\n", report.CurrentStepText, report.Time().Sub(report.CurrentStepStartTime).Round(time.Millisecond))) + r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentStepLocation)) + indent += 1 + } + + if indent > 0 { + indent -= 1 + } + + if emitGinkgoWriterOutput && report.CapturedGinkgoWriterOutput != "" { + r.emit("\n") + r.emitBlock(r.fi(indent, "{{gray}}Begin Captured GinkgoWriter Output >>{{/}}")) + limit, lines := 10, strings.Split(report.CapturedGinkgoWriterOutput, "\n") + if len(lines) <= limit { + r.emitBlock(r.fi(indent+1, "%s", report.CapturedGinkgoWriterOutput)) + } else { + r.emitBlock(r.fi(indent+1, "{{gray}}...{{/}}")) + for _, line := range lines[len(lines)-limit-1:] { + r.emitBlock(r.fi(indent+1, "%s", line)) + } + } + r.emitBlock(r.fi(indent, "{{gray}}<< End Captured GinkgoWriter Output{{/}}")) + } + + if !report.SpecGoroutine().IsZero() { + r.emit("\n") + r.emit(r.fi(indent, "{{bold}}{{underline}}Spec Goroutine{{/}}\n")) + r.emitGoroutines(indent, report.SpecGoroutine()) + } + + if len(report.AdditionalReports) > 0 { + r.emit("\n") + r.emitBlock(r.fi(indent, "{{gray}}Begin Additional Progress Reports >>{{/}}")) + for i, additionalReport := range report.AdditionalReports { + r.emit(r.fi(indent+1, additionalReport)) + if i < len(report.AdditionalReports)-1 { + r.emitBlock(r.fi(indent+1, "{{gray}}%s{{/}}", strings.Repeat("-", 10))) + } + } + r.emitBlock(r.fi(indent, "{{gray}}<< End Additional Progress Reports{{/}}")) + } + + highlightedGoroutines := report.HighlightedGoroutines() + if len(highlightedGoroutines) > 0 { + r.emit("\n") + r.emit(r.fi(indent, "{{bold}}{{underline}}Goroutines of Interest{{/}}\n")) + r.emitGoroutines(indent, highlightedGoroutines...) + } + + otherGoroutines := report.OtherGoroutines() + if len(otherGoroutines) > 0 { + r.emit("\n") + r.emit(r.fi(indent, "{{gray}}{{bold}}{{underline}}Other Goroutines{{/}}\n")) + r.emitGoroutines(indent, otherGoroutines...) 
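When a progress report embeds captured GinkgoWriter output, only a bounded tail of it is printed, preceded by a gray ellipsis. A standalone tail-truncation sketch (the exact bound and slicing Ginkgo uses may differ slightly):

```go
package main

import (
	"fmt"
	"strings"
)

// lastLines returns up to limit trailing lines of s and reports
// whether anything was cut.
func lastLines(s string, limit int) ([]string, bool) {
	lines := strings.Split(s, "\n")
	if len(lines) <= limit {
		return lines, false
	}
	return lines[len(lines)-limit:], true
}

func main() {
	out := "line 1\nline 2\nline 3\nline 4"
	tail, truncated := lastLines(out, 2)
	if truncated {
		fmt.Println("...") // stands in for the gray ellipsis marker
	}
	for _, line := range tail {
		fmt.Println(line) // line 3, line 4
	}
}
```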
} } -/* Emitting to the writer */ -func (r *DefaultReporter) emit(s string) { - if len(s) > 0 { - r.lastChar = s[len(s)-1:] - r.lastEmissionWasDelimiter = false - r.writer.Write([]byte(s)) +func (r *DefaultReporter) EmitReportEntry(entry types.ReportEntry) { + if r.conf.Verbosity().LT(types.VerbosityLevelVerbose) || entry.Visibility == types.ReportEntryVisibilityNever { + return } + r.emitReportEntry(1, entry) } -func (r *DefaultReporter) emitBlock(s string) { - if len(s) > 0 { - if r.lastChar != "\n" { - r.emit("\n") +func (r *DefaultReporter) emitReportEntry(indent uint, entry types.ReportEntry) { + r.emitBlock(r.fi(indent, "{{bold}}"+entry.Name+"{{gray}} "+fmt.Sprintf("- %s @ %s{{/}}", entry.Location, entry.Time.Format(types.GINKGO_TIME_FORMAT)))) + if representation := entry.StringRepresentation(); representation != "" { + r.emitBlock(r.fi(indent+1, representation)) + } +} + +func (r *DefaultReporter) EmitSpecEvent(event types.SpecEvent) { + v := r.conf.Verbosity() + if v.Is(types.VerbosityLevelVeryVerbose) || (v.Is(types.VerbosityLevelVerbose) && (r.conf.ShowNodeEvents || !event.IsOnlyVisibleAtVeryVerbose())) { + r.emitSpecEvent(1, event, r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose)) + } +} + +func (r *DefaultReporter) emitSpecEvent(indent uint, event types.SpecEvent, includeLocation bool) { + location := "" + if includeLocation { + location = fmt.Sprintf("- %s ", event.CodeLocation.String()) + } + switch event.SpecEventType { + case types.SpecEventInvalid: + return + case types.SpecEventByStart: + r.emitBlock(r.fi(indent, "{{bold}}STEP:{{/}} %s {{gray}}%s@ %s{{/}}", event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + case types.SpecEventByEnd: + r.emitBlock(r.fi(indent, "{{bold}}END STEP:{{/}} %s {{gray}}%s@ %s (%s){{/}}", event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT), event.Duration.Round(time.Millisecond))) + case types.SpecEventNodeStart: + r.emitBlock(r.fi(indent, "> Enter {{bold}}[%s]{{/}} %s {{gray}}%s@ %s{{/}}", event.NodeType.String(), event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + case types.SpecEventNodeEnd: + r.emitBlock(r.fi(indent, "< Exit {{bold}}[%s]{{/}} %s {{gray}}%s@ %s (%s){{/}}", event.NodeType.String(), event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT), event.Duration.Round(time.Millisecond))) + case types.SpecEventSpecRepeat: + r.emitBlock(r.fi(indent, "\n{{bold}}Attempt #%d {{green}}Passed{{/}}{{bold}}. Repeating %s{{/}} {{gray}}@ %s{{/}}\n\n", event.Attempt, r.retryDenoter, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + case types.SpecEventSpecRetry: + r.emitBlock(r.fi(indent, "\n{{bold}}Attempt #%d {{red}}Failed{{/}}{{bold}}. 
Retrying %s{{/}} {{gray}}@ %s{{/}}\n\n", event.Attempt, r.retryDenoter, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + } +} + +func (r *DefaultReporter) emitGoroutines(indent uint, goroutines ...types.Goroutine) { + for idx, g := range goroutines { + color := "{{gray}}" + if g.HasHighlights() { + color = "{{orange}}" } - r.emit(s) - if r.lastChar != "\n" { + r.emit(r.fi(indent, color+"goroutine %d [%s]{{/}}\n", g.ID, g.State)) + for _, fc := range g.Stack { + if fc.Highlight { + r.emit(r.fi(indent, color+"{{bold}}> %s{{/}}\n", fc.Function)) + r.emit(r.fi(indent+2, color+"{{bold}}%s:%d{{/}}\n", fc.Filename, fc.Line)) + r.emitSource(indent+3, fc) + } else { + r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", fc.Function)) + r.emit(r.fi(indent+2, "{{gray}}%s:%d{{/}}\n", fc.Filename, fc.Line)) + } + } + + if idx+1 < len(goroutines) { r.emit("\n") } } } -func (r *DefaultReporter) emitDelimiter() { - if r.lastEmissionWasDelimiter { +func (r *DefaultReporter) emitSource(indent uint, fc types.FunctionCall) { + lines := fc.Source + if len(lines) == 0 { return } - r.emitBlock(r.f("{{gray}}%s{{/}}", strings.Repeat("-", 30))) - r.lastEmissionWasDelimiter = true + + lTrim := 100000 + for _, line := range lines { + lTrimLine := len(line) - len(strings.TrimLeft(line, " \t")) + if lTrimLine < lTrim && len(line) > 0 { + lTrim = lTrimLine + } + } + if lTrim == 100000 { + lTrim = 0 + } + + for idx, line := range lines { + if len(line) > lTrim { + line = line[lTrim:] + } + if idx == fc.SourceHighlight { + r.emit(r.fi(indent, "{{bold}}{{orange}}> %s{{/}}\n", line)) + } else { + r.emit(r.fi(indent, "| %s\n", line)) + } + } +} + +/* Emitting to the writer */ +func (r *DefaultReporter) emit(s string) { + r._emit(s, false, false) +} + +func (r *DefaultReporter) emitBlock(s string) { + r._emit(s, true, false) +} + +func (r *DefaultReporter) emitDelimiter(indent uint) { + r._emit(r.fi(indent, "{{gray}}%s{{/}}", strings.Repeat("-", 30)), true, true) +} + +// a bit ugly - but we're trying to minimize locking on this hot codepath +func (r *DefaultReporter) _emit(s string, block bool, isDelimiter bool) { + if len(s) == 0 { + return + } + r.lock.Lock() + defer r.lock.Unlock() + if isDelimiter && r.lastEmissionWasDelimiter { + return + } + if block && !r.lastCharWasNewline { + r.writer.Write([]byte("\n")) + } + r.lastCharWasNewline = (s[len(s)-1:] == "\n") + r.writer.Write([]byte(s)) + if block && !r.lastCharWasNewline { + r.writer.Write([]byte("\n")) + r.lastCharWasNewline = true + } + r.lastEmissionWasDelimiter = isDelimiter } /* Rendering text */ @@ -352,13 +668,14 @@ func (r *DefaultReporter) cycleJoin(elements []string, joiner string) string { return r.formatter.CycleJoin(elements, joiner, []string{"{{/}}", "{{gray}}"}) } -func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightColor string, succinct bool, usePreciseFailureLocation bool) string { +func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightColor string, veryVerbose bool, usePreciseFailureLocation bool) string { texts, locations, labels := []string{}, []types.CodeLocation{}, [][]string{} texts, locations, labels = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...) 
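The consolidated _emit above pushes all writes through one mutex, remembers whether the last byte out was a newline so block emissions stay separated, and swallows back-to-back delimiters. A reduced sketch of that bookkeeping with illustrative names:

```go
package main

import (
	"os"
	"strings"
	"sync"
)

// emitter mirrors the newline/delimiter bookkeeping described above.
type emitter struct {
	mu               sync.Mutex
	lastWasNewline   bool
	lastWasDelimiter bool
}

func (e *emitter) emit(s string, block, isDelimiter bool) {
	if len(s) == 0 {
		return
	}
	e.mu.Lock()
	defer e.mu.Unlock()
	if isDelimiter && e.lastWasDelimiter {
		return // collapse repeated delimiters
	}
	if block && !e.lastWasNewline {
		os.Stdout.WriteString("\n") // blocks always start on a fresh line
	}
	e.lastWasNewline = strings.HasSuffix(s, "\n")
	os.Stdout.WriteString(s)
	if block && !e.lastWasNewline {
		os.Stdout.WriteString("\n") // ...and end on one
		e.lastWasNewline = true
	}
	e.lastWasDelimiter = isDelimiter
}

func main() {
	e := &emitter{lastWasNewline: true}
	e.emit("•", false, false)
	e.emit(strings.Repeat("-", 30), true, true)
	e.emit(strings.Repeat("-", 30), true, true) // suppressed as a duplicate
}
```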
+ if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { texts = append(texts, r.f("[%s] %s", report.LeafNodeType, report.LeafNodeText)) } else { - texts = append(texts, report.LeafNodeText) + texts = append(texts, r.f(report.LeafNodeText)) } labels = append(labels, report.LeafNodeLabels) locations = append(locations, report.LeafNodeLocation) @@ -368,24 +685,58 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo failureLocation = report.Failure.Location } + highlightIndex := -1 switch report.Failure.FailureNodeContext { case types.FailureNodeAtTopLevel: - texts = append([]string{r.f(highlightColor+"{{bold}}TOP-LEVEL [%s]{{/}}", report.Failure.FailureNodeType)}, texts...) + texts = append([]string{fmt.Sprintf("TOP-LEVEL [%s]", report.Failure.FailureNodeType)}, texts...) locations = append([]types.CodeLocation{failureLocation}, locations...) labels = append([][]string{{}}, labels...) + highlightIndex = 0 case types.FailureNodeInContainer: i := report.Failure.FailureNodeContainerIndex - texts[i] = r.f(highlightColor+"{{bold}}%s [%s]{{/}}", texts[i], report.Failure.FailureNodeType) + texts[i] = fmt.Sprintf("%s [%s]", texts[i], report.Failure.FailureNodeType) locations[i] = failureLocation + highlightIndex = i case types.FailureNodeIsLeafNode: i := len(texts) - 1 - texts[i] = r.f(highlightColor+"{{bold}}[%s] %s{{/}}", report.LeafNodeType, report.LeafNodeText) + texts[i] = fmt.Sprintf("[%s] %s", report.LeafNodeType, report.LeafNodeText) locations[i] = failureLocation + highlightIndex = i + default: + //there is no failure, so we highlight the leaf node + highlightIndex = len(texts) - 1 } out := "" - if succinct { - out += r.f("%s", r.cycleJoin(texts, " ")) + if veryVerbose { + for i := range texts { + if i == highlightIndex { + out += r.fi(uint(i), highlightColor+"{{bold}}%s{{/}}", texts[i]) + } else { + out += r.fi(uint(i), "%s", texts[i]) + } + if len(labels[i]) > 0 { + out += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels[i], ", ")) + } + out += "\n" + out += r.fi(uint(i), "{{gray}}%s{{/}}\n", locations[i]) + } + } else { + for i := range texts { + style := "{{/}}" + if i%2 == 1 { + style = "{{gray}}" + } + if i == highlightIndex { + style = highlightColor + "{{bold}}" + } + out += r.f(style+"%s", texts[i]) + if i < len(texts)-1 { + out += " " + } else { + out += r.f("{{/}}") + } + } flattenedLabels := report.Labels() if len(flattenedLabels) > 0 { out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedLabels, ", ")) @@ -394,17 +745,15 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo if usePreciseFailureLocation { out += r.f("{{gray}}%s{{/}}", failureLocation) } else { - out += r.f("{{gray}}%s{{/}}", locations[len(locations)-1]) - } - } else { - for i := range texts { - out += r.fi(uint(i), "%s", texts[i]) - if len(labels[i]) > 0 { - out += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels[i], ", ")) + leafLocation := locations[len(locations)-1] + if (report.Failure.FailureNodeLocation != types.CodeLocation{}) && (report.Failure.FailureNodeLocation != leafLocation) { + out += r.fi(1, highlightColor+"[%s]{{/}} {{gray}}%s{{/}}\n", report.Failure.FailureNodeType, report.Failure.FailureNodeLocation) + out += r.fi(1, "{{gray}}[%s] %s{{/}}", report.LeafNodeType, leafLocation) + } else { + out += r.f("{{gray}}%s{{/}}", leafLocation) } - out += "\n" - out += r.fi(uint(i), "{{gray}}%s{{/}}\n", locations[i]) + } + } return out } diff --git a/api/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go 
b/api/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go index 89d30076b..613072ebf 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go +++ b/api/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go @@ -35,7 +35,7 @@ func ReportViaDeprecatedReporter(reporter DeprecatedReporter, report types.Repor FailOnPending: report.SuiteConfig.FailOnPending, FailFast: report.SuiteConfig.FailFast, FlakeAttempts: report.SuiteConfig.FlakeAttempts, - EmitSpecProgress: report.SuiteConfig.EmitSpecProgress, + EmitSpecProgress: false, DryRun: report.SuiteConfig.DryRun, ParallelNode: report.SuiteConfig.ParallelProcess, ParallelTotal: report.SuiteConfig.ParallelTotal, diff --git a/api/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go b/api/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go index 7f96c450f..be506f9b4 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go +++ b/api/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go @@ -4,12 +4,16 @@ import ( "encoding/json" "fmt" "os" + "path" "github.com/onsi/ginkgo/v2/types" ) -//GenerateJSONReport produces a JSON-formatted report at the passed in destination +// GenerateJSONReport produces a JSON-formatted report at the passed in destination func GenerateJSONReport(report types.Report, destination string) error { + if err := os.MkdirAll(path.Dir(destination), 0770); err != nil { + return err + } f, err := os.Create(destination) if err != nil { return err @@ -25,8 +29,8 @@ func GenerateJSONReport(report types.Report, destination string) error { return f.Close() } -//MergeJSONReports produces a single JSON-formatted report at the passed in destination by merging the JSON-formatted reports provided in sources -//It skips over reports that fail to decode but reports on them via the returned messages []string +// MergeJSONReports produces a single JSON-formatted report at the passed in destination by merging the JSON-formatted reports provided in sources +// It skips over reports that fail to decode but reports on them via the returned messages []string func MergeAndCleanupJSONReports(sources []string, destination string) ([]string, error) { messages := []string{} allReports := []types.Report{} @@ -46,6 +50,9 @@ func MergeAndCleanupJSONReports(sources []string, destination string) ([]string, allReports = append(allReports, reports...) 
} + if err := os.MkdirAll(path.Dir(destination), 0770); err != nil { + return messages, err + } f, err := os.Create(destination) if err != nil { return messages, err diff --git a/api/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/api/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go index febcc650b..816042208 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go +++ b/api/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go @@ -14,13 +14,34 @@ import ( "encoding/xml" "fmt" "os" + "path" "strings" - "time" "github.com/onsi/ginkgo/v2/config" "github.com/onsi/ginkgo/v2/types" ) +type JunitReportConfig struct { + // Spec States for which no timeline should be emitted for system-err + // set this to types.SpecStatePassed|types.SpecStateSkipped|types.SpecStatePending to only match failing specs + OmitTimelinesForSpecState types.SpecState + + // Enable OmitFailureMessageAttr to prevent failure messages appearing in the "message" attribute of the Failure and Error tags + OmitFailureMessageAttr bool + + //Enable OmitCapturedStdOutErr to prevent captured stdout/stderr appearing in system-out + OmitCapturedStdOutErr bool + + // Enable OmitSpecLabels to prevent labels from appearing in the spec name + OmitSpecLabels bool + + // Enable OmitLeafNodeType to prevent the spec leaf node type from appearing in the spec name + OmitLeafNodeType bool + + // Enable OmitSuiteSetupNodes to prevent the creation of testcase entries for setup nodes + OmitSuiteSetupNodes bool +} + type JUnitTestSuites struct { XMLName xml.Name `xml:"testsuites"` // Tests maps onto the total number of specs in all test suites (this includes any suite nodes such as BeforeSuite) @@ -128,6 +149,10 @@ type JUnitFailure struct { } func GenerateJUnitReport(report types.Report, dst string) error { + return GenerateJUnitReportWithConfig(report, dst, JunitReportConfig{}) +} + +func GenerateJUnitReportWithConfig(report types.Report, dst string, config JunitReportConfig) error { suite := JUnitTestSuite{ Name: report.SuiteDescription, Package: report.SuitePath, @@ -149,7 +174,6 @@ func GenerateJUnitReport(report types.Report, dst string) error { {"FailOnPending", fmt.Sprintf("%t", report.SuiteConfig.FailOnPending)}, {"FailFast", fmt.Sprintf("%t", report.SuiteConfig.FailFast)}, {"FlakeAttempts", fmt.Sprintf("%d", report.SuiteConfig.FlakeAttempts)}, - {"EmitSpecProgress", fmt.Sprintf("%t", report.SuiteConfig.EmitSpecProgress)}, {"DryRun", fmt.Sprintf("%t", report.SuiteConfig.DryRun)}, {"ParallelTotal", fmt.Sprintf("%d", report.SuiteConfig.ParallelTotal)}, {"OutputInterceptorMode", report.SuiteConfig.OutputInterceptorMode}, @@ -157,22 +181,33 @@ func GenerateJUnitReport(report types.Report, dst string) error { }, } for _, spec := range report.SpecReports { + if config.OmitSuiteSetupNodes && spec.LeafNodeType != types.NodeTypeIt { + continue + } name := fmt.Sprintf("[%s]", spec.LeafNodeType) + if config.OmitLeafNodeType { + name = "" + } if spec.FullText() != "" { name = name + " " + spec.FullText() } labels := spec.Labels() - if len(labels) > 0 { + if len(labels) > 0 && !config.OmitSpecLabels { name = name + " [" + strings.Join(labels, ", ") + "]" } + name = strings.TrimSpace(name) test := JUnitTestCase{ Name: name, Classname: report.SuiteDescription, Status: spec.State.String(), Time: spec.RunTime.Seconds(), - SystemOut: systemOutForUnstructureReporters(spec), - SystemErr: spec.CapturedGinkgoWriterOutput, + } + if !spec.State.Is(config.OmitTimelinesForSpecState) { + test.SystemErr = 
systemErrForUnstructuredReporters(spec) + } + if !config.OmitCapturedStdOutErr { + test.SystemOut = systemOutForUnstructuredReporters(spec) } suite.Tests += 1 @@ -191,28 +226,50 @@ func GenerateJUnitReport(report types.Report, dst string) error { test.Failure = &JUnitFailure{ Message: spec.Failure.Message, Type: "failed", - Description: fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace), + Description: failureDescriptionForUnstructuredReporters(spec), + } + if config.OmitFailureMessageAttr { + test.Failure.Message = "" + } + suite.Failures += 1 + case types.SpecStateTimedout: + test.Failure = &JUnitFailure{ + Message: spec.Failure.Message, + Type: "timedout", + Description: failureDescriptionForUnstructuredReporters(spec), + } + if config.OmitFailureMessageAttr { + test.Failure.Message = "" } suite.Failures += 1 case types.SpecStateInterrupted: test.Error = &JUnitError{ - Message: "interrupted", + Message: spec.Failure.Message, Type: "interrupted", - Description: spec.Failure.Message, + Description: failureDescriptionForUnstructuredReporters(spec), + } + if config.OmitFailureMessageAttr { + test.Error.Message = "" } suite.Errors += 1 case types.SpecStateAborted: test.Failure = &JUnitFailure{ Message: spec.Failure.Message, Type: "aborted", - Description: fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace), + Description: failureDescriptionForUnstructuredReporters(spec), + } + if config.OmitFailureMessageAttr { + test.Failure.Message = "" } suite.Errors += 1 case types.SpecStatePanicked: test.Error = &JUnitError{ Message: spec.Failure.ForwardedPanic, Type: "panicked", - Description: fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace), + Description: failureDescriptionForUnstructuredReporters(spec), + } + if config.OmitFailureMessageAttr { + test.Error.Message = "" } suite.Errors += 1 } @@ -229,6 +286,9 @@ func GenerateJUnitReport(report types.Report, dst string) error { TestSuites: []JUnitTestSuite{suite}, } + if err := os.MkdirAll(path.Dir(dst), 0770); err != nil { + return err + } f, err := os.Create(dst) if err != nil { return err @@ -266,6 +326,9 @@ func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error) mergedReport.TestSuites = append(mergedReport.TestSuites, report.TestSuites...) } + if err := os.MkdirAll(path.Dir(dst), 0770); err != nil { + return messages, err + } f, err := os.Create(dst) if err != nil { return messages, err @@ -278,21 +341,27 @@ func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error) return messages, f.Close() } -func systemOutForUnstructureReporters(spec types.SpecReport) string { - systemOut := spec.CapturedStdOutErr - if len(spec.ReportEntries) > 0 { - systemOut += "\nReport Entries:\n" - for i, entry := range spec.ReportEntries { - systemOut += fmt.Sprintf("%s\n%s\n%s\n", entry.Name, entry.Location, entry.Time.Format(time.RFC3339Nano)) - if representation := entry.StringRepresentation(); representation != "" { - systemOut += representation + "\n" - } - if i+1 < len(spec.ReportEntries) { - systemOut += "--\n" - } - } +func failureDescriptionForUnstructuredReporters(spec types.SpecReport) string { + out := &strings.Builder{} + NewDefaultReporter(types.ReporterConfig{NoColor: true, VeryVerbose: true}, out).emitFailure(0, spec.State, spec.Failure, true) + if len(spec.AdditionalFailures) > 0 { + out.WriteString("\nThere were additional failures detected after the initial failure. 
These are visible in the timeline\n") } - return systemOut + return out.String() +} + +func systemErrForUnstructuredReporters(spec types.SpecReport) string { + return RenderTimeline(spec, true) +} + +func RenderTimeline(spec types.SpecReport, noColor bool) string { + out := &strings.Builder{} + NewDefaultReporter(types.ReporterConfig{NoColor: noColor, VeryVerbose: true}, out).emitTimeline(0, spec, spec.Timeline()) + return out.String() +} + +func systemOutForUnstructuredReporters(spec types.SpecReport) string { + return spec.CapturedStdOutErr } // Deprecated JUnitReporter (so folks can still compile their suites) diff --git a/api/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go b/api/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go index 29f84e7c1..5e726c464 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go +++ b/api/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go @@ -9,11 +9,21 @@ type Reporter interface { WillRun(report types.SpecReport) DidRun(report types.SpecReport) SuiteDidEnd(report types.Report) + + //Timeline emission + EmitFailure(state types.SpecState, failure types.Failure) + EmitProgressReport(progressReport types.ProgressReport) + EmitReportEntry(entry types.ReportEntry) + EmitSpecEvent(event types.SpecEvent) } type NoopReporter struct{} -func (n NoopReporter) SuiteWillBegin(report types.Report) {} -func (n NoopReporter) WillRun(report types.SpecReport) {} -func (n NoopReporter) DidRun(report types.SpecReport) {} -func (n NoopReporter) SuiteDidEnd(report types.Report) {} +func (n NoopReporter) SuiteWillBegin(report types.Report) {} +func (n NoopReporter) WillRun(report types.SpecReport) {} +func (n NoopReporter) DidRun(report types.SpecReport) {} +func (n NoopReporter) SuiteDidEnd(report types.Report) {} +func (n NoopReporter) EmitFailure(state types.SpecState, failure types.Failure) {} +func (n NoopReporter) EmitProgressReport(progressReport types.ProgressReport) {} +func (n NoopReporter) EmitReportEntry(entry types.ReportEntry) {} +func (n NoopReporter) EmitSpecEvent(event types.SpecEvent) {} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go b/api/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go index 2aa2f1845..e990ad82e 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go +++ b/api/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go @@ -11,22 +11,26 @@ package reporters import ( "fmt" "os" + "path" "strings" "github.com/onsi/ginkgo/v2/types" ) func tcEscape(s string) string { - s = strings.Replace(s, "|", "||", -1) - s = strings.Replace(s, "'", "|'", -1) - s = strings.Replace(s, "\n", "|n", -1) - s = strings.Replace(s, "\r", "|r", -1) - s = strings.Replace(s, "[", "|[", -1) - s = strings.Replace(s, "]", "|]", -1) + s = strings.ReplaceAll(s, "|", "||") + s = strings.ReplaceAll(s, "'", "|'") + s = strings.ReplaceAll(s, "\n", "|n") + s = strings.ReplaceAll(s, "\r", "|r") + s = strings.ReplaceAll(s, "[", "|[") + s = strings.ReplaceAll(s, "]", "|]") return s } func GenerateTeamcityReport(report types.Report, dst string) error { + if err := os.MkdirAll(path.Dir(dst), 0770); err != nil { + return err + } f, err := os.Create(dst) if err != nil { return err @@ -60,20 +64,24 @@ func GenerateTeamcityReport(report types.Report, dst string) error { } fmt.Fprintf(f, "##teamcity[testIgnored name='%s' message='%s']\n", name, tcEscape(message)) case types.SpecStateFailed: - details := fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), 
spec.Failure.Location.FullStackTrace) + details := failureDescriptionForUnstructuredReporters(spec) fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='failed - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) case types.SpecStatePanicked: - details := fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace) + details := failureDescriptionForUnstructuredReporters(spec) fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='panicked - %s' details='%s']\n", name, tcEscape(spec.Failure.ForwardedPanic), tcEscape(details)) + case types.SpecStateTimedout: + details := failureDescriptionForUnstructuredReporters(spec) + fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='timedout - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) case types.SpecStateInterrupted: - fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='interrupted' details='%s']\n", name, tcEscape(spec.Failure.Message)) + details := failureDescriptionForUnstructuredReporters(spec) + fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='interrupted - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) case types.SpecStateAborted: - details := fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace) + details := failureDescriptionForUnstructuredReporters(spec) fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='aborted - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) } - fmt.Fprintf(f, "##teamcity[testStdOut name='%s' out='%s']\n", name, tcEscape(systemOutForUnstructureReporters(spec))) - fmt.Fprintf(f, "##teamcity[testStdErr name='%s' out='%s']\n", name, tcEscape(spec.CapturedGinkgoWriterOutput)) + fmt.Fprintf(f, "##teamcity[testStdOut name='%s' out='%s']\n", name, tcEscape(systemOutForUnstructuredReporters(spec))) + fmt.Fprintf(f, "##teamcity[testStdErr name='%s' out='%s']\n", name, tcEscape(systemErrForUnstructuredReporters(spec))) fmt.Fprintf(f, "##teamcity[testFinished name='%s' duration='%d']\n", name, int(spec.RunTime.Seconds()*1000.0)) } fmt.Fprintf(f, "##teamcity[testSuiteFinished name='%s']\n", tcEscape(report.SuiteDescription)) diff --git a/api/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go b/api/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go index 07b4c6122..f33786a2d 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go +++ b/api/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go @@ -35,7 +35,7 @@ func CurrentSpecReport() SpecReport { } /* - ReportEntryVisibility governs the visibility of ReportEntries in Ginkgo's console reporter + ReportEntryVisibility governs the visibility of ReportEntries in Ginkgo's console reporter - ReportEntryVisibilityAlways: the default behavior - the ReportEntry is always emitted. - ReportEntryVisibilityFailureOrVerbose: the ReportEntry is only emitted if the spec fails or if the tests are run with -v (similar to GinkgoWriters behavior). @@ -50,9 +50,9 @@ const ReportEntryVisibilityAlways, ReportEntryVisibilityFailureOrVerbose, Report /* AddReportEntry generates and adds a new ReportEntry to the current spec's SpecReport. It can take any of the following arguments: - - A single arbitrary object to attach as the Value of the ReportEntry. This object will be included in any generated reports and will be emitted to the console when the report is emitted. 
- - A ReportEntryVisibility enum to control the visibility of the ReportEntry
- - An Offset or CodeLocation decoration to control the reported location of the ReportEntry
+ - A ReportEntryVisibility enum to control the visibility of the ReportEntry
+ - An Offset or CodeLocation decoration to control the reported location of the ReportEntry

If the Value object implements `fmt.Stringer`, its `String()` representation is used when emitting to the console.

@@ -79,8 +79,11 @@ receives a SpecReport. They are called before the spec starts.
You cannot nest any other Ginkgo nodes within a ReportBeforeEach node's closure.
You can learn more about ReportBeforeEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
*/
-func ReportBeforeEach(body func(SpecReport)) bool {
-	return pushNode(internal.NewReportBeforeEachNode(body, types.NewCodeLocation(1)))
+func ReportBeforeEach(body func(SpecReport), args ...interface{}) bool {
+	combinedArgs := []interface{}{body}
+	combinedArgs = append(combinedArgs, args...)
+
+	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeEach, "", combinedArgs...))
}

/*
@@ -90,8 +93,30 @@ receives a SpecReport. They are called after the spec has completed and receive
You cannot nest any other Ginkgo nodes within a ReportAfterEach node's closure.
You can learn more about ReportAfterEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
*/
-func ReportAfterEach(body func(SpecReport)) bool {
-	return pushNode(internal.NewReportAfterEachNode(body, types.NewCodeLocation(1)))
+func ReportAfterEach(body func(SpecReport), args ...interface{}) bool {
+	combinedArgs := []interface{}{body}
+	combinedArgs = append(combinedArgs, args...)
+
+	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterEach, "", combinedArgs...))
+}
+
+/*
+ReportBeforeSuite nodes are run at the beginning of the suite. ReportBeforeSuite nodes take a function that receives a suite Report.
+
+They are called at the beginning of the suite, before any specs have run and any BeforeSuite or SynchronizedBeforeSuite nodes, and are passed in the initial report for the suite.
+ReportBeforeSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node)
+
+# When running in parallel, Ginkgo ensures that only one of the parallel nodes runs the ReportBeforeSuite
+
+You cannot nest any other Ginkgo nodes within a ReportBeforeSuite node's closure.
+You can learn more about ReportBeforeSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
+
+You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports
+*/
+func ReportBeforeSuite(body func(Report), args ...interface{}) bool {
+	combinedArgs := []interface{}{body}
+	combinedArgs = append(combinedArgs, args...)
+	return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeSuite, "", combinedArgs...))
}

/*
@@ -107,10 +132,13 @@ In addition to using ReportAfterSuite to programmatically generate suite reports
You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure.
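A minimal usage sketch for the new ReportBeforeSuite node shown above (the suite wiring and printed message are illustrative, not from this changeset):

package books_test

import (
	"fmt"

	. "github.com/onsi/ginkgo/v2"
)

// The Report passed to ReportBeforeSuite carries no SpecReports yet;
// PreRunStats describes what Ginkgo intends to run.
var _ = ReportBeforeSuite(func(report Report) {
	fmt.Printf("suite %q: running %d of %d specs\n",
		report.SuiteDescription,
		report.PreRunStats.SpecsThatWillRun,
		report.PreRunStats.TotalSpecs)
})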
You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically + You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports */ -func ReportAfterSuite(text string, body func(Report)) bool { - return pushNode(internal.NewReportAfterSuiteNode(text, body, types.NewCodeLocation(1))) +func ReportAfterSuite(text string, body func(Report), args ...interface{}) bool { + combinedArgs := []interface{}{body} + combinedArgs = append(combinedArgs, args...) + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterSuite, text, combinedArgs...)) } func registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig types.ReporterConfig) { @@ -145,7 +173,8 @@ func registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig types.Re if reporterConfig.TeamcityReport != "" { flags = append(flags, "--teamcity-report") } - pushNode(internal.NewReportAfterSuiteNode( + pushNode(internal.NewNode( + deprecationTracker, types.NodeTypeReportAfterSuite, fmt.Sprintf("Autogenerated ReportAfterSuite for %s", strings.Join(flags, " ")), body, types.NewCustomCodeLocation("autogenerated by Ginkgo"), diff --git a/api/vendor/github.com/onsi/ginkgo/v2/table_dsl.go b/api/vendor/github.com/onsi/ginkgo/v2/table_dsl.go index 0db0e7180..ac9b7abb5 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/table_dsl.go +++ b/api/vendor/github.com/onsi/ginkgo/v2/table_dsl.go @@ -1,6 +1,7 @@ package ginkgo import ( + "context" "fmt" "reflect" "strings" @@ -12,7 +13,7 @@ import ( /* The EntryDescription decorator allows you to pass a format string to DescribeTable() and Entry(). This format string is used to generate entry names via: - fmt.Sprintf(formatString, parameters...) + fmt.Sprintf(formatString, parameters...) where parameters are the parameters passed into the entry. @@ -31,19 +32,20 @@ DescribeTable describes a table-driven spec. For example: - DescribeTable("a simple table", - func(x int, y int, expected bool) { - Ω(x > y).Should(Equal(expected)) - }, - Entry("x > y", 1, 0, true), - Entry("x == y", 0, 0, false), - Entry("x < y", 0, 1, false), - ) + DescribeTable("a simple table", + func(x int, y int, expected bool) { + Ω(x > y).Should(Equal(expected)) + }, + Entry("x > y", 1, 0, true), + Entry("x == y", 0, 0, false), + Entry("x < y", 0, 1, false), + ) You can learn more about DescribeTable here: https://onsi.github.io/ginkgo/#table-specs And can explore some Table patterns here: https://onsi.github.io/ginkgo/#table-specs-patterns */ func DescribeTable(description string, args ...interface{}) bool { + GinkgoHelper() generateTable(description, args...) return true } @@ -52,6 +54,7 @@ func DescribeTable(description string, args ...interface{}) bool { You can focus a table with `FDescribeTable`. This is equivalent to `FDescribe`. */ func FDescribeTable(description string, args ...interface{}) bool { + GinkgoHelper() args = append(args, internal.Focus) generateTable(description, args...) return true @@ -61,6 +64,7 @@ func FDescribeTable(description string, args ...interface{}) bool { You can mark a table as pending with `PDescribeTable`. This is equivalent to `PDescribe`. */ func PDescribeTable(description string, args ...interface{}) bool { + GinkgoHelper() args = append(args, internal.Pending) generateTable(description, args...) return true @@ -89,29 +93,34 @@ Subsequent arguments accept any Ginkgo decorators. 
These are filtered out and t Each Entry ends up generating an individual Ginkgo It. The body of the it is the Table Body function with the Entry parameters passed in. +If you want to generate interruptible specs simply write a Table function that accepts a SpecContext as its first argument. You can then decorate individual Entrys with the NodeTimeout and SpecTimeout decorators. + You can learn more about Entry here: https://onsi.github.io/ginkgo/#table-specs */ func Entry(description interface{}, args ...interface{}) TableEntry { + GinkgoHelper() decorations, parameters := internal.PartitionDecorations(args...) - return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(1)} + return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(0)} } /* You can focus a particular entry with FEntry. This is equivalent to FIt. */ func FEntry(description interface{}, args ...interface{}) TableEntry { + GinkgoHelper() decorations, parameters := internal.PartitionDecorations(args...) decorations = append(decorations, internal.Focus) - return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(1)} + return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(0)} } /* You can mark a particular entry as pending with PEntry. This is equivalent to PIt. */ func PEntry(description interface{}, args ...interface{}) TableEntry { + GinkgoHelper() decorations, parameters := internal.PartitionDecorations(args...) decorations = append(decorations, internal.Pending) - return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(1)} + return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(0)} } /* @@ -119,12 +128,17 @@ You can mark a particular entry as pending with XEntry. 
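Per the note above, a table body whose first argument is a SpecContext makes every entry interruptible, and individual entries can then carry NodeTimeout decorations. A sketch; lookupUser is a hypothetical stand-in for a context-aware operation:

package books_test

import (
	"context"
	"time"

	. "github.com/onsi/ginkgo/v2"
)

// lookupUser stands in for any operation that honors context cancellation.
func lookupUser(ctx context.Context, name string) error {
	select {
	case <-time.After(50 * time.Millisecond):
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

var _ = DescribeTable("user lookups",
	// SpecContext comes first; Ginkgo detects it and runs each entry as an
	// interruptible spec, so the timeout decorators below take effect.
	func(ctx SpecContext, name string) {
		if err := lookupUser(ctx, name); err != nil {
			Fail(err.Error())
		}
	},
	Entry("existing user", NodeTimeout(time.Second), "alice"),
	Entry("slow backend", NodeTimeout(10*time.Millisecond), "bob"),
)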
This is equivalent to XIt.
*/
var XEntry = PEntry

+var contextType = reflect.TypeOf(new(context.Context)).Elem()
+var specContextType = reflect.TypeOf(new(SpecContext)).Elem()
+
 func generateTable(description string, args ...interface{}) {
-	cl := types.NewCodeLocation(2)
+	GinkgoHelper()
+	cl := types.NewCodeLocation(0)
 	containerNodeArgs := []interface{}{cl}

 	entries := []TableEntry{}
 	var itBody interface{}
+	var itBodyType reflect.Type

 	var tableLevelEntryDescription interface{}
 	tableLevelEntryDescription = func(args ...interface{}) string {
@@ -135,6 +149,10 @@ func generateTable(description string, args ...interface{}) {
 		return "Entry: " + strings.Join(out, ", ")
 	}

+	if len(args) == 1 {
+		exitIfErr(types.GinkgoErrors.MissingParametersForTableFunction(cl))
+	}
+
 	for i, arg := range args {
 		switch t := reflect.TypeOf(arg); {
 		case t == nil:
@@ -152,6 +170,7 @@ func generateTable(description string, args ...interface{}) {
 				exitIfErr(types.GinkgoErrors.MultipleEntryBodyFunctionsForTable(cl))
 			}
 			itBody = arg
+			itBodyType = reflect.TypeOf(itBody)
 		default:
 			containerNodeArgs = append(containerNodeArgs, arg)
 		}
@@ -164,7 +183,7 @@ func generateTable(description string, args ...interface{}) {
 			var description string
 			switch t := reflect.TypeOf(entry.description); {
 			case t == nil:
-				err = validateParameters(tableLevelEntryDescription, entry.parameters, "Entry Description function", entry.codeLocation)
+				err = validateParameters(tableLevelEntryDescription, entry.parameters, "Entry Description function", entry.codeLocation, false)
 				if err == nil {
 					description = invokeFunction(tableLevelEntryDescription, entry.parameters)[0].String()
 				}
@@ -173,7 +192,7 @@
 			case t == reflect.TypeOf(""):
 				description = entry.description.(string)
 			case t.Kind() == reflect.Func && t.NumOut() == 1 && t.Out(0) == reflect.TypeOf(""):
-				err = validateParameters(entry.description, entry.parameters, "Entry Description function", entry.codeLocation)
+				err = validateParameters(entry.description, entry.parameters, "Entry Description function", entry.codeLocation, false)
 				if err == nil {
 					description = invokeFunction(entry.description, entry.parameters)[0].String()
 				}
@@ -181,17 +200,37 @@
 				err = types.GinkgoErrors.InvalidEntryDescription(entry.codeLocation)
 			}

-			if err == nil {
-				err = validateParameters(itBody, entry.parameters, "Table Body function", entry.codeLocation)
-			}
 			itNodeArgs := []interface{}{entry.codeLocation}
 			itNodeArgs = append(itNodeArgs, entry.decorations...)
-			itNodeArgs = append(itNodeArgs, func() {
-				if err != nil {
-					panic(err)
+
+			hasContext := false
+			if itBodyType.NumIn() > 0
{ + if itBodyType.In(0).Implements(specContextType) { + hasContext = true + } else if itBodyType.In(0).Implements(contextType) && (len(entry.parameters) == 0 || !reflect.TypeOf(entry.parameters[0]).Implements(contextType)) { + hasContext = true } - invokeFunction(itBody, entry.parameters) - }) + } + + if err == nil { + err = validateParameters(itBody, entry.parameters, "Table Body function", entry.codeLocation, hasContext) + } + + if hasContext { + itNodeArgs = append(itNodeArgs, func(c SpecContext) { + if err != nil { + panic(err) + } + invokeFunction(itBody, append([]interface{}{c}, entry.parameters...)) + }) + } else { + itNodeArgs = append(itNodeArgs, func() { + if err != nil { + panic(err) + } + invokeFunction(itBody, entry.parameters) + }) + } pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, description, itNodeArgs...)) } @@ -223,9 +262,14 @@ func invokeFunction(function interface{}, parameters []interface{}) []reflect.Va return reflect.ValueOf(function).Call(inValues) } -func validateParameters(function interface{}, parameters []interface{}, kind string, cl types.CodeLocation) error { +func validateParameters(function interface{}, parameters []interface{}, kind string, cl types.CodeLocation, hasContext bool) error { funcType := reflect.TypeOf(function) limit := funcType.NumIn() + offset := 0 + if hasContext { + limit = limit - 1 + offset = 1 + } if funcType.IsVariadic() { limit = limit - 1 } @@ -238,13 +282,13 @@ func validateParameters(function interface{}, parameters []interface{}, kind str var i = 0 for ; i < limit; i++ { actual := reflect.TypeOf(parameters[i]) - expected := funcType.In(i) + expected := funcType.In(i + offset) if !(actual == nil) && !actual.AssignableTo(expected) { return types.GinkgoErrors.IncorrectParameterTypeToTableFunction(i+1, expected, actual, kind, cl) } } if funcType.IsVariadic() { - expected := funcType.In(limit).Elem() + expected := funcType.In(limit + offset).Elem() for ; i < len(parameters); i++ { actual := reflect.TypeOf(parameters[i]) if !(actual == nil) && !actual.AssignableTo(expected) { diff --git a/api/vendor/github.com/onsi/ginkgo/v2/types/code_location.go b/api/vendor/github.com/onsi/ginkgo/v2/types/code_location.go index 00107d3ad..9cd576817 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/types/code_location.go +++ b/api/vendor/github.com/onsi/ginkgo/v2/types/code_location.go @@ -7,6 +7,7 @@ import ( "runtime" "runtime/debug" "strings" + "sync" ) type CodeLocation struct { @@ -38,6 +39,73 @@ func (codeLocation CodeLocation) ContentsOfLine() string { return lines[codeLocation.LineNumber-1] } +type codeLocationLocator struct { + pcs map[uintptr]bool + helpers map[string]bool + lock *sync.Mutex +} + +func (c *codeLocationLocator) addHelper(pc uintptr) { + c.lock.Lock() + defer c.lock.Unlock() + + if c.pcs[pc] { + return + } + c.lock.Unlock() + f := runtime.FuncForPC(pc) + c.lock.Lock() + if f == nil { + return + } + c.helpers[f.Name()] = true + c.pcs[pc] = true +} + +func (c *codeLocationLocator) hasHelper(name string) bool { + c.lock.Lock() + defer c.lock.Unlock() + return c.helpers[name] +} + +func (c *codeLocationLocator) getCodeLocation(skip int) CodeLocation { + pc := make([]uintptr, 40) + n := runtime.Callers(skip+2, pc) + if n == 0 { + return CodeLocation{} + } + pc = pc[:n] + frames := runtime.CallersFrames(pc) + for { + frame, more := frames.Next() + if !c.hasHelper(frame.Function) { + return CodeLocation{FileName: frame.File, LineNumber: frame.Line} + } + if !more { + break + } + } + return CodeLocation{} +} + +var 
clLocator = &codeLocationLocator{
+	pcs:     map[uintptr]bool{},
+	helpers: map[string]bool{},
+	lock:    &sync.Mutex{},
+}
+
+// MarkAsHelper is used by GinkgoHelper to mark the caller (appropriately offset by skip) as a helper. You can use this directly if you need to provide an optional `skip` to mark functions further up the call stack as helpers.
+func MarkAsHelper(optionalSkip ...int) {
+	skip := 1
+	if len(optionalSkip) > 0 {
+		skip += optionalSkip[0]
+	}
+	pc, _, _, ok := runtime.Caller(skip)
+	if ok {
+		clLocator.addHelper(pc)
+	}
+}
+
 func NewCustomCodeLocation(message string) CodeLocation {
 	return CodeLocation{
 		CustomMessage: message,
@@ -45,14 +113,13 @@
}

func NewCodeLocation(skip int) CodeLocation {
-	_, file, line, _ := runtime.Caller(skip + 1)
-	return CodeLocation{FileName: file, LineNumber: line}
+	return clLocator.getCodeLocation(skip + 1)
}

func NewCodeLocationWithStackTrace(skip int) CodeLocation {
-	_, file, line, _ := runtime.Caller(skip + 1)
-	stackTrace := PruneStack(string(debug.Stack()), skip+1)
-	return CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace}
+	cl := clLocator.getCodeLocation(skip + 1)
+	cl.FullStackTrace = PruneStack(string(debug.Stack()), skip+1)
+	return cl
}

// PruneStack removes references to functions that are internal to Ginkgo
@@ -65,7 +132,7 @@ func NewCodeLocationWithStackTrace(skip int) CodeLocation {
func PruneStack(fullStackTrace string, skip int) string {
	stack := strings.Split(fullStackTrace, "\n")
	// Ensure that the even entries are the method names and the
-	// the odd entries the source code information.
+	// odd entries the source code information.
	if len(stack) > 0 && strings.HasPrefix(stack[0], "goroutine ") {
		// Ignore "goroutine 29 [running]:" line.
stack = stack[1:] diff --git a/api/vendor/github.com/onsi/ginkgo/v2/types/config.go b/api/vendor/github.com/onsi/ginkgo/v2/types/config.go index 07ef4c3a7..1014c7b49 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/types/config.go +++ b/api/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -8,6 +8,7 @@ package types import ( "flag" "os" + "path/filepath" "runtime" "strconv" "strings" @@ -26,10 +27,14 @@ type SuiteConfig struct { FailOnPending bool FailFast bool FlakeAttempts int - EmitSpecProgress bool DryRun bool + PollProgressAfter time.Duration + PollProgressInterval time.Duration Timeout time.Duration + EmitSpecProgress bool // this is deprecated but its removal is causing compile issue for some users that were setting it manually OutputInterceptorMode string + SourceRoots []string + GracePeriod time.Duration ParallelProcess int ParallelTotal int @@ -42,6 +47,7 @@ func NewDefaultSuiteConfig() SuiteConfig { Timeout: time.Hour, ParallelProcess: 1, ParallelTotal: 1, + GracePeriod: 30 * time.Second, } } @@ -76,13 +82,12 @@ func (vl VerbosityLevel) LT(comp VerbosityLevel) bool { // Configuration for Ginkgo's reporter type ReporterConfig struct { - NoColor bool - SlowSpecThreshold time.Duration - Succinct bool - Verbose bool - VeryVerbose bool - FullTrace bool - AlwaysEmitGinkgoWriter bool + NoColor bool + Succinct bool + Verbose bool + VeryVerbose bool + FullTrace bool + ShowNodeEvents bool JSONReport string JUnitReport string @@ -105,9 +110,7 @@ func (rc ReporterConfig) WillGenerateReport() bool { } func NewDefaultReporterConfig() ReporterConfig { - return ReporterConfig{ - SlowSpecThreshold: 5 * time.Second, - } + return ReporterConfig{} } // Configuration for the Ginkgo CLI @@ -230,6 +233,9 @@ type deprecatedConfig struct { SlowSpecThresholdWithFLoatUnits float64 Stream bool Notify bool + EmitSpecProgress bool + SlowSpecThreshold time.Duration + AlwaysEmitGinkgoWriter bool } // Flags @@ -270,10 +276,16 @@ var SuiteConfigFlags = GinkgoFlags{ {KeyPath: "S.DryRun", Name: "dry-run", SectionKey: "debug", DeprecatedName: "dryRun", DeprecatedDocLink: "changed-command-line-flags", Usage: "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v."}, - {KeyPath: "S.EmitSpecProgress", Name: "progress", SectionKey: "debug", - Usage: "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter."}, + {KeyPath: "S.PollProgressAfter", Name: "poll-progress-after", SectionKey: "debug", UsageDefaultValue: "0", + Usage: "Emit node progress reports periodically if node hasn't completed after this duration."}, + {KeyPath: "S.PollProgressInterval", Name: "poll-progress-interval", SectionKey: "debug", UsageDefaultValue: "10s", + Usage: "The rate at which to emit node progress reports after poll-progress-after has elapsed."}, + {KeyPath: "S.SourceRoots", Name: "source-root", SectionKey: "debug", + Usage: "The location to look for source code when generating progress reports. 
You can pass multiple --source-root flags."}, {KeyPath: "S.Timeout", Name: "timeout", SectionKey: "debug", UsageDefaultValue: "1h", Usage: "Test suite fails if it does not complete within the specified timeout."}, + {KeyPath: "S.GracePeriod", Name: "grace-period", SectionKey: "debug", UsageDefaultValue: "30s", + Usage: "When interrupted, Ginkgo will wait for GracePeriod for the current running node to exit before moving on to the next one."}, {KeyPath: "S.OutputInterceptorMode", Name: "output-interceptor-mode", SectionKey: "debug", UsageArgument: "dup, swap, or none", Usage: "If set, ginkgo will use the specified output interception strategy when running in parallel. Defaults to dup on unix and swap on windows."}, @@ -290,6 +302,8 @@ var SuiteConfigFlags = GinkgoFlags{ {KeyPath: "D.RegexScansFilePath", DeprecatedName: "regexScansFilePath", DeprecatedDocLink: "removed--regexscansfilepath", DeprecatedVersion: "2.0.0"}, {KeyPath: "D.DebugParallel", DeprecatedName: "debug", DeprecatedDocLink: "removed--debug", DeprecatedVersion: "2.0.0"}, + {KeyPath: "D.EmitSpecProgress", DeprecatedName: "progress", SectionKey: "debug", + DeprecatedVersion: "2.5.0", Usage: ". The functionality provided by --progress was confusing and is no longer needed. Use --show-node-events instead to see node entry and exit events included in the timeline of failed and verbose specs. Or you can run with -vv to always see all node events. Lastly, --poll-progress-after and the PollProgressAfter decorator now provide a better mechanism for debugging specs that tend to get stuck."}, } // ParallelConfigFlags provides flags for the Ginkgo test process (not the CLI) @@ -306,8 +320,6 @@ var ParallelConfigFlags = GinkgoFlags{ var ReporterConfigFlags = GinkgoFlags{ {KeyPath: "R.NoColor", Name: "no-color", SectionKey: "output", DeprecatedName: "noColor", DeprecatedDocLink: "changed-command-line-flags", Usage: "If set, suppress color output in default reporter."}, - {KeyPath: "R.SlowSpecThreshold", Name: "slow-spec-threshold", SectionKey: "output", UsageArgument: "duration", UsageDefaultValue: "5s", - Usage: "Specs that take longer to run than this threshold are flagged as slow by the default reporter."}, {KeyPath: "R.Verbose", Name: "v", SectionKey: "output", Usage: "If set, emits more output including GinkgoWriter contents."}, {KeyPath: "R.VeryVerbose", Name: "vv", SectionKey: "output", @@ -316,8 +328,8 @@ var ReporterConfigFlags = GinkgoFlags{ Usage: "If set, default reporter prints out a very succinct report"}, {KeyPath: "R.FullTrace", Name: "trace", SectionKey: "output", Usage: "If set, default reporter prints out the full stack trace when a failure occurs"}, - {KeyPath: "R.AlwaysEmitGinkgoWriter", Name: "always-emit-ginkgo-writer", SectionKey: "output", DeprecatedName: "reportPassed", DeprecatedDocLink: "renamed--reportpassed", - Usage: "If set, default reporter prints out captured output of passed tests."}, + {KeyPath: "R.ShowNodeEvents", Name: "show-node-events", SectionKey: "output", + Usage: "If set, default reporter prints node > Enter and < Exit events when specs fail"}, {KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output", Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."}, @@ -330,6 +342,8 @@ var ReporterConfigFlags = GinkgoFlags{ Usage: "use --slow-spec-threshold instead and pass in a duration string (e.g. 
'5s', not '5.0')"}, {KeyPath: "D.NoisyPendings", DeprecatedName: "noisyPendings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"}, {KeyPath: "D.NoisySkippings", DeprecatedName: "noisySkippings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"}, + {KeyPath: "D.SlowSpecThreshold", DeprecatedName: "slow-spec-threshold", SectionKey: "output", Usage: "--slow-spec-threshold has been deprecated and will be removed in a future version of Ginkgo. This feature has proved to be more noisy than useful. You can use --poll-progress-after, instead, to get more actionable feedback about potentially slow specs and understand where they might be getting stuck.", DeprecatedVersion: "2.5.0"}, + {KeyPath: "D.AlwaysEmitGinkgoWriter", DeprecatedName: "always-emit-ginkgo-writer", SectionKey: "output", Usage: " - use -v instead, or one of Ginkgo's machine-readable report formats to get GinkgoWriter output for passing specs."}, } // BuildTestSuiteFlagSet attaches to the CommandLine flagset and provides flags for the Ginkgo test process @@ -381,6 +395,10 @@ func VetConfig(flagSet GinkgoFlagSet, suiteConfig SuiteConfig, reporterConfig Re errors = append(errors, GinkgoErrors.DryRunInParallelConfiguration()) } + if suiteConfig.GracePeriod <= 0 { + errors = append(errors, GinkgoErrors.GracePeriodCannotBeZero()) + } + if len(suiteConfig.FocusFiles) > 0 { _, err := ParseFileFilters(suiteConfig.FocusFiles) if err != nil { @@ -583,13 +601,29 @@ func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsCo } // GenerateGoTestCompileArgs is used by the Ginkgo CLI to generate command line arguments to pass to the go test -c command when compiling the test -func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, packageToBuild string) ([]string, error) { +func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, packageToBuild string, pathToInvocationPath string) ([]string, error) { // if the user has set the CoverProfile run-time flag make sure to set the build-time cover flag to make sure // the built test binary can generate a coverprofile if goFlagsConfig.CoverProfile != "" { goFlagsConfig.Cover = true } + if goFlagsConfig.CoverPkg != "" { + coverPkgs := strings.Split(goFlagsConfig.CoverPkg, ",") + adjustedCoverPkgs := make([]string, len(coverPkgs)) + for i, coverPkg := range coverPkgs { + coverPkg = strings.Trim(coverPkg, " ") + if strings.HasPrefix(coverPkg, "./") { + // this is a relative coverPkg - we need to reroot it + adjustedCoverPkgs[i] = "./" + filepath.Join(pathToInvocationPath, strings.TrimPrefix(coverPkg, "./")) + } else { + // this is a package name - don't touch it + adjustedCoverPkgs[i] = coverPkg + } + } + goFlagsConfig.CoverPkg = strings.Join(adjustedCoverPkgs, ",") + } + args := []string{"test", "-c", "-o", destination, packageToBuild} goArgs, err := GenerateFlagArgs( GoBuildFlags, diff --git a/api/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go b/api/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go index 79ca4593d..e2519f673 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go +++ b/api/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go @@ -4,6 +4,7 @@ import ( "os" "strconv" "strings" + "sync" "unicode" "github.com/onsi/ginkgo/v2/formatter" @@ -37,7 +38,7 @@ func (d deprecations) Async() Deprecation { func (d deprecations) Measure() Deprecation { return Deprecation{ - Message: "Measure is 
deprecated and will be removed in Ginkgo V2. Please migrate to gomega/gmeasure.", + Message: "Measure is deprecated and has been removed from Ginkgo V2. Any Measure tests in your spec will not run. Please migrate to gomega/gmeasure.", DocLink: "removed-measure", Version: "1.16.3", } @@ -82,13 +83,22 @@ func (d deprecations) Nodot() Deprecation { } } +func (d deprecations) SuppressProgressReporting() Deprecation { + return Deprecation{ + Message: "Improvements to how reporters emit timeline information means that SuppressProgressReporting is no longer necessary and has been deprecated.", + Version: "2.5.0", + } +} + type DeprecationTracker struct { deprecations map[Deprecation][]CodeLocation + lock *sync.Mutex } func NewDeprecationTracker() *DeprecationTracker { return &DeprecationTracker{ deprecations: map[Deprecation][]CodeLocation{}, + lock: &sync.Mutex{}, } } @@ -102,6 +112,8 @@ func (d *DeprecationTracker) TrackDeprecation(deprecation Deprecation, cl ...Cod } } + d.lock.Lock() + defer d.lock.Unlock() if len(cl) == 1 { d.deprecations[deprecation] = append(d.deprecations[deprecation], cl[0]) } else { @@ -110,10 +122,14 @@ func (d *DeprecationTracker) TrackDeprecation(deprecation Deprecation, cl ...Cod } func (d *DeprecationTracker) DidTrackDeprecations() bool { + d.lock.Lock() + defer d.lock.Unlock() return len(d.deprecations) > 0 } func (d *DeprecationTracker) DeprecationsReport() string { + d.lock.Lock() + defer d.lock.Unlock() out := formatter.F("{{light-yellow}}You're using deprecated Ginkgo functionality:{{/}}\n") out += formatter.F("{{light-yellow}}============================================={{/}}\n") for deprecation, locations := range d.deprecations { diff --git a/api/vendor/github.com/onsi/ginkgo/v2/types/errors.go b/api/vendor/github.com/onsi/ginkgo/v2/types/errors.go index 40331d298..1e0dbfd9d 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/types/errors.go +++ b/api/vendor/github.com/onsi/ginkgo/v2/types/errors.go @@ -108,8 +108,8 @@ Please ensure all assertions are inside leaf nodes such as {{bold}}BeforeEach{{/ func (g ginkgoErrors) SuiteNodeInNestedContext(nodeType NodeType, cl CodeLocation) error { docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite" - if nodeType.Is(NodeTypeReportAfterSuite) { - docLink = "reporting-nodes---reportaftersuite" + if nodeType.Is(NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite) { + docLink = "reporting-nodes---reportbeforesuite-and-reportaftersuite" } return GinkgoError{ @@ -125,8 +125,8 @@ func (g ginkgoErrors) SuiteNodeInNestedContext(nodeType NodeType, cl CodeLocatio func (g ginkgoErrors) SuiteNodeDuringRunPhase(nodeType NodeType, cl CodeLocation) error { docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite" - if nodeType.Is(NodeTypeReportAfterSuite) { - docLink = "reporting-nodes---reportaftersuite" + if nodeType.Is(NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite) { + docLink = "reporting-nodes---reportbeforesuite-and-reportaftersuite" } return GinkgoError{ @@ -180,6 +180,15 @@ func (g ginkgoErrors) InvalidDeclarationOfFocusedAndPending(cl CodeLocation, nod } } +func (g ginkgoErrors) InvalidDeclarationOfFlakeAttemptsAndMustPassRepeatedly(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid Combination of Decorators: FlakeAttempts and MustPassRepeatedly", + Message: formatter.F(`[%s] node was decorated with both FlakeAttempts and MustPassRepeatedly. 
At most one is allowed.`, nodeType), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + func (g ginkgoErrors) UnknownDecorator(cl CodeLocation, nodeType NodeType, decorator interface{}) error { return GinkgoError{ Heading: "Unknown Decorator", @@ -189,20 +198,55 @@ func (g ginkgoErrors) UnknownDecorator(cl CodeLocation, nodeType NodeType, decor } } +func (g ginkgoErrors) InvalidBodyTypeForContainer(t reflect.Type, cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid Function", + Message: formatter.F(`[%s] node must be passed {{bold}}func(){{/}} - i.e. functions that take nothing and return nothing. You passed {{bold}}%s{{/}} instead.`, nodeType, t), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + func (g ginkgoErrors) InvalidBodyType(t reflect.Type, cl CodeLocation, nodeType NodeType) error { + mustGet := "{{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}" + if nodeType.Is(NodeTypeContainer) { + mustGet = "{{bold}}func(){{/}}" + } return GinkgoError{ Heading: "Invalid Function", - Message: formatter.F(`[%s] node must be passed {{bold}}func(){{/}} - i.e. functions that take nothing and return nothing. + Message: formatter.F(`[%s] node must be passed `+mustGet+`. You passed {{bold}}%s{{/}} instead.`, nodeType, t), CodeLocation: cl, DocLink: "node-decorators-overview", } } +func (g ginkgoErrors) InvalidBodyTypeForSynchronizedBeforeSuiteProc1(t reflect.Type, cl CodeLocation) error { + mustGet := "{{bold}}func() []byte{{/}}, {{bold}}func(ctx SpecContext) []byte{{/}}, or {{bold}}func(ctx context.Context) []byte{{/}}, {{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}" + return GinkgoError{ + Heading: "Invalid Function", + Message: formatter.F(`[SynchronizedBeforeSuite] node must be passed `+mustGet+` for its first function. +You passed {{bold}}%s{{/}} instead.`, t), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidBodyTypeForSynchronizedBeforeSuiteAllProcs(t reflect.Type, cl CodeLocation) error { + mustGet := "{{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}, {{bold}}func([]byte){{/}}, {{bold}}func(ctx SpecContext, []byte){{/}}, or {{bold}}func(ctx context.Context, []byte){{/}}" + return GinkgoError{ + Heading: "Invalid Function", + Message: formatter.F(`[SynchronizedBeforeSuite] node must be passed `+mustGet+` for its second function. 
+You passed {{bold}}%s{{/}} instead.`, t), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + func (g ginkgoErrors) MultipleBodyFunctions(cl CodeLocation, nodeType NodeType) error { return GinkgoError{ Heading: "Multiple Functions", - Message: formatter.F(`[%s] node must be passed a single {{bold}}func(){{/}} - but more than one was passed in.`, nodeType), + Message: formatter.F(`[%s] node must be passed a single function - but more than one was passed in.`, nodeType), CodeLocation: cl, DocLink: "node-decorators-overview", } @@ -211,12 +255,30 @@ func (g ginkgoErrors) MultipleBodyFunctions(cl CodeLocation, nodeType NodeType) func (g ginkgoErrors) MissingBodyFunction(cl CodeLocation, nodeType NodeType) error { return GinkgoError{ Heading: "Missing Functions", - Message: formatter.F(`[%s] node must be passed a single {{bold}}func(){{/}} - but none was passed in.`, nodeType), + Message: formatter.F(`[%s] node must be passed a single function - but none was passed in.`, nodeType), CodeLocation: cl, DocLink: "node-decorators-overview", } } +func (g ginkgoErrors) InvalidTimeoutOrGracePeriodForNonContextNode(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid NodeTimeout SpecTimeout, or GracePeriod", + Message: formatter.F(`[%s] was passed NodeTimeout, SpecTimeout, or GracePeriod but does not have a callback that accepts a {{bold}}SpecContext{{/}} or {{bold}}context.Context{{/}}. You must accept a context to enable timeouts and grace periods`, nodeType), + CodeLocation: cl, + DocLink: "spec-timeouts-and-interruptible-nodes", + } +} + +func (g ginkgoErrors) InvalidTimeoutOrGracePeriodForNonContextCleanupNode(cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid NodeTimeout SpecTimeout, or GracePeriod", + Message: formatter.F(`[DeferCleanup] was passed NodeTimeout or GracePeriod but does not have a callback that accepts a {{bold}}SpecContext{{/}} or {{bold}}context.Context{{/}}. 
You must accept a context to enable timeouts and grace periods`), + CodeLocation: cl, + DocLink: "spec-timeouts-and-interruptible-nodes", + } +} + /* Ordered Container errors */ func (g ginkgoErrors) InvalidSerialNodeInNonSerialOrderedContainer(cl CodeLocation, nodeType NodeType) error { return GinkgoError{ @@ -236,6 +298,15 @@ func (g ginkgoErrors) SetupNodeNotInOrderedContainer(cl CodeLocation, nodeType N } } +func (g ginkgoErrors) InvalidContinueOnFailureDecoration(cl CodeLocation) error { + return GinkgoError{ + Heading: "ContinueOnFailure not decorating an outermost Ordered Container", + Message: "ContinueOnFailure can only decorate an Ordered container, and this Ordered container must be the outermost Ordered container.", + CodeLocation: cl, + DocLink: "ordered-containers", + } +} + /* DeferCleanup errors */ func (g ginkgoErrors) DeferCleanupInvalidFunction(cl CodeLocation) error { return GinkgoError{ @@ -258,7 +329,7 @@ func (g ginkgoErrors) PushingCleanupNodeDuringTreeConstruction(cl CodeLocation) func (g ginkgoErrors) PushingCleanupInReportingNode(cl CodeLocation, nodeType NodeType) error { return GinkgoError{ Heading: fmt.Sprintf("DeferCleanup cannot be called in %s", nodeType), - Message: "Please inline your cleanup code - Ginkgo won't run cleanup code after a ReportAfterEach or ReportAfterSuite.", + Message: "Please inline your cleanup code - Ginkgo won't run cleanup code after a Reporting node.", CodeLocation: cl, DocLink: "cleaning-up-our-cleanup-code-defercleanup", } @@ -380,6 +451,15 @@ func (g ginkgoErrors) InvalidEntryDescription(cl CodeLocation) error { } } +func (g ginkgoErrors) MissingParametersForTableFunction(cl CodeLocation) error { + return GinkgoError{ + Heading: fmt.Sprintf("No parameters have been passed to the Table Function"), + Message: fmt.Sprintf("The Table Function expected at least 1 parameter"), + CodeLocation: cl, + DocLink: "table-specs", + } +} + func (g ginkgoErrors) IncorrectParameterTypeForTable(i int, name string, cl CodeLocation) error { return GinkgoError{ Heading: "DescribeTable passed incorrect parameter type", @@ -498,6 +578,13 @@ func (g ginkgoErrors) DryRunInParallelConfiguration() error { } } +func (g ginkgoErrors) GracePeriodCannotBeZero() error { + return GinkgoError{ + Heading: "Ginkgo requires a positive --grace-period.", + Message: "Please set --grace-period to a positive duration. The default is 30s.", + } +} + func (g ginkgoErrors) ConflictingVerbosityConfiguration() error { return GinkgoError{ Heading: "Conflicting reporter verbosity settings.", @@ -532,3 +619,12 @@ func (g ginkgoErrors) BothRepeatAndUntilItFails() error { Message: "--until-it-fails directs Ginkgo to rerun specs indefinitely until they fail. --repeat directs Ginkgo to rerun specs a set number of times. You can't set both... 
which would you like?", } } + +/* Stack-Trace parsing errors */ + +func (g ginkgoErrors) FailedToParseStackTrace(message string) error { + return GinkgoError{ + Heading: "Failed to Parse Stack Trace", + Message: message, + } +} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go b/api/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go index 0403f9e63..b0d3b651e 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go +++ b/api/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go @@ -272,12 +272,23 @@ func tokenize(input string) func() (*treeNode, error) { } } +func MustParseLabelFilter(input string) LabelFilter { + filter, err := ParseLabelFilter(input) + if err != nil { + panic(err) + } + return filter +} + func ParseLabelFilter(input string) (LabelFilter, error) { if DEBUG_LABEL_FILTER_PARSING { fmt.Println("\n==============") fmt.Println("Input: ", input) fmt.Print("Tokens: ") } + if input == "" { + return func(_ []string) bool { return true }, nil + } nextToken := tokenize(input) root := &treeNode{token: lfTokenRoot} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go b/api/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go index c64866c60..7b1524b52 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go +++ b/api/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go @@ -6,8 +6,8 @@ import ( "time" ) -//ReportEntryValue wraps a report entry's value ensuring it can be encoded and decoded safely into reports -//and across the network connection when running in parallel +// ReportEntryValue wraps a report entry's value ensuring it can be encoded and decoded safely into reports +// and across the network connection when running in parallel type ReportEntryValue struct { raw interface{} //unexported to prevent gob from freaking out about unregistered structs AsJSON string @@ -50,7 +50,6 @@ func (rev ReportEntryValue) MarshalJSON() ([]byte, error) { }{ Representation: rev.String(), } - asJSON, err := json.Marshal(rev.raw) if err != nil { return nil, err @@ -86,10 +85,12 @@ func (rev *ReportEntryValue) GobDecode(data []byte) error { type ReportEntry struct { // Visibility captures the visibility policy for this ReportEntry Visibility ReportEntryVisibility - // Time captures the time the AddReportEntry was called - Time time.Time // Location captures the location of the AddReportEntry call Location CodeLocation + + Time time.Time //need this for backwards compatibility + TimelineLocation TimelineLocation + // Name captures the name of this report Name string // Value captures the (optional) object passed into AddReportEntry - this can be @@ -98,7 +99,7 @@ type ReportEntry struct { Value ReportEntryValue } -// ColorableStringer is an interface that ReportEntry values can satisfy. If they do then ColorableStirng() is used to generate their representation. +// ColorableStringer is an interface that ReportEntry values can satisfy. If they do then ColorableString() is used to generate their representation. 
type ColorableStringer interface { ColorableString() string } @@ -121,6 +122,10 @@ func (entry ReportEntry) GetRawValue() interface{} { return entry.Value.GetRawValue() } +func (entry ReportEntry) GetTimelineLocation() TimelineLocation { + return entry.TimelineLocation +} + type ReportEntries []ReportEntry func (re ReportEntries) HasVisibility(visibilities ...ReportEntryVisibility) bool { diff --git a/api/vendor/github.com/onsi/ginkgo/v2/types/types.go b/api/vendor/github.com/onsi/ginkgo/v2/types/types.go index f30d23c66..d048a8ada 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/types/types.go +++ b/api/vendor/github.com/onsi/ginkgo/v2/types/types.go @@ -2,6 +2,8 @@ package types import ( "encoding/json" + "fmt" + "sort" "strings" "time" ) @@ -56,19 +58,20 @@ type Report struct { SuiteConfig SuiteConfig //SpecReports is a list of all SpecReports generated by this test run + //It is empty when the SuiteReport is provided to ReportBeforeSuite SpecReports SpecReports } -//PreRunStats contains a set of stats captured before the test run begins. This is primarily used -//by Ginkgo's reporter to tell the user how many specs are in the current suite (PreRunStats.TotalSpecs) -//and how many it intends to run (PreRunStats.SpecsThatWillRun) after applying any relevant focus or skip filters. +// PreRunStats contains a set of stats captured before the test run begins. This is primarily used +// by Ginkgo's reporter to tell the user how many specs are in the current suite (PreRunStats.TotalSpecs) +// and how many it intends to run (PreRunStats.SpecsThatWillRun) after applying any relevant focus or skip filters. type PreRunStats struct { TotalSpecs int SpecsThatWillRun int } -//Add is ued by Ginkgo's parallel aggregation mechanisms to combine test run reports form individual parallel processes -//to form a complete final report. +// Add is used by Ginkgo's parallel aggregation mechanisms to combine test run reports from individual parallel processes +// to form a complete final report. func (report Report) Add(other Report) Report { report.SuiteSucceeded = report.SuiteSucceeded && other.SuiteSucceeded @@ -147,14 +150,24 @@ type SpecReport struct { // ParallelProcess captures the parallel process that this spec ran on ParallelProcess int + // RunningInParallel captures whether this spec is part of a suite that ran in parallel + RunningInParallel bool + //Failure is populated if a spec has failed, panicked, been interrupted, or skipped by the user (e.g. calling Skip()) //It includes detailed information about the Failure Failure Failure - // NumAttempts captures the number of times this Spec was run. Flakey specs can be retried with - // ginkgo --flake-attempts=N + // NumAttempts captures the number of times this Spec was run. + // Flakey specs can be retried with ginkgo --flake-attempts=N or the use of the FlakeAttempts decorator. + // Repeated specs can be retried with the use of the MustPassRepeatedly decorator NumAttempts int + // MaxFlakeAttempts captures whether the spec has been retried with ginkgo --flake-attempts=N or the use of the FlakeAttempts decorator.
+ MaxFlakeAttempts int + + // MaxMustPassRepeatedly captures whether the spec has the MustPassRepeatedly decorator + MaxMustPassRepeatedly int + // CapturedGinkgoWriterOutput contains text printed to the GinkgoWriter CapturedGinkgoWriterOutput string @@ -165,6 +178,15 @@ type SpecReport struct { // ReportEntries contains any reports added via `AddReportEntry` ReportEntries ReportEntries + + // ProgressReports contains any progress reports generated during this spec. These can either be manually triggered, or automatically generated by Ginkgo via the PollProgressAfter() decorator + ProgressReports []ProgressReport + + // AdditionalFailures contains any failures that occurred after the initial spec failure. These typically occur in cleanup nodes after the initial failure and are only emitted when running in verbose mode. + AdditionalFailures []AdditionalFailure + + // SpecEvents capture additional events that occur during the spec run + SpecEvents SpecEvents } func (report SpecReport) MarshalJSON() ([]byte, error) { @@ -184,9 +206,14 @@ func (report SpecReport) MarshalJSON() ([]byte, error) { ParallelProcess int Failure *Failure `json:",omitempty"` NumAttempts int - CapturedGinkgoWriterOutput string `json:",omitempty"` - CapturedStdOutErr string `json:",omitempty"` - ReportEntries ReportEntries `json:",omitempty"` + MaxFlakeAttempts int + MaxMustPassRepeatedly int + CapturedGinkgoWriterOutput string `json:",omitempty"` + CapturedStdOutErr string `json:",omitempty"` + ReportEntries ReportEntries `json:",omitempty"` + ProgressReports []ProgressReport `json:",omitempty"` + AdditionalFailures []AdditionalFailure `json:",omitempty"` + SpecEvents SpecEvents `json:",omitempty"` }{ ContainerHierarchyTexts: report.ContainerHierarchyTexts, ContainerHierarchyLocations: report.ContainerHierarchyLocations, @@ -203,6 +230,8 @@ func (report SpecReport) MarshalJSON() ([]byte, error) { Failure: nil, ReportEntries: nil, NumAttempts: report.NumAttempts, + MaxFlakeAttempts: report.MaxFlakeAttempts, + MaxMustPassRepeatedly: report.MaxMustPassRepeatedly, CapturedGinkgoWriterOutput: report.CapturedGinkgoWriterOutput, CapturedStdOutErr: report.CapturedStdOutErr, } @@ -213,6 +242,15 @@ func (report SpecReport) MarshalJSON() ([]byte, error) { if len(report.ReportEntries) > 0 { out.ReportEntries = report.ReportEntries } + if len(report.ProgressReports) > 0 { + out.ProgressReports = report.ProgressReports + } + if len(report.AdditionalFailures) > 0 { + out.AdditionalFailures = report.AdditionalFailures + } + if len(report.SpecEvents) > 0 { + out.SpecEvents = report.SpecEvents + } return json.Marshal(out) } @@ -230,13 +268,13 @@ func (report SpecReport) CombinedOutput() string { return report.CapturedStdOutErr + "\n" + report.CapturedGinkgoWriterOutput } -//Failed returns true if report.State is one of the SpecStateFailureStates +// Failed returns true if report.State is one of the SpecStateFailureStates // (SpecStateFailed, SpecStatePanicked, SpecStateinterrupted, SpecStateAborted) func (report SpecReport) Failed() bool { return report.State.Is(SpecStateFailureStates) } -//FullText returns a concatenation of all the report.ContainerHierarchyTexts and report.LeafNodeText +// FullText returns a concatenation of all the report.ContainerHierarchyTexts and report.LeafNodeText func (report SpecReport) FullText() string { texts := []string{} texts = append(texts, report.ContainerHierarchyTexts...) 
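A minimal sketch of how this retry metadata can be consumed, assuming a Ginkgo v2 suite file with the usual dot-import; the reporting callback below is an illustration, not part of the vendored change:

ReportAfterEach(func(report SpecReport) {
	// A spec flaked if retries were allowed (MaxFlakeAttempts > 1), it ultimately
	// did not fail, and it still needed more than one attempt to get there.
	if report.MaxFlakeAttempts > 1 && report.NumAttempts > 1 && !report.Failed() {
		fmt.Printf("flaky spec %q passed after %d attempts\n", report.FullText(), report.NumAttempts)
	}
})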
@@ -246,7 +284,7 @@ func (report SpecReport) FullText() string { return strings.Join(texts, " ") } -//Labels returns a deduped set of all the spec's Labels. +// Labels returns a deduped set of all the spec's Labels. func (report SpecReport) Labels() []string { out := []string{} seen := map[string]bool{} @@ -268,7 +306,7 @@ func (report SpecReport) Labels() []string { return out } -//MatchesLabelFilter returns true if the spec satisfies the passed in label filter query +// MatchesLabelFilter returns true if the spec satisfies the passed in label filter query func (report SpecReport) MatchesLabelFilter(query string) (bool, error) { filter, err := ParseLabelFilter(query) if err != nil { @@ -277,29 +315,54 @@ func (report SpecReport) MatchesLabelFilter(query string) (bool, error) { return filter(report.Labels()), nil } -//FileName() returns the name of the file containing the spec +// FileName() returns the name of the file containing the spec func (report SpecReport) FileName() string { return report.LeafNodeLocation.FileName } -//LineNumber() returns the line number of the leaf node +// LineNumber() returns the line number of the leaf node func (report SpecReport) LineNumber() int { return report.LeafNodeLocation.LineNumber } -//FailureMessage() returns the failure message (or empty string if the test hasn't failed) +// FailureMessage() returns the failure message (or empty string if the test hasn't failed) func (report SpecReport) FailureMessage() string { return report.Failure.Message } -//FailureLocation() returns the location of the failure (or an empty CodeLocation if the test hasn't failed) +// FailureLocation() returns the location of the failure (or an empty CodeLocation if the test hasn't failed) func (report SpecReport) FailureLocation() CodeLocation { return report.Failure.Location } +// Timeline() returns a timeline view of the report +func (report SpecReport) Timeline() Timeline { + timeline := Timeline{} + if !report.Failure.IsZero() { + timeline = append(timeline, report.Failure) + if report.Failure.AdditionalFailure != nil { + timeline = append(timeline, *(report.Failure.AdditionalFailure)) + } + } + for _, additionalFailure := range report.AdditionalFailures { + timeline = append(timeline, additionalFailure) + } + for _, reportEntry := range report.ReportEntries { + timeline = append(timeline, reportEntry) + } + for _, progressReport := range report.ProgressReports { + timeline = append(timeline, progressReport) + } + for _, specEvent := range report.SpecEvents { + timeline = append(timeline, specEvent) + } + sort.Sort(timeline) + return timeline +} + type SpecReports []SpecReport -//WithLeafNodeType returns the subset of SpecReports with LeafNodeType matching one of the requested NodeTypes +// WithLeafNodeType returns the subset of SpecReports with LeafNodeType matching one of the requested NodeTypes func (reports SpecReports) WithLeafNodeType(nodeTypes NodeType) SpecReports { count := 0 for i := range reports { @@ -319,7 +382,7 @@ func (reports SpecReports) WithLeafNodeType(nodeTypes NodeType) SpecReports { return out } -//WithState returns the subset of SpecReports with State matching one of the requested SpecStates +// WithState returns the subset of SpecReports with State matching one of the requested SpecStates func (reports SpecReports) WithState(states SpecState) SpecReports { count := 0 for i := range reports { @@ -338,7 +401,7 @@ func (reports SpecReports) WithState(states SpecState) SpecReports { return out } -//CountWithState returns the number of SpecReports 
with State matching one of the requested SpecStates +// CountWithState returns the number of SpecReports with State matching one of the requested SpecStates func (reports SpecReports) CountWithState(states SpecState) int { n := 0 for i := range reports { @@ -349,17 +412,75 @@ func (reports SpecReports) CountWithState(states SpecState) int { return n } -//CountWithState returns the number of SpecReports that passed after multiple attempts +// CountOfFlakedSpecs returns the number of specs that passed, but only after multiple attempts (i.e. they flaked). func (reports SpecReports) CountOfFlakedSpecs() int { n := 0 for i := range reports { - if reports[i].State.Is(SpecStatePassed) && reports[i].NumAttempts > 1 { + if reports[i].MaxFlakeAttempts > 1 && reports[i].State.Is(SpecStatePassed) && reports[i].NumAttempts > 1 { n += 1 } } return n } +// CountOfRepeatedSpecs returns the number of specs that were run with MustPassRepeatedly and still failed after multiple attempts +func (reports SpecReports) CountOfRepeatedSpecs() int { + n := 0 + for i := range reports { + if reports[i].MaxMustPassRepeatedly > 1 && reports[i].State.Is(SpecStateFailureStates) && reports[i].NumAttempts > 1 { + n += 1 + } + } + return n +} + +// TimelineLocation captures the location of an event in the spec's timeline +type TimelineLocation struct { + //Offset is the offset (in bytes) of the event relative to the GinkgoWriter stream + Offset int `json:",omitempty"` + + //Order is the order of the event with respect to other events. The absolute value of Order + //is irrelevant. All that matters is that an event with a lower Order occurs before an event with a higher Order + Order int `json:",omitempty"` + + Time time.Time +} + +// TimelineEvent represents an event on the timeline +// consumers of Timeline will need to check the concrete type of each entry to determine how to handle it +type TimelineEvent interface { + GetTimelineLocation() TimelineLocation +} + +type Timeline []TimelineEvent + +func (t Timeline) Len() int { return len(t) } +func (t Timeline) Less(i, j int) bool { + return t[i].GetTimelineLocation().Order < t[j].GetTimelineLocation().Order +} +func (t Timeline) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t Timeline) WithoutHiddenReportEntries() Timeline { + out := Timeline{} + for _, event := range t { + if reportEntry, isReportEntry := event.(ReportEntry); isReportEntry && reportEntry.Visibility == ReportEntryVisibilityNever { + continue + } + out = append(out, event) + } + return out +} + +func (t Timeline) WithoutVeryVerboseSpecEvents() Timeline { + out := Timeline{} + for _, event := range t { + if specEvent, isSpecEvent := event.(SpecEvent); isSpecEvent && specEvent.IsOnlyVisibleAtVeryVerbose() { + continue + } + out = append(out, event) + } + return out +} + // Failure captures failure information for an individual test type Failure struct { // Message - the failure message passed into Fail(...). When using a matcher library @@ -372,6 +493,8 @@ type Failure struct { // This CodeLocation will include a fully-populated StackTrace Location CodeLocation + TimelineLocation TimelineLocation + // ForwardedPanic - if the failure represents a captured panic (i.e. Summary.State == SpecStatePanicked) // then ForwardedPanic will be populated with a string representation of the captured panic.
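// A minimal sketch of consuming the Timeline defined above: consumers type-switch
// on each entry's concrete type (report here is an assumed SpecReport value):
//
//	for _, event := range report.Timeline() {
//		switch e := event.(type) {
//		case Failure:
//			fmt.Println("failure:", e.Message)
//		case AdditionalFailure:
//			fmt.Println("follow-on failure:", e.Failure.Message)
//		case ReportEntry:
//			fmt.Println("report entry:", e.Name)
//		case ProgressReport:
//			fmt.Println("progress report:", e.Message)
//		case SpecEvent:
//			fmt.Println("spec event:", e.SpecEventType.String())
//		}
//	}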
ForwardedPanic string `json:",omitempty"` @@ -379,19 +502,32 @@ type Failure struct { // FailureNodeContext - one of three contexts describing the node in which the failure occurred: // FailureNodeIsLeafNode means the failure occurred in the leaf node of the associated SpecReport. None of the other FailureNode fields will be populated // FailureNodeAtTopLevel means the failure occurred in a non-leaf node that is defined at the top-level of the spec (i.e. not in a container). FailureNodeType and FailureNodeLocation will be populated. - // FailureNodeInContainer means the failure occurred in a non-leaf node that is defined within a container. FailureNodeType, FailureNodeLocaiton, and FailureNodeContainerIndex will be populated. + // FailureNodeInContainer means the failure occurred in a non-leaf node that is defined within a container. FailureNodeType, FailureNodeLocation, and FailureNodeContainerIndex will be populated. // // FailureNodeType will contain the NodeType of the node in which the failure occurred. // FailureNodeLocation will contain the CodeLocation of the node in which the failure occurred. // If populated, FailureNodeContainerIndex will be the index into SpecReport.ContainerHierarchyTexts and SpecReport.ContainerHierarchyLocations that represents the parent container of the node in which the failure occurred. - FailureNodeContext FailureNodeContext - FailureNodeType NodeType - FailureNodeLocation CodeLocation - FailureNodeContainerIndex int + FailureNodeContext FailureNodeContext `json:",omitempty"` + + FailureNodeType NodeType `json:",omitempty"` + + FailureNodeLocation CodeLocation `json:",omitempty"` + + FailureNodeContainerIndex int `json:",omitempty"` + + //ProgressReport is populated if the spec was interrupted or timed out + ProgressReport ProgressReport `json:",omitempty"` + + //AdditionalFailure is non-nil if a follow-on failure occurred within the same node after the primary failure. This only happens when a node has timed out or been interrupted. In such cases the AdditionalFailure can include information about where/why the spec was stuck. + AdditionalFailure *AdditionalFailure `json:",omitempty"` } func (f Failure) IsZero() bool { - return f == Failure{} + return f.Message == "" && (f.Location == CodeLocation{}) +} + +func (f Failure) GetTimelineLocation() TimelineLocation { + return f.TimelineLocation } // FailureNodeContext captures the location context for the node containing the failing line of code @@ -424,6 +560,18 @@ func (fnc FailureNodeContext) MarshalJSON() ([]byte, error) { return fncEnumSupport.MarshJSON(uint(fnc)) } +// AdditionalFailure captures any additional failures that occur after the initial failure of a spec +// these typically occur in cleanup nodes after the spec has failed.
+// We can't simply use Failure as we want to track the SpecState to know what kind of failure this is +type AdditionalFailure struct { + State SpecState + Failure Failure +} + +func (f AdditionalFailure) GetTimelineLocation() TimelineLocation { + return f.Failure.TimelineLocation +} + // SpecState captures the state of a spec // To determine if a given `state` represents a failure state, use `state.Is(SpecStateFailureStates)` type SpecState uint @@ -438,6 +586,7 @@ const ( SpecStateAborted SpecStatePanicked SpecStateInterrupted + SpecStateTimedout ) var ssEnumSupport = NewEnumSupport(map[uint]string{ @@ -449,11 +598,15 @@ var ssEnumSupport = NewEnumSupport(map[uint]string{ uint(SpecStateAborted): "aborted", uint(SpecStatePanicked): "panicked", uint(SpecStateInterrupted): "interrupted", + uint(SpecStateTimedout): "timedout", }) func (ss SpecState) String() string { return ssEnumSupport.String(uint(ss)) } +func (ss SpecState) GomegaString() string { + return ssEnumSupport.String(uint(ss)) +} func (ss *SpecState) UnmarshalJSON(b []byte) error { out, err := ssEnumSupport.UnmarshJSON(b) *ss = SpecState(out) @@ -463,12 +616,131 @@ func (ss SpecState) MarshalJSON() ([]byte, error) { return ssEnumSupport.MarshJSON(uint(ss)) } -var SpecStateFailureStates = SpecStateFailed | SpecStateAborted | SpecStatePanicked | SpecStateInterrupted +var SpecStateFailureStates = SpecStateFailed | SpecStateTimedout | SpecStateAborted | SpecStatePanicked | SpecStateInterrupted func (ss SpecState) Is(states SpecState) bool { return ss&states != 0 } +// ProgressReport captures the progress of the current spec. It is, effectively, a structured Ginkgo-aware stack trace +type ProgressReport struct { + Message string `json:",omitempty"` + ParallelProcess int `json:",omitempty"` + RunningInParallel bool `json:",omitempty"` + + ContainerHierarchyTexts []string `json:",omitempty"` + LeafNodeText string `json:",omitempty"` + LeafNodeLocation CodeLocation `json:",omitempty"` + SpecStartTime time.Time `json:",omitempty"` + + CurrentNodeType NodeType `json:",omitempty"` + CurrentNodeText string `json:",omitempty"` + CurrentNodeLocation CodeLocation `json:",omitempty"` + CurrentNodeStartTime time.Time `json:",omitempty"` + + CurrentStepText string `json:",omitempty"` + CurrentStepLocation CodeLocation `json:",omitempty"` + CurrentStepStartTime time.Time `json:",omitempty"` + + AdditionalReports []string `json:",omitempty"` + + CapturedGinkgoWriterOutput string `json:",omitempty"` + TimelineLocation TimelineLocation `json:",omitempty"` + + Goroutines []Goroutine `json:",omitempty"` +} + +func (pr ProgressReport) IsZero() bool { + return pr.CurrentNodeType == NodeTypeInvalid +} + +func (pr ProgressReport) Time() time.Time { + return pr.TimelineLocation.Time +} + +func (pr ProgressReport) SpecGoroutine() Goroutine { + for _, goroutine := range pr.Goroutines { + if goroutine.IsSpecGoroutine { + return goroutine + } + } + return Goroutine{} +} + +func (pr ProgressReport) HighlightedGoroutines() []Goroutine { + out := []Goroutine{} + for _, goroutine := range pr.Goroutines { + if goroutine.IsSpecGoroutine || !goroutine.HasHighlights() { + continue + } + out = append(out, goroutine) + } + return out +} + +func (pr ProgressReport) OtherGoroutines() []Goroutine { + out := []Goroutine{} + for _, goroutine := range pr.Goroutines { + if goroutine.IsSpecGoroutine || goroutine.HasHighlights() { + continue + } + out = append(out, goroutine) + } + return out +} + +func (pr ProgressReport) WithoutCapturedGinkgoWriterOutput() ProgressReport { + 
out := pr + out.CapturedGinkgoWriterOutput = "" + return out +} + +func (pr ProgressReport) WithoutOtherGoroutines() ProgressReport { + out := pr + filteredGoroutines := []Goroutine{} + for _, goroutine := range pr.Goroutines { + if goroutine.IsSpecGoroutine || goroutine.HasHighlights() { + filteredGoroutines = append(filteredGoroutines, goroutine) + } + } + out.Goroutines = filteredGoroutines + return out +} + +func (pr ProgressReport) GetTimelineLocation() TimelineLocation { + return pr.TimelineLocation +} + +type Goroutine struct { + ID uint64 + State string + Stack []FunctionCall + IsSpecGoroutine bool +} + +func (g Goroutine) IsZero() bool { + return g.ID == 0 +} + +func (g Goroutine) HasHighlights() bool { + for _, fc := range g.Stack { + if fc.Highlight { + return true + } + } + + return false +} + +type FunctionCall struct { + Function string + Filename string + Line int + Highlight bool `json:",omitempty"` + Source []string `json:",omitempty"` + SourceHighlight int `json:",omitempty"` +} + // NodeType captures the type of a given Ginkgo Node type NodeType uint @@ -493,6 +765,7 @@ const ( NodeTypeReportBeforeEach NodeTypeReportAfterEach + NodeTypeReportBeforeSuite NodeTypeReportAfterSuite NodeTypeCleanupInvalid @@ -502,7 +775,9 @@ ) var NodeTypesForContainerAndIt = NodeTypeContainer | NodeTypeIt -var NodeTypesForSuiteLevelNodes = NodeTypeBeforeSuite | NodeTypeSynchronizedBeforeSuite | NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeReportAfterSuite | NodeTypeCleanupAfterSuite +var NodeTypesForSuiteLevelNodes = NodeTypeBeforeSuite | NodeTypeSynchronizedBeforeSuite | NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite | NodeTypeCleanupAfterSuite +var NodeTypesAllowedDuringCleanupInterrupt = NodeTypeAfterEach | NodeTypeJustAfterEach | NodeTypeAfterAll | NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeCleanupAfterEach | NodeTypeCleanupAfterAll | NodeTypeCleanupAfterSuite +var NodeTypesAllowedDuringReportInterrupt = NodeTypeReportBeforeEach | NodeTypeReportAfterEach | NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite var ntEnumSupport = NewEnumSupport(map[uint]string{ uint(NodeTypeInvalid): "INVALID NODE TYPE", @@ -520,9 +795,10 @@ var ntEnumSupport = NewEnumSupport(map[uint]string{ uint(NodeTypeSynchronizedAfterSuite): "SynchronizedAfterSuite", uint(NodeTypeReportBeforeEach): "ReportBeforeEach", uint(NodeTypeReportAfterEach): "ReportAfterEach", + uint(NodeTypeReportBeforeSuite): "ReportBeforeSuite", uint(NodeTypeReportAfterSuite): "ReportAfterSuite", - uint(NodeTypeCleanupInvalid): "INVALID CLEANUP NODE", - uint(NodeTypeCleanupAfterEach): "DeferCleanup", + uint(NodeTypeCleanupInvalid): "DeferCleanup", + uint(NodeTypeCleanupAfterEach): "DeferCleanup (Each)", uint(NodeTypeCleanupAfterAll): "DeferCleanup (All)", uint(NodeTypeCleanupAfterSuite): "DeferCleanup (Suite)", }) @@ -542,3 +818,99 @@ func (nt NodeType) MarshalJSON() ([]byte, error) { func (nt NodeType) Is(nodeTypes NodeType) bool { return nt&nodeTypes != 0 } + +/* +SpecEvent captures a variety of events that can occur when specs run. See SpecEventType for the list of available events.
+*/ +type SpecEvent struct { + SpecEventType SpecEventType + + CodeLocation CodeLocation + TimelineLocation TimelineLocation + + Message string `json:",omitempty"` + Duration time.Duration `json:",omitempty"` + NodeType NodeType `json:",omitempty"` + Attempt int `json:",omitempty"` +} + +func (se SpecEvent) GetTimelineLocation() TimelineLocation { + return se.TimelineLocation +} + +func (se SpecEvent) IsOnlyVisibleAtVeryVerbose() bool { + return se.SpecEventType.Is(SpecEventByEnd | SpecEventNodeStart | SpecEventNodeEnd) +} + +func (se SpecEvent) GomegaString() string { + out := &strings.Builder{} + out.WriteString("[" + se.SpecEventType.String() + " SpecEvent] ") + if se.Message != "" { + out.WriteString("Message=") + out.WriteString(`"` + se.Message + `",`) + } + if se.Duration != 0 { + out.WriteString("Duration=" + se.Duration.String() + ",") + } + if se.NodeType != NodeTypeInvalid { + out.WriteString("NodeType=" + se.NodeType.String() + ",") + } + if se.Attempt != 0 { + out.WriteString(fmt.Sprintf("Attempt=%d", se.Attempt) + ",") + } + out.WriteString("CL=" + se.CodeLocation.String() + ",") + out.WriteString(fmt.Sprintf("TL.Offset=%d", se.TimelineLocation.Offset)) + + return out.String() +} + +type SpecEvents []SpecEvent + +func (se SpecEvents) WithType(seType SpecEventType) SpecEvents { + out := SpecEvents{} + for _, event := range se { + if event.SpecEventType.Is(seType) { + out = append(out, event) + } + } + return out +} + +type SpecEventType uint + +const ( + SpecEventInvalid SpecEventType = 0 + + SpecEventByStart SpecEventType = 1 << iota + SpecEventByEnd + SpecEventNodeStart + SpecEventNodeEnd + SpecEventSpecRepeat + SpecEventSpecRetry +) + +var seEnumSupport = NewEnumSupport(map[uint]string{ + uint(SpecEventInvalid): "INVALID SPEC EVENT", + uint(SpecEventByStart): "By", + uint(SpecEventByEnd): "By (End)", + uint(SpecEventNodeStart): "Node", + uint(SpecEventNodeEnd): "Node (End)", + uint(SpecEventSpecRepeat): "Repeat", + uint(SpecEventSpecRetry): "Retry", +}) + +func (se SpecEventType) String() string { + return seEnumSupport.String(uint(se)) +} +func (se *SpecEventType) UnmarshalJSON(b []byte) error { + out, err := seEnumSupport.UnmarshJSON(b) + *se = SpecEventType(out) + return err +} +func (se SpecEventType) MarshalJSON() ([]byte, error) { + return seEnumSupport.MarshJSON(uint(se)) +} + +func (se SpecEventType) Is(specEventTypes SpecEventType) bool { + return se&specEventTypes != 0 +} diff --git a/api/vendor/github.com/onsi/ginkgo/v2/types/version.go b/api/vendor/github.com/onsi/ginkgo/v2/types/version.go index 74261450b..6bc46150e 100644 --- a/api/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/api/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.1.3" +const VERSION = "2.9.7" diff --git a/api/vendor/github.com/onsi/gomega/.gitignore b/api/vendor/github.com/onsi/gomega/.gitignore index 720c13cba..425d0a509 100644 --- a/api/vendor/github.com/onsi/gomega/.gitignore +++ b/api/vendor/github.com/onsi/gomega/.gitignore @@ -3,3 +3,5 @@ . 
.idea gomega.iml +TODO +.vscode \ No newline at end of file diff --git a/api/vendor/github.com/onsi/gomega/CHANGELOG.md b/api/vendor/github.com/onsi/gomega/CHANGELOG.md index e3b437985..9b83dd6d4 100644 --- a/api/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/api/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,281 @@ +## 1.27.8 + +### Fixes +- HaveExactElement should not call FailureMessage if a submatcher returned an error [096f392] + +### Maintenance +- Bump github.com/onsi/ginkgo/v2 from 2.9.5 to 2.9.7 (#669) [8884bee] + +## 1.27.7 + +### Fixes +- fix: gcustom.MakeMatcher accepts nil as actual value (#666) [57054d5] + +### Maintenance +- update gitignore [05c1bc6] +- Bump github.com/onsi/ginkgo/v2 from 2.9.4 to 2.9.5 (#663) [7cadcf6] +- Bump golang.org/x/net from 0.9.0 to 0.10.0 (#662) [b524839] +- Bump github.com/onsi/ginkgo/v2 from 2.9.2 to 2.9.4 (#661) [5f44694] +- Bump commonmarker from 0.23.8 to 0.23.9 in /docs (#657) [05dc99a] +- Bump nokogiri from 1.14.1 to 1.14.3 in /docs (#658) [3a033d1] +- Replace deprecated NewGomegaWithT with NewWithT (#659) [a19238f] +- Bump golang.org/x/net from 0.8.0 to 0.9.0 (#656) [29ed041] +- Bump actions/setup-go from 3 to 4 (#651) [11b2080] + +## 1.27.6 + +### Fixes +- Allow collections matchers to work correctly when expected has nil elements [60e7cf3] + +### Maintenance +- updates MatchError godoc comment to also accept a Gomega matcher (#654) [67b869d] + +## 1.27.5 + +### Maintenance +- Bump github.com/onsi/ginkgo/v2 from 2.9.1 to 2.9.2 (#653) [a215021] +- Bump github.com/go-task/slim-sprig (#652) [a26fed8] + +## 1.27.4 + +### Fixes +- improve error formatting and remove duplication of error message in Eventually/Consistently [854f075] + +### Maintenance +- Bump github.com/onsi/ginkgo/v2 from 2.9.0 to 2.9.1 (#650) [ccebd9b] + +## 1.27.3 + +### Fixes +- format.Object now always includes err.Error() when passed an error [86d97ef] +- Fix HaveExactElements to work inside ContainElement or other collection matchers (#648) [636757e] + +### Maintenance +- Bump github.com/golang/protobuf from 1.5.2 to 1.5.3 (#649) [cc16689] +- Bump github.com/onsi/ginkgo/v2 from 2.8.4 to 2.9.0 (#646) [e783366] + +## 1.27.2 + +### Fixes +- improve poll progress message when polling a consistently that has been passing [28a319b] + +### Maintenance +- bump ginkgo +- remove tools.go hack as Ginkgo 2.8.2 automatically pulls in the cli dependencies [81443b3] + +## 1.27.1 + +### Maintenance + +- Bump golang.org/x/net from 0.6.0 to 0.7.0 (#640) [bc686cd] + +## 1.27.0 + +### Features +- Add HaveExactElements matcher (#634) [9d50783] +- update Gomega docs to discuss GinkgoHelper() [be32774] + +### Maintenance +- Bump github.com/onsi/ginkgo/v2 from 2.8.0 to 2.8.1 (#639) [296a68b] +- Bump golang.org/x/net from 0.5.0 to 0.6.0 (#638) [c2b098b] +- Bump github-pages from 227 to 228 in /docs (#636) [a9069ab] +- test: update matrix for Go 1.20 (#635) [6bd25c8] +- Bump github.com/onsi/ginkgo/v2 from 2.7.0 to 2.8.0 (#631) [5445f8b] +- Bump webrick from 1.7.0 to 1.8.1 in /docs (#630) [03e93bb] +- codeql: add ruby language (#626) [63c7d21] +- dependabot: add bundler package-ecosystem for docs (#625) [d92f963] + +## 1.26.0 + +### Features +- When a polled function returns an error, keep track of the actual and report on the matcher state of the last non-errored actual [21f3090] +- improve eventually failure message output [c530fb3] + +### Fixes +- fix several documentation spelling issues [e2eff1f] + + +## 1.25.0 + +### Features +- add `MustPassRepeatedly(int)` to asyncAssertion (#619) 
[4509f72] +- compare unwrapped errors using DeepEqual (#617) [aaeaa5d] + +### Maintenance +- Bump golang.org/x/net from 0.4.0 to 0.5.0 (#614) [c7cfea4] +- Bump github.com/onsi/ginkgo/v2 from 2.6.1 to 2.7.0 (#615) [71b8adb] +- Docs: Fix typo "MUltiple" -> "Multiple" (#616) [9351dda] +- clean up go.sum [cd1dc1d] + +## 1.24.2 + +### Fixes +- Correctly handle assertion failure panics for eventually/consistnetly "g Gomega"s in a goroutine [78f1660] +- docs:Fix typo "you an" -> "you can" (#607) [3187c1f] +- fixes issue #600 (#606) [808d192] + +### Maintenance +- Bump golang.org/x/net from 0.2.0 to 0.4.0 (#611) [6ebc0bf] +- Bump nokogiri from 1.13.9 to 1.13.10 in /docs (#612) [258cfc8] +- Bump github.com/onsi/ginkgo/v2 from 2.5.0 to 2.5.1 (#609) [e6c3eb9] + +## 1.24.1 + +### Fixes +- maintain backward compatibility for Eventually and Consisntetly's signatures [4c7df5e] +- fix small typo (#601) [ea0ebe6] + +### Maintenance +- Bump golang.org/x/net from 0.1.0 to 0.2.0 (#603) [1ba8372] +- Bump github.com/onsi/ginkgo/v2 from 2.4.0 to 2.5.0 (#602) [f9426cb] +- fix label-filter in test.yml [d795db6] +- stop running flakey tests and rely on external network dependencies in CI [7133290] + +## 1.24.0 + +### Features + +Introducting [gcustom](https://onsi.github.io/gomega/#gcustom-a-convenient-mechanism-for-buildling-custom-matchers) - a convenient mechanism for building custom matchers. + +This is an RC release for `gcustom`. The external API may be tweaked in response to feedback however it is expected to remain mostly stable. + +### Maintenance + +- Update BeComparableTo documentation [756eaa0] + +## 1.23.0 + +### Features +- Custom formatting on a per-type basis can be provided using `format.RegisterCustomFormatter()` -- see the docs [here](https://onsi.github.io/gomega/#adjusting-output) + +- Substantial improvement have been made to `StopTrying()`: + - Users can now use `StopTrying().Wrap(err)` to wrap errors and `StopTrying().Attach(description, object)` to attach arbitrary objects to the `StopTrying()` error + - `StopTrying()` is now always interpreted as a failure. If you are an early adopter of `StopTrying()` you may need to change your code as the prior version would match against the returned value even if `StopTrying()` was returned. Going forward the `StopTrying()` api should remain stable. + - `StopTrying()` and `StopTrying().Now()` can both be used in matchers - not just polled functions. + +- `TryAgainAfter(duration)` is used like `StopTrying()` but instructs `Eventually` and `Consistently` that the poll should be tried again after the specified duration. This allows you to dynamically adjust the polling duration. + +- `ctx` can now be passed-in as the first argument to `Eventually` and `Consistently`. + +## Maintenance + +- Bump github.com/onsi/ginkgo/v2 from 2.3.0 to 2.3.1 (#597) [afed901] +- Bump nokogiri from 1.13.8 to 1.13.9 in /docs (#599) [7c691b3] +- Bump github.com/google/go-cmp from 0.5.8 to 0.5.9 (#587) [ff22665] + +## 1.22.1 + +## Fixes +- When passed a context and no explicit timeout, Eventually will only timeout when the context is cancelled [e5105cf] +- Allow StopTrying() to be wrapped [bf3cba9] + +## Maintenance +- bump to ginkgo v2.3.0 [c5d5c39] + +## 1.22.0 + +### Features + +Several improvements have been made to `Eventually` and `Consistently` in this and the most recent releases: + +- Eventually and Consistently can take a context.Context [65c01bc] + This enables integration with Ginkgo 2.3.0's interruptible nodes and node timeouts. 
+- Eventually and Consistently that are passed a SpecContext can provide reports when an interrupt occurs [0d063c9] +- Eventually/Consistently will forward an attached context to functions that ask for one [e2091c5] +- Eventually/Consistently supports passing arguments to functions via WithArguments() [a2dc7c3] +- Eventually and Consistently can now be stopped early with StopTrying(message) and StopTrying(message).Now() [52976bb] + +These improvements are all documented in [Gomega's docs](https://onsi.github.io/gomega/#making-asynchronous-assertions) + +## Fixes + +## Maintenance + +## 1.21.1 + +### Features +- Eventually and Consistently that are passed a SpecContext can provide reports when an interrupt occurs [0d063c9] + +## 1.21.0 + +### Features +- Eventually and Consistently can take a context.Context [65c01bc] + This enables integration with Ginkgo 2.3.0's interruptible nodes and node timeouts. +- Introduces Eventually.Within.ProbeEvery with tests and documentation (#591) [f633800] +- New BeKeyOf matcher with documentation and unit tests (#590) [fb586b3] + +## Fixes +- Cover the entire gmeasure suite with leak detection [8c54344] +- Fix gmeasure leak [119d4ce] +- Ignore new Ginkgo ProgressSignal goroutine in gleak [ba548e2] + +## Maintenance + +- Fixes crashes on newer Ruby 3 installations by upgrading github-pages gem dependency (#596) [12469a0] + + +## 1.20.2 + +## Fixes +- label specs that rely on remote access; bump timeout on short-circuit test to make it less flaky [35eeadf] +- gexec: allow more headroom for SIGABRT-related unit tests (#581) [5b78f40] +- Enable reading from a closed gbytes.Buffer (#575) [061fd26] + +## Maintenance +- Bump github.com/onsi/ginkgo/v2 from 2.1.5 to 2.1.6 (#583) [55d895b] +- Bump github.com/onsi/ginkgo/v2 from 2.1.4 to 2.1.5 (#582) [346de7c] + +## 1.20.1 + +## Fixes +- fix false positive gleaks when using ginkgo -p (#577) [cb46517] +- Fix typos in gomega_dsl.go (#569) [5f71ed2] +- don't panic on Eventually(nil), fixing #555 (#567) [9d1186f] +- vet optional description args in assertions, fixing #560 (#566) [8e37808] + +## Maintenance +- test: add new Go 1.19 to test matrix (#571) [40d7efe] +- Bump tzinfo from 1.2.9 to 1.2.10 in /docs (#564) [5f26371] + +## 1.20.0 + +## Features +- New [`gleak`](https://onsi.github.io/gomega/#codegleakcode-finding-leaked-goroutines) experimental goroutine leak detection package! (#538) [85ba7bc] +- New `BeComparableTo` matcher(#546) that uses `gocmp` to make comparisons [e77ea75] +- New `HaveExistingField` matcher (#553) [fd130e1] +- Document how to wrap Gomega (#539) [56714a4] + +## Fixes +- Support pointer receivers in HaveField; fixes #543 (#544) [8dab36e] + +## Maintenance +- Bump various dependencies: + - Upgrade to yaml.v3 (#556) [f5a83b1] + - Bump github/codeql-action from 1 to 2 (#549) [52f5adf] + - Bump github.com/google/go-cmp from 0.5.7 to 0.5.8 (#551) [5f3942d] + - Bump nokogiri from 1.13.4 to 1.13.6 in /docs (#554) [eb4b4c2] + - Use latest ginkgo (#535) [1c29028] + - Bump nokogiri from 1.13.3 to 1.13.4 in /docs (#541) [1ce84d5] + - Bump actions/setup-go from 2 to 3 (#540) [755485e] + - Bump nokogiri from 1.12.5 to 1.13.3 in /docs (#522) [4fbb0dc] + - Bump actions/checkout from 2 to 3 (#526) [ac49202] + +## 1.19.0 + +## Features +- New [`HaveEach`](https://onsi.github.io/gomega/#haveeachelement-interface) matcher to ensure that each and every element in an `array`, `slice`, or `map` satisfies the passed in matcher. 
(#523) [9fc2ae2] (#524) [c8ba582] +- Users can now wrap the `Gomega` interface to implement custom behavior on each assertion. (#521) [1f2e714] +- [`ContainElement`](https://onsi.github.io/gomega/#containelementelement-interface) now accepts an additional pointer argument. Elements that satisfy the matcher are stored in the pointer enabling developers to easily add subsequent, more detailed, assertions against the matching element. (#527) [1a4e27f] + +## Fixes +- update RELEASING instructions to match ginkgo [0917cde] +- Bump github.com/onsi/ginkgo/v2 from 2.0.0 to 2.1.3 (#519) [49ab4b0] +- Fix CVE-2021-38561 (#534) [f1b4456] +- Fix max number of samples in experiments on non-64-bit systems. (#528) [1c84497] +- Remove dependency on ginkgo v1.16.4 (#530) [4dea8d5] +- Fix for Go 1.18 (#532) [56d2a29] +- Document precendence of timeouts (#533) [b607941] + ## 1.18.1 ## Fixes diff --git a/api/vendor/github.com/onsi/gomega/RELEASING.md b/api/vendor/github.com/onsi/gomega/RELEASING.md index 998d64ee7..9973fff49 100644 --- a/api/vendor/github.com/onsi/gomega/RELEASING.md +++ b/api/vendor/github.com/onsi/gomega/RELEASING.md @@ -1,12 +1,23 @@ A Gomega release is a tagged sha and a GitHub release. To cut a release: 1. Ensure CHANGELOG.md is up to date. - - Use `git log --pretty=format:'- %s [%h]' HEAD...vX.X.X` to list all the commits since the last release + - Use + ```bash + LAST_VERSION=$(git tag --sort=version:refname | tail -n1) + CHANGES=$(git log --pretty=format:'- %s [%h]' HEAD...$LAST_VERSION) + echo -e "## NEXT\n\n$CHANGES\n\n### Features\n\n### Fixes\n\n### Maintenance\n\n$(cat CHANGELOG.md)" > CHANGELOG.md + ``` + to update the changelog - Categorize the changes into - Breaking Changes (requires a major version) - New Features (minor version) - Fixes (fix version) - Maintenance (which in general should not be mentioned in `CHANGELOG.md` as they have no user impact) -2. Update GOMEGA_VERSION in `gomega_dsl.go` -3. Push a commit with the version number as the commit message (e.g. `v1.3.0`) -4. Create a new [GitHub release](https://help.github.com/articles/creating-releases/) with the version number as the tag (e.g. `v1.3.0`). List the key changes in the release notes. +1. Update GOMEGA_VERSION in `gomega_dsl.go` +1. Commit, push, and release: + ``` + git commit -m "vM.m.p" + git push + gh release create "vM.m.p" + git fetch --tags origin master + ``` \ No newline at end of file diff --git a/api/vendor/github.com/onsi/gomega/format/format.go b/api/vendor/github.com/onsi/gomega/format/format.go index 6e78c391d..56bdd053b 100644 --- a/api/vendor/github.com/onsi/gomega/format/format.go +++ b/api/vendor/github.com/onsi/gomega/format/format.go @@ -52,7 +52,7 @@ var CharactersAroundMismatchToInclude uint = 5 var contextType = reflect.TypeOf((*context.Context)(nil)).Elem() var timeType = reflect.TypeOf(time.Time{}) -//The default indentation string emitted by the format package +// The default indentation string emitted by the format package var Indent = " " var longFormThreshold = 20 @@ -65,6 +65,52 @@ type GomegaStringer interface { GomegaString() string } +/* +CustomFormatters can be registered with Gomega via RegisterCustomFormatter() +Any value to be rendered by Gomega is passed to each registered CustomFormatters. 
+The CustomFormatter signals that it will handle formatting the value by returning (formatted-string, true) +If the CustomFormatter does not want to handle the object it should return ("", false) + +Strings returned by CustomFormatters are not truncated +*/ +type CustomFormatter func(value interface{}) (string, bool) +type CustomFormatterKey uint + +var customFormatterKey CustomFormatterKey = 1 + +type customFormatterKeyPair struct { + CustomFormatter + CustomFormatterKey +} + +/* +RegisterCustomFormatter registers a CustomFormatter and returns a CustomFormatterKey + +You can call UnregisterCustomFormatter with the returned key to unregister the associated CustomFormatter +*/ +func RegisterCustomFormatter(customFormatter CustomFormatter) CustomFormatterKey { + key := customFormatterKey + customFormatterKey += 1 + customFormatters = append(customFormatters, customFormatterKeyPair{customFormatter, key}) + return key +} + +/* +UnregisterCustomFormatter unregisters a previously registered CustomFormatter. You should pass in the key returned by RegisterCustomFormatter +*/ +func UnregisterCustomFormatter(key CustomFormatterKey) { + formatters := []customFormatterKeyPair{} + for _, f := range customFormatters { + if f.CustomFormatterKey == key { + continue + } + formatters = append(formatters, f) + } + customFormatters = formatters +} + +var customFormatters = []customFormatterKeyPair{} + /* Generates a formatted matcher success/failure message of the form: @@ -212,24 +258,35 @@ Set PrintContextObjects to true to print the content of objects implementing con func Object(object interface{}, indentation uint) string { indent := strings.Repeat(Indent, int(indentation)) value := reflect.ValueOf(object) - return fmt.Sprintf("%s<%s>: %s", indent, formatType(value), formatValue(value, indentation)) + commonRepresentation := "" + if err, ok := object.(error); ok { + commonRepresentation += "\n" + IndentString(err.Error(), indentation) + "\n" + indent + } + return fmt.Sprintf("%s<%s>: %s%s", indent, formatType(value), commonRepresentation, formatValue(value, indentation)) } /* IndentString takes a string and indents each line by the specified amount. 
*/ func IndentString(s string, indentation uint) string { + return indentString(s, indentation, true) +} + +func indentString(s string, indentation uint, indentFirstLine bool) string { + result := &strings.Builder{} components := strings.Split(s, "\n") - result := "" indent := strings.Repeat(Indent, int(indentation)) for i, component := range components { - result += indent + component + if i > 0 || indentFirstLine { + result.WriteString(indent) + } + result.WriteString(component) if i < len(components)-1 { - result += "\n" + result.WriteString("\n") } } - return result + return result.String() } func formatType(v reflect.Value) string { @@ -261,18 +318,27 @@ func formatValue(value reflect.Value, indentation uint) string { if value.CanInterface() { obj := value.Interface() + // if a CustomFormatter handles this values, we'll go with that + for _, customFormatter := range customFormatters { + formatted, handled := customFormatter.CustomFormatter(obj) + // do not truncate a user-provided CustomFormatter() + if handled { + return indentString(formatted, indentation+1, false) + } + } + // GomegaStringer will take precedence to other representations and disregards UseStringerRepresentation if x, ok := obj.(GomegaStringer); ok { - // do not truncate a user-defined GoMegaString() value - return x.GomegaString() + // do not truncate a user-defined GomegaString() value + return indentString(x.GomegaString(), indentation+1, false) } if UseStringerRepresentation { switch x := obj.(type) { case fmt.GoStringer: - return truncateLongStrings(x.GoString()) + return indentString(truncateLongStrings(x.GoString()), indentation+1, false) case fmt.Stringer: - return truncateLongStrings(x.String()) + return indentString(truncateLongStrings(x.String()), indentation+1, false) } } } diff --git a/api/vendor/github.com/onsi/gomega/gomega_dsl.go b/api/vendor/github.com/onsi/gomega/gomega_dsl.go index 6936e2411..bc7ec293d 100644 --- a/api/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/api/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.18.1" +const GOMEGA_VERSION = "1.27.8" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). @@ -34,7 +34,7 @@ Depending on your vendoring solution you may be inadvertently importing gomega a // to abstract between the standard package-level function implementations // and alternatives like *WithT. // -// The types in the top-level DSL have gotten a bit messy due to earlier depracations that avoid stuttering +// The types in the top-level DSL have gotten a bit messy due to earlier deprecations that avoid stuttering // and due to an accidental use of a concrete type (*WithT) in an earlier release. // // As of 1.15 both the WithT and Ginkgo variants of Gomega are implemented by the same underlying object @@ -52,7 +52,7 @@ var Default = Gomega(internal.NewGomega(internal.FetchDefaultDurationBundle())) // rich ecosystem of matchers without causing a test to fail. For example, to aggregate a series of potential failures // or for use in a non-test setting. 
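// A minimal sketch of the aggregation use-case mentioned above; the collectFailures
// helper and its assertion are illustrative, not part of this file:
//
//	func collectFailures() []string {
//		messages := []string{}
//		g := NewGomega(func(message string, callerSkip ...int) {
//			messages = append(messages, message)
//		})
//		g.Expect(2 + 2).To(Equal(5)) // recorded as a failure message; no test fails
//		return messages
//	}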
func NewGomega(fail types.GomegaFailHandler) Gomega { - return internal.NewGomega(Default.(*internal.Gomega).DurationBundle).ConfigureWithFailHandler(fail) + return internal.NewGomega(internalGomega(Default).DurationBundle).ConfigureWithFailHandler(fail) } // WithT wraps a *testing.T and provides `Expect`, `Eventually`, and `Consistently` methods. This allows you to leverage @@ -69,17 +69,31 @@ type WithT = internal.Gomega // GomegaWithT is deprecated in favor of gomega.WithT, which does not stutter. type GomegaWithT = WithT -// NewWithT takes a *testing.T and returngs a `gomega.WithT` allowing you to use `Expect`, `Eventually`, and `Consistently` along with +// inner is an interface that allows users to provide a wrapper around Default. The wrapper +// must implement the inner interface and return either the original Default or the result of +// a call to NewGomega(). +type inner interface { + Inner() Gomega +} + +func internalGomega(g Gomega) *internal.Gomega { + if v, ok := g.(inner); ok { + return v.Inner().(*internal.Gomega) + } + return g.(*internal.Gomega) +} + +// NewWithT takes a *testing.T and returns a `gomega.WithT` allowing you to use `Expect`, `Eventually`, and `Consistently` along with // Gomega's rich ecosystem of matchers in standard `testing` test suits. // -// func TestFarmHasCow(t *testing.T) { -// g := gomega.NewWithT(t) +// func TestFarmHasCow(t *testing.T) { +// g := gomega.NewWithT(t) // -// f := farm.New([]string{"Cow", "Horse"}) -// g.Expect(f.HasCow()).To(BeTrue(), "Farm should have cow") -// } +// f := farm.New([]string{"Cow", "Horse"}) +// g.Expect(f.HasCow()).To(BeTrue(), "Farm should have cow") +// } func NewWithT(t types.GomegaTestingT) *WithT { - return internal.NewGomega(Default.(*internal.Gomega).DurationBundle).ConfigureWithT(t) + return internal.NewGomega(internalGomega(Default).DurationBundle).ConfigureWithT(t) } // NewGomegaWithT is deprecated in favor of gomega.NewWithT, which does not stutter. @@ -88,37 +102,37 @@ var NewGomegaWithT = NewWithT // RegisterFailHandler connects Ginkgo to Gomega. When a matcher fails // the fail handler passed into RegisterFailHandler is called. func RegisterFailHandler(fail types.GomegaFailHandler) { - Default.(*internal.Gomega).ConfigureWithFailHandler(fail) + internalGomega(Default).ConfigureWithFailHandler(fail) } // RegisterFailHandlerWithT is deprecated and will be removed in a future release. // users should use RegisterFailHandler, or RegisterTestingT func RegisterFailHandlerWithT(_ types.GomegaTestingT, fail types.GomegaFailHandler) { fmt.Println("RegisterFailHandlerWithT is deprecated. Please use RegisterFailHandler or RegisterTestingT instead.") - Default.(*internal.Gomega).ConfigureWithFailHandler(fail) + internalGomega(Default).ConfigureWithFailHandler(fail) } // RegisterTestingT connects Gomega to Golang's XUnit style // Testing.T tests. It is now deprecated and you should use NewWithT() instead to get a fresh instance of Gomega for each test. func RegisterTestingT(t types.GomegaTestingT) { - Default.(*internal.Gomega).ConfigureWithT(t) + internalGomega(Default).ConfigureWithT(t) } // InterceptGomegaFailures runs a given callback and returns an array of // failure messages generated by any Gomega assertions within the callback. -// Exeuction continues after the first failure allowing users to collect all failures +// Execution continues after the first failure allowing users to collect all failures // in the callback. 
// // This is most useful when testing custom matchers, but can also be used to check // on a value using a Gomega assertion without causing a test failure. func InterceptGomegaFailures(f func()) []string { - originalHandler := Default.(*internal.Gomega).Fail + originalHandler := internalGomega(Default).Fail failures := []string{} - Default.(*internal.Gomega).Fail = func(message string, callerSkip ...int) { + internalGomega(Default).Fail = func(message string, callerSkip ...int) { failures = append(failures, message) } defer func() { - Default.(*internal.Gomega).Fail = originalHandler + internalGomega(Default).Fail = originalHandler }() f() return failures @@ -131,14 +145,14 @@ func InterceptGomegaFailures(f func()) []string { // does not register a failure with the FailHandler registered via RegisterFailHandler - it is up // to the user to decide what to do with the returned error func InterceptGomegaFailure(f func()) (err error) { - originalHandler := Default.(*internal.Gomega).Fail - Default.(*internal.Gomega).Fail = func(message string, callerSkip ...int) { + originalHandler := internalGomega(Default).Fail + internalGomega(Default).Fail = func(message string, callerSkip ...int) { err = errors.New(message) panic("stop execution") } defer func() { - Default.(*internal.Gomega).Fail = originalHandler + internalGomega(Default).Fail = originalHandler if e := recover(); e != nil { if err == nil { panic(e) @@ -151,13 +165,14 @@ func InterceptGomegaFailure(f func()) (err error) { } func ensureDefaultGomegaIsConfigured() { - if !Default.(*internal.Gomega).IsConfigured() { + if !internalGomega(Default).IsConfigured() { panic(nilGomegaPanic) } } // Ω wraps an actual value allowing assertions to be made on it: -// Ω("foo").Should(Equal("foo")) +// +// Ω("foo").Should(Equal("foo")) // // If Ω is passed more than one argument it will pass the *first* argument to the matcher. // All subsequent arguments will be required to be nil/zero. @@ -166,10 +181,13 @@ func ensureDefaultGomegaIsConfigured() { // a value and an error - a common patter in Go. // // For example, given a function with signature: -// func MyAmazingThing() (int, error) +// +// func MyAmazingThing() (int, error) // // Then: -// Ω(MyAmazingThing()).Should(Equal(3)) +// +// Ω(MyAmazingThing()).Should(Equal(3)) +// // Will succeed only if `MyAmazingThing()` returns `(3, nil)` // // Ω and Expect are identical @@ -179,19 +197,23 @@ func Ω(actual interface{}, extra ...interface{}) Assertion { } // Expect wraps an actual value allowing assertions to be made on it: -// Expect("foo").To(Equal("foo")) +// +// Expect("foo").To(Equal("foo")) // // If Expect is passed more than one argument it will pass the *first* argument to the matcher. // All subsequent arguments will be required to be nil/zero. // // This is convenient if you want to make an assertion on a method/function that returns -// a value and an error - a common patter in Go. +// a value and an error - a common pattern in Go. 
// // For example, given a function with signature: -// func MyAmazingThing() (int, error) +// +// func MyAmazingThing() (int, error) // // Then: -// Expect(MyAmazingThing()).Should(Equal(3)) +// +// Expect(MyAmazingThing()).Should(Equal(3)) +// // Will succeed only if `MyAmazingThing()` returns `(3, nil)` // // Expect and Ω are identical @@ -201,7 +223,8 @@ func Expect(actual interface{}, extra ...interface{}) Assertion { } // ExpectWithOffset wraps an actual value allowing assertions to be made on it: -// ExpectWithOffset(1, "foo").To(Equal("foo")) +// +// ExpectWithOffset(1, "foo").To(Equal("foo")) // // Unlike `Expect` and `Ω`, `ExpectWithOffset` takes an additional integer argument // that is used to modify the call-stack offset when computing line numbers. It is @@ -219,7 +242,7 @@ func ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Asse Eventually enables making assertions on asynchronous behavior. Eventually checks that an assertion *eventually* passes. Eventually blocks when called and attempts an assertion periodically until it passes or a timeout occurs. Both the timeout and polling interval are configurable as optional arguments. -The first optional argument is the timeout (which defaults to 1s), the second is the polling interval (which defaults to 10ms). Both intervals can be specified as time.Duration, parsable duration strings or floats/integers (in which case they are interpreted as seconds). +The first optional argument is the timeout (which defaults to 1s), the second is the polling interval (which defaults to 10ms). Both intervals can be specified as time.Duration, parsable duration strings or floats/integers (in which case they are interpreted as seconds). In addition an optional context.Context can be passed in - Eventually will keep trying until either the timeout epxires or the context is cancelled, whichever comes first. Eventually works with any Gomega compatible matcher and supports making assertions against three categories of actual value: @@ -227,15 +250,15 @@ Eventually works with any Gomega compatible matcher and supports making assertio There are several examples of values that can change over time. These can be passed in to Eventually and will be passed to the matcher repeatedly until a match occurs. For example: - c := make(chan bool) - go DoStuff(c) - Eventually(c, "50ms").Should(BeClosed()) + c := make(chan bool) + go DoStuff(c) + Eventually(c, "50ms").Should(BeClosed()) will poll the channel repeatedly until it is closed. In this example `Eventually` will block until either the specified timeout of 50ms has elapsed or the channel is closed, whichever comes first. -Several Gomega libraries allow you to use Eventually in this way. For example, the gomega/gexec package allows you to block until a *gexec.Session exits successfuly via: +Several Gomega libraries allow you to use Eventually in this way. For example, the gomega/gexec package allows you to block until a *gexec.Session exits successfully via: - Eventually(session).Should(gexec.Exit(0)) + Eventually(session).Should(gexec.Exit(0)) And the gomega/gbytes package allows you to monitor a streaming *gbytes.Buffer until a given string is seen: @@ -252,33 +275,57 @@ this will trigger Go's race detector as the goroutine polling via Eventually wil **Category 2: Make Eventually assertions on functions** -Eventually can be passed functions that **take no arguments** and **return at least one value**. 
When configured this way, Eventually will poll the function repeatedly and pass the first returned value to the matcher. +Eventually can be passed functions that **return at least one value**. When configured this way, Eventually will poll the function repeatedly and pass the first returned value to the matcher. For example: - Eventually(func() int { - return client.FetchCount() - }).Should(BeNumerically(">=", 17)) + Eventually(func() int { + return client.FetchCount() + }).Should(BeNumerically(">=", 17)) - will repeatedly poll client.FetchCount until the BeNumerically matcher is satisfied. (Note that this example could have been written as Eventually(client.FetchCount).Should(BeNumerically(">=", 17))) + will repeatedly poll client.FetchCount until the BeNumerically matcher is satisfied. (Note that this example could have been written as Eventually(client.FetchCount).Should(BeNumerically(">=", 17))) -If multple values are returned by the function, Eventually will pass the first value to the matcher and require that all others are zero-valued. This allows you to pass Eventually a function that returns a value and an error - a common patternin Go. +If multiple values are returned by the function, Eventually will pass the first value to the matcher and require that all others are zero-valued. This allows you to pass Eventually a function that returns a value and an error - a common pattern in Go. For example, consider a method that returns a value and an error: - func FetchFromDB() (string, error) + + func FetchFromDB() (string, error) Then - Eventually(FetchFromDB).Should(Equal("got it")) + + Eventually(FetchFromDB).Should(Equal("got it")) will pass only if and when the returned error is nil *and* the returned string satisfies the matcher. -It is important to note that the function passed into Eventually is invoked *synchronously* when polled. Eventually does not (in fact, it cannot) kill the function if it takes longer to return than Eventually's configured timeout. You should design your functions with this in mind. +Eventually can also accept functions that take arguments, however you must provide those arguments using .WithArguments(). For example, consider a function that takes a user-id and makes a network request to fetch a full name: + + func FetchFullName(userId int) (string, error) + +You can poll this function like so: + + Eventually(FetchFullName).WithArguments(1138).Should(Equal("Wookie")) + +It is important to note that the function passed into Eventually is invoked *synchronously* when polled. Eventually does not (in fact, it cannot) kill the function if it takes longer to return than Eventually's configured timeout. A common practice here is to use a context. Here's an example that combines Ginkgo's spec timeout support with Eventually: + + It("fetches the correct count", func(ctx SpecContext) { + Eventually(ctx, func() int { + return client.FetchCount(ctx, "/users") + }).Should(BeNumerically(">=", 17)) + }, SpecTimeout(time.Second)) + +you an also use Eventually().WithContext(ctx) to pass in the context. Passed-in contexts play nicely with paseed-in arguments as long as the context appears first. You can rewrite the above example as: + + It("fetches the correct count", func(ctx SpecContext) { + Eventually(client.FetchCount).WithContext(ctx).WithArguments("/users").Should(BeNumerically(">=", 17)) + }, SpecTimeout(time.Second)) + +Either way the context passd to Eventually is also passed to the underlying funciton. 
Now, when Ginkgo cancels the context both the FetchCount client and Gomega will be informed and can exit. **Category 3: Making assertions _in_ the function passed into Eventually** When testing complex systems it can be valuable to assert that a _set_ of assertions passes Eventually. Eventually supports this by accepting functions that take a single Gomega argument and return zero or more values. -Here's an example that makes some asssertions and returns a value and error: +Here's an example that makes some assertions and returns a value and error: Eventually(func(g Gomega) (Widget, error) { ids, err := client.FetchIDs() @@ -292,22 +339,48 @@ will pass only if all the assertions in the polled function pass and the return Eventually also supports a special case polling function that takes a single Gomega argument and returns no values. Eventually assumes such a function is making assertions and is designed to work with the Succeed matcher to validate that all assertions have passed. For example: - Eventually(func(g Gomega) { - model, err := client.Find(1138) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(model.Reticulate()).To(Succeed()) - g.Expect(model.IsReticulated()).To(BeTrue()) - g.Expect(model.Save()).To(Succeed()) - }).Should(Succeed()) + Eventually(func(g Gomega) { + model, err := client.Find(1138) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(model.Reticulate()).To(Succeed()) + g.Expect(model.IsReticulated()).To(BeTrue()) + g.Expect(model.Save()).To(Succeed()) + }).Should(Succeed()) will rerun the function until all assertions pass. -`Eventually` specifying a timeout interval (and an optional polling interval) are -the same as `Eventually(...).WithTimeout` or `Eventually(...).WithTimeout(...).WithPolling`. +You can also pass additional arguments to functions that take a Gomega. The only rule is that the Gomega argument must be first. If you also want to pass the context attached to Eventually you must ensure that it is the second argument. For example: + + Eventually(func(g Gomega, ctx context.Context, path string, expected ...string) { + tok, err := client.GetToken(ctx) + g.Expect(err).NotTo(HaveOccurred()) + + elements, err := client.Fetch(ctx, tok, path) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(elements).To(ConsistOf(expected)) + }).WithContext(ctx).WithArguments("/names", "Joe", "Jane", "Sam").Should(Succeed()) + +You can ensure that you get a number of consecutive successful tries before succeeding using `MustPassRepeatedly(int)`. For example: + + count := 0 + Eventually(func() bool { + count++ + return count > 1 + }).MustPassRepeatedly(2).Should(BeTrue()) + // Because we had to wait for 2 calls that returned true + Expect(count).To(Equal(3)) + +Finally, in addition to passing timeouts and a context to Eventually you can be more explicit with Eventually's chaining configuration methods: + + Eventually(..., "1s", "2s", ctx).Should(...) + +is equivalent to + + Eventually(...).WithTimeout(time.Second).WithPolling(2*time.Second).WithContext(ctx).Should(...) */ -func Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion { +func Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion { ensureDefaultGomegaIsConfigured() - return Default.Eventually(actual, intervals...) + return Default.Eventually(actualOrCtx, args...)
} // EventuallyWithOffset operates like Eventually but takes an additional @@ -319,9 +392,9 @@ func Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion { // `EventuallyWithOffset` specifying a timeout interval (and an optional polling interval) are // the same as `Eventually(...).WithOffset(...).WithTimeout` or // `Eventually(...).WithOffset(...).WithTimeout(...).WithPolling`. -func EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion { +func EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion { ensureDefaultGomegaIsConfigured() - return Default.EventuallyWithOffset(offset, actual, intervals...) + return Default.EventuallyWithOffset(offset, actualOrCtx, args...) } /* @@ -329,19 +402,19 @@ Consistently, like Eventually, enables making assertions on asynchronous behavio Consistently blocks when called for a specified duration. During that duration Consistently repeatedly polls its matcher and ensures that it is satisfied. If the matcher is consistently satisfied, then Consistently will pass. Otherwise Consistently will fail. -Both the total waiting duration and the polling interval are configurable as optional arguments. The first optional arugment is the duration that Consistently will run for (defaults to 100ms), and the second argument is the polling interval (defaults to 10ms). As with Eventually, these intervals can be passed in as time.Duration, parsable duration strings or an integer or float number of seconds. +Both the total waiting duration and the polling interval are configurable as optional arguments. The first optional argument is the duration that Consistently will run for (defaults to 100ms), and the second argument is the polling interval (defaults to 10ms). As with Eventually, these intervals can be passed in as time.Duration, parsable duration strings or an integer or float number of seconds. You can also pass in an optional context.Context - Consistently will exit early (with a failure) if the context is cancelled before the waiting duration expires. Consistently accepts the same three categories of actual as Eventually, check the Eventually docs to learn more. Consistently is useful in cases where you want to assert that something *does not happen* for a period of time. For example, you may want to assert that a goroutine does *not* send data down a channel. In this case you could write: - Consistently(channel, "200ms").ShouldNot(Receive()) + Consistently(channel, "200ms").ShouldNot(Receive()) This will block for 200 milliseconds and repeatedly check the channel and ensure nothing has been received. */ -func Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion { +func Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion { ensureDefaultGomegaIsConfigured() - return Default.Consistently(actual, intervals...) + return Default.Consistently(actualOrCtx, args...) } // ConsistentlyWithOffset operates like Consistently but takes an additional @@ -350,11 +423,54 @@ func Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion { // // `ConsistentlyWithOffset` is the same as `Consistently(...).WithOffset` and // optional `WithTimeout` and `WithPolling`. 
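As a minimal sketch of that equivalence (assuming a channel c as in the Consistently example above; the 200ms/20ms intervals are arbitrary):

	ConsistentlyWithOffset(1, c, "200ms", "20ms").ShouldNot(Receive())

	// the chained form of the same assertion:
	Consistently(c).WithOffset(1).WithTimeout(200 * time.Millisecond).WithPolling(20 * time.Millisecond).ShouldNot(Receive())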
-func ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion { +func ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion { ensureDefaultGomegaIsConfigured() - return Default.ConsistentlyWithOffset(offset, actual, intervals...) + return Default.ConsistentlyWithOffset(offset, actualOrCtx, args...) } +/* +StopTrying can be used to signal to Eventually and Consistently that they should abort and stop trying. This always results in a failure of the assertion - and the failure message is the content of the StopTrying signal. + +You can send the StopTrying signal by either returning StopTrying("message") as an error from your passed-in function _or_ by calling StopTrying("message").Now() to trigger a panic and end execution. + +You can also wrap StopTrying around an error with `StopTrying("message").Wrap(err)` and can attach additional objects via `StopTrying("message").Attach("description", object)`. When rendered, the signal will include the wrapped error and any attached objects rendered using Gomega's default formatting. + +Here are a couple of examples. This is how you might use StopTrying() as an error to signal that Eventually should stop: + + playerIndex, numPlayers := 0, 11 + Eventually(func() (string, error) { + if playerIndex == numPlayers { + return "", StopTrying("no more players left") + } + name := client.FetchPlayer(playerIndex) + playerIndex += 1 + return name, nil + }).Should(Equal("Patrick Mahomes")) + +And here's an example where `StopTrying().Now()` is called to halt execution immediately: + + Eventually(func() []string { + names, err := client.FetchAllPlayers() + if err == client.IRRECOVERABLE_ERROR { + StopTrying("Irrecoverable error occurred").Wrap(err).Now() + } + return names + }).Should(ContainElement("Patrick Mahomes")) +*/ +var StopTrying = internal.StopTrying + +/* +TryAgainAfter() allows you to adjust the polling interval for the _next_ iteration of `Eventually` or `Consistently`. Like `StopTrying` you can either return `TryAgainAfter` as an error or trigger it immediately with `.Now()`. + +When `TryAgainAfter` is triggered, `Eventually` and `Consistently` will wait for that duration. If a timeout occurs before the next poll is triggered, both `Eventually` and `Consistently` will always fail with the content of the TryAgainAfter message. As with StopTrying, you can `.Wrap()` an error and `.Attach()` additional objects to `TryAgainAfter`. +*/ +var TryAgainAfter = internal.TryAgainAfter + +/* +PollingSignalError is the error returned by StopTrying() and TryAgainAfter() +*/ +type PollingSignalError = internal.PollingSignalError + // SetDefaultEventuallyTimeout sets the default timeout duration for Eventually. Eventually will repeatedly poll your condition until it succeeds, or until this timeout elapses. func SetDefaultEventuallyTimeout(t time.Duration) { Default.SetDefaultEventuallyTimeout(t) @@ -388,8 +504,8 @@ func SetDefaultConsistentlyPollingInterval(t time.Duration) { // // Example: // -// Eventually(myChannel).Should(Receive(), "Something should have come down the pipe.") -// Consistently(myChannel).ShouldNot(Receive(), func() string { return "Nothing should have come down the pipe." }) +// Eventually(myChannel).Should(Receive(), "Something should have come down the pipe.") +// Consistently(myChannel).ShouldNot(Receive(), func() string { return "Nothing should have come down the pipe."
}) type AsyncAssertion = types.AsyncAssertion // GomegaAsyncAssertion is deprecated in favor of AsyncAssertion, which does not stutter. @@ -411,7 +527,7 @@ type GomegaAsyncAssertion = types.AsyncAssertion // // Example: // -// Ω(farm.HasCow()).Should(BeTrue(), "Farm %v should have a cow", farm) +// Ω(farm.HasCow()).Should(BeTrue(), "Farm %v should have a cow", farm) type Assertion = types.Assertion // GomegaAssertion is deprecated in favor of Assertion, which does not stutter. diff --git a/api/vendor/github.com/onsi/gomega/internal/assertion.go b/api/vendor/github.com/onsi/gomega/internal/assertion.go index b3c26889a..08356a610 100644 --- a/api/vendor/github.com/onsi/gomega/internal/assertion.go +++ b/api/vendor/github.com/onsi/gomega/internal/assertion.go @@ -4,6 +4,7 @@ import ( "fmt" "reflect" + "github.com/onsi/gomega/format" "github.com/onsi/gomega/types" ) @@ -45,26 +46,31 @@ func (assertion *Assertion) Error() types.Assertion { func (assertion *Assertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { assertion.g.THelper() + vetOptionalDescription("Assertion", optionalDescription...) return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, true, optionalDescription...) } func (assertion *Assertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { assertion.g.THelper() + vetOptionalDescription("Assertion", optionalDescription...) return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, false, optionalDescription...) } func (assertion *Assertion) To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { assertion.g.THelper() + vetOptionalDescription("Assertion", optionalDescription...) return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, true, optionalDescription...) } func (assertion *Assertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { assertion.g.THelper() + vetOptionalDescription("Assertion", optionalDescription...) return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, false, optionalDescription...) } func (assertion *Assertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { assertion.g.THelper() + vetOptionalDescription("Assertion", optionalDescription...) return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, false, optionalDescription...) 
} @@ -141,7 +147,12 @@ func vetActuals(actuals []interface{}, skipIndex int) (bool, string) { if actual != nil { zeroValue := reflect.Zero(reflect.TypeOf(actual)).Interface() if !reflect.DeepEqual(zeroValue, actual) { - message := fmt.Sprintf("Unexpected non-nil/non-zero argument at index %d:\n\t<%T>: %#v", i, actual, actual) + var message string + if err, ok := actual.(error); ok { + message = fmt.Sprintf("Unexpected error: %s\n%s", err, format.Object(err, 1)) + } else { + message = fmt.Sprintf("Unexpected non-nil/non-zero argument at index %d:\n\t<%T>: %#v", i, actual, actual) + } return false, message } } diff --git a/api/vendor/github.com/onsi/gomega/internal/async_assertion.go b/api/vendor/github.com/onsi/gomega/internal/async_assertion.go index 99f4ebcfe..1188b0bce 100644 --- a/api/vendor/github.com/onsi/gomega/internal/async_assertion.go +++ b/api/vendor/github.com/onsi/gomega/internal/async_assertion.go @@ -1,15 +1,53 @@ package internal import ( + "context" "errors" "fmt" "reflect" "runtime" + "sync" "time" + "github.com/onsi/gomega/format" "github.com/onsi/gomega/types" ) +var errInterface = reflect.TypeOf((*error)(nil)).Elem() +var gomegaType = reflect.TypeOf((*types.Gomega)(nil)).Elem() +var contextType = reflect.TypeOf(new(context.Context)).Elem() + +type formattedGomegaError interface { + FormattedGomegaError() string +} + +type asyncPolledActualError struct { + message string +} + +func (err *asyncPolledActualError) Error() string { + return err.message +} + +func (err *asyncPolledActualError) FormattedGomegaError() string { + return err.message +} + +type contextWithAttachProgressReporter interface { + AttachProgressReporter(func() string) func() +} + +type asyncGomegaHaltExecutionError struct{} + +func (a asyncGomegaHaltExecutionError) GinkgoRecoverShouldIgnoreThisPanic() {} +func (a asyncGomegaHaltExecutionError) Error() string { + return `An assertion has failed in a goroutine. You should call + + defer GinkgoRecover() + +at the top of the goroutine that caused this panic. 
This will allow Ginkgo and Gomega to correctly capture and manage this panic.` +} + type AsyncAssertionType uint const ( @@ -17,71 +55,45 @@ const ( AsyncAssertionTypeConsistently ) +func (at AsyncAssertionType) String() string { + switch at { + case AsyncAssertionTypeEventually: + return "Eventually" + case AsyncAssertionTypeConsistently: + return "Consistently" + } + return "INVALID ASYNC ASSERTION TYPE" +} + type AsyncAssertion struct { asyncType AsyncAssertionType - actualIsFunc bool - actualValue interface{} - actualFunc func() ([]reflect.Value, error) + actualIsFunc bool + actual interface{} + argsToForward []interface{} - timeoutInterval time.Duration - pollingInterval time.Duration - offset int - g *Gomega + timeoutInterval time.Duration + pollingInterval time.Duration + mustPassRepeatedly int + ctx context.Context + offset int + g *Gomega } -func NewAsyncAssertion(asyncType AsyncAssertionType, actualInput interface{}, g *Gomega, timeoutInterval time.Duration, pollingInterval time.Duration, offset int) *AsyncAssertion { +func NewAsyncAssertion(asyncType AsyncAssertionType, actualInput interface{}, g *Gomega, timeoutInterval time.Duration, pollingInterval time.Duration, mustPassRepeatedly int, ctx context.Context, offset int) *AsyncAssertion { out := &AsyncAssertion{ - asyncType: asyncType, - timeoutInterval: timeoutInterval, - pollingInterval: pollingInterval, - offset: offset, - g: g, + asyncType: asyncType, + timeoutInterval: timeoutInterval, + pollingInterval: pollingInterval, + mustPassRepeatedly: mustPassRepeatedly, + offset: offset, + ctx: ctx, + g: g, } - switch actualType := reflect.TypeOf(actualInput); { - case actualType.Kind() != reflect.Func: - out.actualValue = actualInput - case actualType.NumIn() == 0 && actualType.NumOut() > 0: + out.actual = actualInput + if actualInput != nil && reflect.TypeOf(actualInput).Kind() == reflect.Func { out.actualIsFunc = true - out.actualFunc = func() ([]reflect.Value, error) { - return reflect.ValueOf(actualInput).Call([]reflect.Value{}), nil - } - case actualType.NumIn() == 1 && actualType.In(0).Implements(reflect.TypeOf((*types.Gomega)(nil)).Elem()): - out.actualIsFunc = true - out.actualFunc = func() (values []reflect.Value, err error) { - var assertionFailure error - assertionCapturingGomega := NewGomega(g.DurationBundle).ConfigureWithFailHandler(func(message string, callerSkip ...int) { - skip := 0 - if len(callerSkip) > 0 { - skip = callerSkip[0] - } - _, file, line, _ := runtime.Caller(skip + 1) - assertionFailure = fmt.Errorf("Assertion in callback at %s:%d failed:\n%s", file, line, message) - panic("stop execution") - }) - - defer func() { - if actualType.NumOut() == 0 { - if assertionFailure == nil { - values = []reflect.Value{reflect.Zero(reflect.TypeOf((*error)(nil)).Elem())} - } else { - values = []reflect.Value{reflect.ValueOf(assertionFailure)} - } - } else { - err = assertionFailure - } - if e := recover(); e != nil && assertionFailure == nil { - panic(e) - } - }() - - values = reflect.ValueOf(actualInput).Call([]reflect.Value{reflect.ValueOf(assertionCapturingGomega)}) - return - } - default: - msg := fmt.Sprintf("The function passed to Gomega's async assertions should either take no arguments and return values, or take a single Gomega interface that it can use to make assertions within the body of the function. When taking a Gomega interface the function can optionally return values or return nothing. 
The function you passed takes %d arguments and returns %d values.", actualType.NumIn(), actualType.NumOut()) - g.Fail(msg, offset+4) } return out @@ -102,13 +114,40 @@ func (assertion *AsyncAssertion) WithPolling(interval time.Duration) types.Async return assertion } +func (assertion *AsyncAssertion) Within(timeout time.Duration) types.AsyncAssertion { + assertion.timeoutInterval = timeout + return assertion +} + +func (assertion *AsyncAssertion) ProbeEvery(interval time.Duration) types.AsyncAssertion { + assertion.pollingInterval = interval + return assertion +} + +func (assertion *AsyncAssertion) WithContext(ctx context.Context) types.AsyncAssertion { + assertion.ctx = ctx + return assertion +} + +func (assertion *AsyncAssertion) WithArguments(argsToForward ...interface{}) types.AsyncAssertion { + assertion.argsToForward = argsToForward + return assertion +} + +func (assertion *AsyncAssertion) MustPassRepeatedly(count int) types.AsyncAssertion { + assertion.mustPassRepeatedly = count + return assertion +} + func (assertion *AsyncAssertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { assertion.g.THelper() + vetOptionalDescription("Asynchronous assertion", optionalDescription...) return assertion.match(matcher, true, optionalDescription...) } func (assertion *AsyncAssertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { assertion.g.THelper() + vetOptionalDescription("Asynchronous assertion", optionalDescription...) return assertion.match(matcher, false, optionalDescription...) } @@ -124,112 +163,409 @@ func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interfa return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n" } -func (assertion *AsyncAssertion) pollActual() (interface{}, error) { +func (assertion *AsyncAssertion) processReturnValues(values []reflect.Value) (interface{}, error) { + if len(values) == 0 { + return nil, &asyncPolledActualError{ + message: fmt.Sprintf("The function passed to %s did not return any values", assertion.asyncType), + } + } + + actual := values[0].Interface() + if _, ok := AsPollingSignalError(actual); ok { + return actual, actual.(error) + } + + var err error + for i, extraValue := range values[1:] { + extra := extraValue.Interface() + if extra == nil { + continue + } + if _, ok := AsPollingSignalError(extra); ok { + return actual, extra.(error) + } + extraType := reflect.TypeOf(extra) + zero := reflect.Zero(extraType).Interface() + if reflect.DeepEqual(extra, zero) { + continue + } + if i == len(values)-2 && extraType.Implements(errInterface) { + err = extra.(error) + } + if err == nil { + err = &asyncPolledActualError{ + message: fmt.Sprintf("The function passed to %s had an unexpected non-nil/non-zero return value at index %d:\n%s", assertion.asyncType, i+1, format.Object(extra, 1)), + } + } + } + + return actual, err +} + +func (assertion *AsyncAssertion) invalidFunctionError(t reflect.Type) error { + return fmt.Errorf(`The function passed to %s had an invalid signature of %s. Functions passed to %s must either: + + (a) have return values or + (b) take a Gomega interface as their first argument and use that Gomega instance to make assertions. 
+ +You can learn more at https://onsi.github.io/gomega/#eventually +`, assertion.asyncType, t, assertion.asyncType) +} + +func (assertion *AsyncAssertion) noConfiguredContextForFunctionError() error { + return fmt.Errorf(`The function passed to %s requested a context.Context, but no context has been provided. Please pass one in using %s().WithContext(). + +You can learn more at https://onsi.github.io/gomega/#eventually +`, assertion.asyncType, assertion.asyncType) +} + +func (assertion *AsyncAssertion) argumentMismatchError(t reflect.Type, numProvided int) error { + have := "have" + if numProvided == 1 { + have = "has" + } + return fmt.Errorf(`The function passed to %s has signature %s and takes %d arguments, but %d %s been provided. Please use %s().WithArguments() to pass the correct set of arguments. + +You can learn more at https://onsi.github.io/gomega/#eventually +`, assertion.asyncType, t, t.NumIn(), numProvided, have, assertion.asyncType) +} + +func (assertion *AsyncAssertion) invalidMustPassRepeatedlyError(reason string) error { + return fmt.Errorf(`Invalid use of MustPassRepeatedly with %s: %s + +You can learn more at https://onsi.github.io/gomega/#eventually +`, assertion.asyncType, reason) +} + +func (assertion *AsyncAssertion) buildActualPoller() (func() (interface{}, error), error) { if !assertion.actualIsFunc { - return assertion.actualValue, nil + return func() (interface{}, error) { return assertion.actual, nil }, nil } + actualValue := reflect.ValueOf(assertion.actual) + actualType := reflect.TypeOf(assertion.actual) + numIn, numOut, isVariadic := actualType.NumIn(), actualType.NumOut(), actualType.IsVariadic() - values, err := assertion.actualFunc() - if err != nil { - return nil, err + if numIn == 0 && numOut == 0 { + return nil, assertion.invalidFunctionError(actualType) } - extras := []interface{}{nil} - for _, value := range values[1:] { - extras = append(extras, value.Interface()) + takesGomega, takesContext := false, false + if numIn > 0 { + takesGomega, takesContext = actualType.In(0).Implements(gomegaType), actualType.In(0).Implements(contextType) } - success, message := vetActuals(extras, 0) - if !success { - return nil, errors.New(message) + if takesGomega && numIn > 1 && actualType.In(1).Implements(contextType) { + takesContext = true + } + if takesContext && len(assertion.argsToForward) > 0 && reflect.TypeOf(assertion.argsToForward[0]).Implements(contextType) { + takesContext = false + } + if !takesGomega && numOut == 0 { + return nil, assertion.invalidFunctionError(actualType) + } + if takesContext && assertion.ctx == nil { + return nil, assertion.noConfiguredContextForFunctionError() + } + + var assertionFailure error + inValues := []reflect.Value{} + if takesGomega { + inValues = append(inValues, reflect.ValueOf(NewGomega(assertion.g.DurationBundle).ConfigureWithFailHandler(func(message string, callerSkip ...int) { + skip := 0 + if len(callerSkip) > 0 { + skip = callerSkip[0] + } + _, file, line, _ := runtime.Caller(skip + 1) + assertionFailure = &asyncPolledActualError{ + message: fmt.Sprintf("The function passed to %s failed at %s:%d with:\n%s", assertion.asyncType, file, line, message), + } + // we throw an asyncGomegaHaltExecutionError so that defer GinkgoRecover() can catch this error if the user makes an assertion in a goroutine + panic(asyncGomegaHaltExecutionError{}) + }))) + } + if takesContext { + inValues = append(inValues, reflect.ValueOf(assertion.ctx)) + } + for _, arg := range assertion.argsToForward { + inValues = append(inValues,
reflect.ValueOf(arg)) + } + + if !isVariadic && numIn != len(inValues) { + return nil, assertion.argumentMismatchError(actualType, len(inValues)) + } else if isVariadic && len(inValues) < numIn-1 { + return nil, assertion.argumentMismatchError(actualType, len(inValues)) } - return values[0].Interface(), nil + if assertion.mustPassRepeatedly != 1 && assertion.asyncType != AsyncAssertionTypeEventually { + return nil, assertion.invalidMustPassRepeatedlyError("it can only be used with Eventually") + } + if assertion.mustPassRepeatedly < 1 { + return nil, assertion.invalidMustPassRepeatedlyError("parameter can't be < 1") + } + + return func() (actual interface{}, err error) { + var values []reflect.Value + assertionFailure = nil + defer func() { + if numOut == 0 && takesGomega { + actual = assertionFailure + } else { + actual, err = assertion.processReturnValues(values) + _, isAsyncError := AsPollingSignalError(err) + if assertionFailure != nil && !isAsyncError { + err = assertionFailure + } + } + if e := recover(); e != nil { + if _, isAsyncError := AsPollingSignalError(e); isAsyncError { + err = e.(error) + } else if assertionFailure == nil { + panic(e) + } + } + }() + values = actualValue.Call(inValues) + return + }, nil +} + +func (assertion *AsyncAssertion) afterTimeout() <-chan time.Time { + if assertion.timeoutInterval >= 0 { + return time.After(assertion.timeoutInterval) + } + + if assertion.asyncType == AsyncAssertionTypeConsistently { + return time.After(assertion.g.DurationBundle.ConsistentlyDuration) + } else { + if assertion.ctx == nil { + return time.After(assertion.g.DurationBundle.EventuallyTimeout) + } else { + return nil + } + } } -func (assertion *AsyncAssertion) matcherMayChange(matcher types.GomegaMatcher, value interface{}) bool { - if assertion.actualIsFunc { - return true +func (assertion *AsyncAssertion) afterPolling() <-chan time.Time { + if assertion.pollingInterval >= 0 { + return time.After(assertion.pollingInterval) } - return types.MatchMayChangeInTheFuture(matcher, value) + if assertion.asyncType == AsyncAssertionTypeConsistently { + return time.After(assertion.g.DurationBundle.ConsistentlyPollingInterval) + } else { + return time.After(assertion.g.DurationBundle.EventuallyPollingInterval) + } +} + +func (assertion *AsyncAssertion) matcherSaysStopTrying(matcher types.GomegaMatcher, value interface{}) bool { + if assertion.actualIsFunc || types.MatchMayChangeInTheFuture(matcher, value) { + return false + } + return true +} + +func (assertion *AsyncAssertion) pollMatcher(matcher types.GomegaMatcher, value interface{}) (matches bool, err error) { + defer func() { + if e := recover(); e != nil { + if _, isAsyncError := AsPollingSignalError(e); isAsyncError { + err = e.(error) + } else { + panic(e) + } + } + }() + + matches, err = matcher.Match(value) + + return } func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool { timer := time.Now() - timeout := time.After(assertion.timeoutInterval) + timeout := assertion.afterTimeout() + lock := sync.Mutex{} - var matches bool - var err error - mayChange := true - value, err := assertion.pollActual() - if err == nil { - mayChange = assertion.matcherMayChange(matcher, value) - matches, err = matcher.Match(value) - } + var matches, hasLastValidActual bool + var actual, lastValidActual interface{} + var actualErr, matcherErr error + var oracleMatcherSaysStop bool assertion.g.THelper() - fail := func(preamble string) { - errMsg := "" + pollActual, 
buildActualPollerErr := assertion.buildActualPoller() + if buildActualPollerErr != nil { + assertion.g.Fail(buildActualPollerErr.Error(), 2+assertion.offset) + return false + } + + actual, actualErr = pollActual() + if actualErr == nil { + lastValidActual = actual + hasLastValidActual = true + oracleMatcherSaysStop = assertion.matcherSaysStopTrying(matcher, actual) + matches, matcherErr = assertion.pollMatcher(matcher, actual) + } + + renderError := func(preamble string, err error) string { message := "" - if err != nil { - errMsg = "Error: " + err.Error() + if pollingSignalErr, ok := AsPollingSignalError(err); ok { + message = err.Error() + for _, attachment := range pollingSignalErr.Attachments { + message += fmt.Sprintf("\n%s:\n", attachment.Description) + message += format.Object(attachment.Object, 1) + } } else { - if desiredMatch { - message = matcher.FailureMessage(value) + message = preamble + "\n" + format.Object(err, 1) + } + return message + } + + messageGenerator := func() string { + // can be called out of band by Ginkgo if the user requests a progress report + lock.Lock() + defer lock.Unlock() + message := "" + + if actualErr == nil { + if matcherErr == nil { + if desiredMatch != matches { + if desiredMatch { + message += matcher.FailureMessage(actual) + } else { + message += matcher.NegatedFailureMessage(actual) + } + } else { + if assertion.asyncType == AsyncAssertionTypeConsistently { + message += "There is no failure as the matcher passed to Consistently has not yet failed" + } else { + message += "There is no failure as the matcher passed to Eventually succeeded on its most recent iteration" + } + } } else { - message = matcher.NegatedFailureMessage(value) + var fgErr formattedGomegaError + if errors.As(actualErr, &fgErr) { + message += fgErr.FormattedGomegaError() + "\n" + } else { + message += renderError(fmt.Sprintf("The matcher passed to %s returned the following error:", assertion.asyncType), matcherErr) + } + } + } else { + var fgErr formattedGomegaError + if errors.As(actualErr, &fgErr) { + message += fgErr.FormattedGomegaError() + "\n" + } else { + message += renderError(fmt.Sprintf("The function passed to %s returned the following error:", assertion.asyncType), actualErr) + } + if hasLastValidActual { + message += fmt.Sprintf("\nAt one point, however, the function did return successfully.\nYet, %s failed because", assertion.asyncType) + _, e := matcher.Match(lastValidActual) + if e != nil { + message += renderError(" the matcher returned the following error:", e) + } else { + message += " the matcher was not satisfied:\n" + if desiredMatch { + message += matcher.FailureMessage(lastValidActual) + } else { + message += matcher.NegatedFailureMessage(lastValidActual) + } + } } } - assertion.g.THelper() + description := assertion.buildDescription(optionalDescription...) - assertion.g.Fail(fmt.Sprintf("%s after %.3fs.\n%s%s%s", preamble, time.Since(timer).Seconds(), description, message, errMsg), 3+assertion.offset) + return fmt.Sprintf("%s%s", description, message) } - if assertion.asyncType == AsyncAssertionTypeEventually { - for { - if err == nil && matches == desiredMatch { - return true - } + fail := func(preamble string) { + assertion.g.THelper() + assertion.g.Fail(fmt.Sprintf("%s after %.3fs.\n%s", preamble, time.Since(timer).Seconds(), messageGenerator()), 3+assertion.offset) + } - if !mayChange { - fail("No future change is possible. 
Bailing out early") - return false - } + var contextDone <-chan struct{} + if assertion.ctx != nil { + contextDone = assertion.ctx.Done() + if v, ok := assertion.ctx.Value("GINKGO_SPEC_CONTEXT").(contextWithAttachProgressReporter); ok { + detach := v.AttachProgressReporter(messageGenerator) + defer detach() + } + } - select { - case <-time.After(assertion.pollingInterval): - value, err = assertion.pollActual() - if err == nil { - mayChange = assertion.matcherMayChange(matcher, value) - matches, err = matcher.Match(value) + // Used to count the number of times in a row a step passed + passedRepeatedlyCount := 0 + for { + var nextPoll <-chan time.Time = nil + var isTryAgainAfterError = false + + for _, err := range []error{actualErr, matcherErr} { + if pollingSignalErr, ok := AsPollingSignalError(err); ok { + if pollingSignalErr.IsStopTrying() { + fail("Told to stop trying") + return false + } + if pollingSignalErr.IsTryAgainAfter() { + nextPoll = time.After(pollingSignalErr.TryAgainDuration()) + isTryAgainAfterError = true } - case <-timeout: - fail("Timed out") - return false } } - } else if assertion.asyncType == AsyncAssertionTypeConsistently { - for { - if !(err == nil && matches == desiredMatch) { + + if actualErr == nil && matcherErr == nil && matches == desiredMatch { + if assertion.asyncType == AsyncAssertionTypeEventually { + passedRepeatedlyCount += 1 + if passedRepeatedlyCount == assertion.mustPassRepeatedly { + return true + } + } + } else if !isTryAgainAfterError { + if assertion.asyncType == AsyncAssertionTypeConsistently { fail("Failed") return false } + // Reset the consecutive pass count + passedRepeatedlyCount = 0 + } - if !mayChange { + if oracleMatcherSaysStop { + if assertion.asyncType == AsyncAssertionTypeEventually { + fail("No future change is possible. 
Bailing out early") + return false + } else { return true } + } + + if nextPoll == nil { + nextPoll = assertion.afterPolling() + } - select { - case <-time.After(assertion.pollingInterval): - value, err = assertion.pollActual() - if err == nil { - mayChange = assertion.matcherMayChange(matcher, value) - matches, err = matcher.Match(value) + select { + case <-nextPoll: + a, e := pollActual() + lock.Lock() + actual, actualErr = a, e + lock.Unlock() + if actualErr == nil { + lock.Lock() + lastValidActual = actual + hasLastValidActual = true + lock.Unlock() + oracleMatcherSaysStop = assertion.matcherSaysStopTrying(matcher, actual) + m, e := assertion.pollMatcher(matcher, actual) + lock.Lock() + matches, matcherErr = m, e + lock.Unlock() + } + case <-contextDone: + fail("Context was cancelled") + return false + case <-timeout: + if assertion.asyncType == AsyncAssertionTypeEventually { + fail("Timed out") + return false + } else { + if isTryAgainAfterError { + fail("Timed out while waiting on TryAgainAfter") + return false } - case <-timeout: return true } } } - - return false } diff --git a/api/vendor/github.com/onsi/gomega/internal/duration_bundle.go b/api/vendor/github.com/onsi/gomega/internal/duration_bundle.go index af8d989fa..6e0d90d3a 100644 --- a/api/vendor/github.com/onsi/gomega/internal/duration_bundle.go +++ b/api/vendor/github.com/onsi/gomega/internal/duration_bundle.go @@ -44,28 +44,28 @@ func durationFromEnv(key string, defaultDuration time.Duration) time.Duration { return duration } -func toDuration(input interface{}) time.Duration { +func toDuration(input interface{}) (time.Duration, error) { duration, ok := input.(time.Duration) if ok { - return duration + return duration, nil } value := reflect.ValueOf(input) kind := reflect.TypeOf(input).Kind() if reflect.Int <= kind && kind <= reflect.Int64 { - return time.Duration(value.Int()) * time.Second + return time.Duration(value.Int()) * time.Second, nil } else if reflect.Uint <= kind && kind <= reflect.Uint64 { - return time.Duration(value.Uint()) * time.Second + return time.Duration(value.Uint()) * time.Second, nil } else if reflect.Float32 <= kind && kind <= reflect.Float64 { - return time.Duration(value.Float() * float64(time.Second)) + return time.Duration(value.Float() * float64(time.Second)), nil } else if reflect.String == kind { duration, err := time.ParseDuration(value.String()) if err != nil { - panic(fmt.Sprintf("%#v is not a valid parsable duration string.", input)) + return 0, fmt.Errorf("%#v is not a valid parsable duration string: %w", input, err) } - return duration + return duration, nil } - panic(fmt.Sprintf("%v is not a valid interval. Must be time.Duration, parsable duration string or a number.", input)) + return 0, fmt.Errorf("%#v is not a valid interval. Must be a time.Duration, a parsable duration string, or a number.", input) } diff --git a/api/vendor/github.com/onsi/gomega/internal/gomega.go b/api/vendor/github.com/onsi/gomega/internal/gomega.go index d26a67485..de1f4f336 100644 --- a/api/vendor/github.com/onsi/gomega/internal/gomega.go +++ b/api/vendor/github.com/onsi/gomega/internal/gomega.go @@ -1,6 +1,7 @@ package internal import ( + "context" "time" "github.com/onsi/gomega/types" @@ -51,38 +52,64 @@ func (g *Gomega) ExpectWithOffset(offset int, actual interface{}, extra ...inter return NewAssertion(actual, g, offset, extra...) } -func (g *Gomega) Eventually(actual interface{}, intervals ...interface{}) types.AsyncAssertion { - return g.EventuallyWithOffset(0, actual, intervals...) 
+func (g *Gomega) Eventually(actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion { + return g.makeAsyncAssertion(AsyncAssertionTypeEventually, 0, actualOrCtx, args...) } -func (g *Gomega) EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) types.AsyncAssertion { - timeoutInterval := g.DurationBundle.EventuallyTimeout - pollingInterval := g.DurationBundle.EventuallyPollingInterval - if len(intervals) > 0 { - timeoutInterval = toDuration(intervals[0]) - } - if len(intervals) > 1 { - pollingInterval = toDuration(intervals[1]) - } +func (g *Gomega) EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion { + return g.makeAsyncAssertion(AsyncAssertionTypeEventually, offset, actualOrCtx, args...) +} - return NewAsyncAssertion(AsyncAssertionTypeEventually, actual, g, timeoutInterval, pollingInterval, offset) +func (g *Gomega) Consistently(actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion { + return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, 0, actualOrCtx, args...) } -func (g *Gomega) Consistently(actual interface{}, intervals ...interface{}) types.AsyncAssertion { - return g.ConsistentlyWithOffset(0, actual, intervals...) +func (g *Gomega) ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion { + return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, offset, actualOrCtx, args...) } -func (g *Gomega) ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) types.AsyncAssertion { - timeoutInterval := g.DurationBundle.ConsistentlyDuration - pollingInterval := g.DurationBundle.ConsistentlyPollingInterval +func (g *Gomega) makeAsyncAssertion(asyncAssertionType AsyncAssertionType, offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion { + baseOffset := 3 + timeoutInterval := -time.Duration(1) + pollingInterval := -time.Duration(1) + intervals := []interface{}{} + var ctx context.Context + + actual := actualOrCtx + startingIndex := 0 + if _, isCtx := actualOrCtx.(context.Context); isCtx && len(args) > 0 { + // the first argument is a context; we should accept it as the context _only if_ it is **not** the only argument **and** the second argument is not a parsable duration + // this is due to an unfortunate ambiguity in early versions of Gomega in which multi-type durations are allowed after the actual + if _, err := toDuration(args[0]); err != nil { + ctx = actualOrCtx.(context.Context) + actual = args[0] + startingIndex = 1 + } + } + + for _, arg := range args[startingIndex:] { + switch v := arg.(type) { + case context.Context: + ctx = v + default: + intervals = append(intervals, arg) + } + } + var err error if len(intervals) > 0 { - timeoutInterval = toDuration(intervals[0]) + timeoutInterval, err = toDuration(intervals[0]) + if err != nil { + g.Fail(err.Error(), offset+baseOffset) + } } if len(intervals) > 1 { - pollingInterval = toDuration(intervals[1]) + pollingInterval, err = toDuration(intervals[1]) + if err != nil { + g.Fail(err.Error(), offset+baseOffset) + } } - return NewAsyncAssertion(AsyncAssertionTypeConsistently, actual, g, timeoutInterval, pollingInterval, offset) + return NewAsyncAssertion(asyncAssertionType, actual, g, timeoutInterval, pollingInterval, 1, ctx, offset) } func (g *Gomega) SetDefaultEventuallyTimeout(t time.Duration) { diff --git a/api/vendor/github.com/onsi/gomega/internal/polling_signal_error.go
b/api/vendor/github.com/onsi/gomega/internal/polling_signal_error.go new file mode 100644 index 000000000..83b04b1a4 --- /dev/null +++ b/api/vendor/github.com/onsi/gomega/internal/polling_signal_error.go @@ -0,0 +1,106 @@ +package internal + +import ( + "errors" + "fmt" + "time" +) + +type PollingSignalErrorType int + +const ( + PollingSignalErrorTypeStopTrying PollingSignalErrorType = iota + PollingSignalErrorTypeTryAgainAfter +) + +type PollingSignalError interface { + error + Wrap(err error) PollingSignalError + Attach(description string, obj any) PollingSignalError + Now() +} + +var StopTrying = func(message string) PollingSignalError { + return &PollingSignalErrorImpl{ + message: message, + pollingSignalErrorType: PollingSignalErrorTypeStopTrying, + } +} + +var TryAgainAfter = func(duration time.Duration) PollingSignalError { + return &PollingSignalErrorImpl{ + message: fmt.Sprintf("told to try again after %s", duration), + duration: duration, + pollingSignalErrorType: PollingSignalErrorTypeTryAgainAfter, + } +} + +type PollingSignalErrorAttachment struct { + Description string + Object any +} + +type PollingSignalErrorImpl struct { + message string + wrappedErr error + pollingSignalErrorType PollingSignalErrorType + duration time.Duration + Attachments []PollingSignalErrorAttachment +} + +func (s *PollingSignalErrorImpl) Wrap(err error) PollingSignalError { + s.wrappedErr = err + return s +} + +func (s *PollingSignalErrorImpl) Attach(description string, obj any) PollingSignalError { + s.Attachments = append(s.Attachments, PollingSignalErrorAttachment{description, obj}) + return s +} + +func (s *PollingSignalErrorImpl) Error() string { + if s.wrappedErr == nil { + return s.message + } else { + return s.message + ": " + s.wrappedErr.Error() + } +} + +func (s *PollingSignalErrorImpl) Unwrap() error { + if s == nil { + return nil + } + return s.wrappedErr +} + +func (s *PollingSignalErrorImpl) Now() { + panic(s) +} + +func (s *PollingSignalErrorImpl) IsStopTrying() bool { + return s.pollingSignalErrorType == PollingSignalErrorTypeStopTrying +} + +func (s *PollingSignalErrorImpl) IsTryAgainAfter() bool { + return s.pollingSignalErrorType == PollingSignalErrorTypeTryAgainAfter +} + +func (s *PollingSignalErrorImpl) TryAgainDuration() time.Duration { + return s.duration +} + +func AsPollingSignalError(actual interface{}) (*PollingSignalErrorImpl, bool) { + if actual == nil { + return nil, false + } + if actualErr, ok := actual.(error); ok { + var target *PollingSignalErrorImpl + if errors.As(actualErr, &target) { + return target, true + } else { + return nil, false + } + } + + return nil, false +} diff --git a/api/vendor/github.com/onsi/gomega/internal/vetoptdesc.go b/api/vendor/github.com/onsi/gomega/internal/vetoptdesc.go new file mode 100644 index 000000000..f29587641 --- /dev/null +++ b/api/vendor/github.com/onsi/gomega/internal/vetoptdesc.go @@ -0,0 +1,22 @@ +package internal + +import ( + "fmt" + + "github.com/onsi/gomega/types" +) + +// vetOptionalDescription vets the optional description args: if it finds any +// Gomega matcher at the beginning it panics. This allows for rendering Gomega +// matchers as part of an optional Description, as long as they're not in the +// first slot. 
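The panic described above is easiest to see in a sketch (foo is a hypothetical string value; And is Gomega's matcher combinator):

	// Panics: a matcher in the first optional-description slot is almost
	// certainly a forgotten combinator, not a description.
	Expect(foo).To(ContainSubstring("a"), ContainSubstring("b"))

	// Intended form: combine the matchers explicitly.
	Expect(foo).To(And(ContainSubstring("a"), ContainSubstring("b")))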
+func vetOptionalDescription(assertion string, optionalDescription ...interface{}) { + if len(optionalDescription) == 0 { + return + } + if _, isGomegaMatcher := optionalDescription[0].(types.GomegaMatcher); isGomegaMatcher { + panic(fmt.Sprintf("%s has a GomegaMatcher as the first element of optionalDescription.\n\t"+ + "Do you mean to use And/Or/SatisfyAll/SatisfyAny to combine multiple matchers?", + assertion)) + } +} diff --git a/api/vendor/github.com/onsi/gomega/matchers.go b/api/vendor/github.com/onsi/gomega/matchers.go index b46e461a5..b832f3dba 100644 --- a/api/vendor/github.com/onsi/gomega/matchers.go +++ b/api/vendor/github.com/onsi/gomega/matchers.go @@ -3,139 +3,161 @@ package gomega import ( "time" + "github.com/google/go-cmp/cmp" "github.com/onsi/gomega/matchers" "github.com/onsi/gomega/types" ) -//Equal uses reflect.DeepEqual to compare actual with expected. Equal is strict about -//types when performing comparisons. -//It is an error for both actual and expected to be nil. Use BeNil() instead. +// Equal uses reflect.DeepEqual to compare actual with expected. Equal is strict about +// types when performing comparisons. +// It is an error for both actual and expected to be nil. Use BeNil() instead. func Equal(expected interface{}) types.GomegaMatcher { return &matchers.EqualMatcher{ Expected: expected, } } -//BeEquivalentTo is more lax than Equal, allowing equality between different types. -//This is done by converting actual to have the type of expected before -//attempting equality with reflect.DeepEqual. -//It is an error for actual and expected to be nil. Use BeNil() instead. +// BeEquivalentTo is more lax than Equal, allowing equality between different types. +// This is done by converting actual to have the type of expected before +// attempting equality with reflect.DeepEqual. +// It is an error for actual and expected to be nil. Use BeNil() instead. func BeEquivalentTo(expected interface{}) types.GomegaMatcher { return &matchers.BeEquivalentToMatcher{ Expected: expected, } } -//BeIdenticalTo uses the == operator to compare actual with expected. -//BeIdenticalTo is strict about types when performing comparisons. -//It is an error for both actual and expected to be nil. Use BeNil() instead. +// BeComparableTo uses gocmp.Equal from github.com/google/go-cmp (instead of reflect.DeepEqual) to perform a deep comparison. +// You can pass cmp.Option as options. +// It is an error for actual and expected to be nil. Use BeNil() instead. +func BeComparableTo(expected interface{}, opts ...cmp.Option) types.GomegaMatcher { + return &matchers.BeComparableToMatcher{ + Expected: expected, + Options: opts, + } +} + +// BeIdenticalTo uses the == operator to compare actual with expected. +// BeIdenticalTo is strict about types when performing comparisons. +// It is an error for both actual and expected to be nil. Use BeNil() instead. 
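BeComparableTo above ships without an inline example; here is a sketch under stated assumptions (order is a hypothetical struct with an unexported field, and cmpopts is github.com/google/go-cmp/cmp/cmpopts):

	type order struct {
		ID    string
		total float64 // unexported; cmp.Equal would panic without an option covering it
	}
	// Compare only exported state:
	Expect(order{ID: "a", total: 0.99}).To(BeComparableTo(order{ID: "a"}, cmpopts.IgnoreUnexported(order{})))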
func BeIdenticalTo(expected interface{}) types.GomegaMatcher { return &matchers.BeIdenticalToMatcher{ Expected: expected, } } -//BeNil succeeds if actual is nil +// BeNil succeeds if actual is nil func BeNil() types.GomegaMatcher { return &matchers.BeNilMatcher{} } -//BeTrue succeeds if actual is true +// BeTrue succeeds if actual is true func BeTrue() types.GomegaMatcher { return &matchers.BeTrueMatcher{} } -//BeFalse succeeds if actual is false +// BeFalse succeeds if actual is false func BeFalse() types.GomegaMatcher { return &matchers.BeFalseMatcher{} } -//HaveOccurred succeeds if actual is a non-nil error -//The typical Go error checking pattern looks like: -// err := SomethingThatMightFail() -// Expect(err).ShouldNot(HaveOccurred()) +// HaveOccurred succeeds if actual is a non-nil error +// The typical Go error checking pattern looks like: +// +// err := SomethingThatMightFail() +// Expect(err).ShouldNot(HaveOccurred()) func HaveOccurred() types.GomegaMatcher { return &matchers.HaveOccurredMatcher{} } -//Succeed passes if actual is a nil error -//Succeed is intended to be used with functions that return a single error value. Instead of -// err := SomethingThatMightFail() -// Expect(err).ShouldNot(HaveOccurred()) +// Succeed passes if actual is a nil error +// Succeed is intended to be used with functions that return a single error value. Instead of +// +// err := SomethingThatMightFail() +// Expect(err).ShouldNot(HaveOccurred()) +// +// You can write: // -//You can write: -// Expect(SomethingThatMightFail()).Should(Succeed()) +// Expect(SomethingThatMightFail()).Should(Succeed()) // -//It is a mistake to use Succeed with a function that has multiple return values. Gomega's Ω and Expect -//functions automatically trigger failure if any return values after the first return value are non-zero/non-nil. -//This means that Ω(MultiReturnFunc()).ShouldNot(Succeed()) can never pass. +// It is a mistake to use Succeed with a function that has multiple return values. Gomega's Ω and Expect +// functions automatically trigger failure if any return values after the first return value are non-zero/non-nil. +// This means that Ω(MultiReturnFunc()).ShouldNot(Succeed()) can never pass. func Succeed() types.GomegaMatcher { return &matchers.SucceedMatcher{} } -//MatchError succeeds if actual is a non-nil error that matches the passed in string/error. +// MatchError succeeds if actual is a non-nil error that matches the passed in +// string, error, or matcher. +// +// These are valid use-cases: // -//These are valid use-cases: // Expect(err).Should(MatchError("an error")) //asserts that err.Error() == "an error" // Expect(err).Should(MatchError(SomeError)) //asserts that err == SomeError (via reflect.DeepEqual) +// Expect(err).Should(MatchError(ContainSubstring("sprocket not found"))) // asserts that err.Error() contains substring "sprocket not found" // -//It is an error for err to be nil or an object that does not implement the Error interface +// It is an error for err to be nil or an object that does not implement the +// Error interface func MatchError(expected interface{}) types.GomegaMatcher { return &matchers.MatchErrorMatcher{ Expected: expected, } } -//BeClosed succeeds if actual is a closed channel.
+// It is an error to pass a non-channel to BeClosed, it is also an error to pass nil // -//In order to check whether or not the channel is closed, Gomega must try to read from the channel -//(even in the `ShouldNot(BeClosed())` case). You should keep this in mind if you wish to make subsequent assertions about -//values coming down the channel. +// In order to check whether or not the channel is closed, Gomega must try to read from the channel +// (even in the `ShouldNot(BeClosed())` case). You should keep this in mind if you wish to make subsequent assertions about +// values coming down the channel. // -//Also, if you are testing that a *buffered* channel is closed you must first read all values out of the channel before -//asserting that it is closed (it is not possible to detect that a buffered-channel has been closed until all its buffered values are read). +// Also, if you are testing that a *buffered* channel is closed you must first read all values out of the channel before +// asserting that it is closed (it is not possible to detect that a buffered-channel has been closed until all its buffered values are read). // -//Finally, as a corollary: it is an error to check whether or not a send-only channel is closed. +// Finally, as a corollary: it is an error to check whether or not a send-only channel is closed. func BeClosed() types.GomegaMatcher { return &matchers.BeClosedMatcher{} } -//Receive succeeds if there is a value to be received on actual. -//Actual must be a channel (and cannot be a send-only channel) -- anything else is an error. +// Receive succeeds if there is a value to be received on actual. +// Actual must be a channel (and cannot be a send-only channel) -- anything else is an error. +// +// Receive returns immediately and never blocks: +// +// - If there is nothing on the channel `c` then Expect(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass. +// +// - If the channel `c` is closed then Expect(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass. // -//Receive returns immediately and never blocks: +// - If there is something on the channel `c` ready to be read, then Expect(c).Should(Receive()) will pass and Ω(c).ShouldNot(Receive()) will fail. // -//- If there is nothing on the channel `c` then Expect(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass. +// If you have a go-routine running in the background that will write to channel `c` you can: // -//- If the channel `c` is closed then Expect(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass. +// Eventually(c).Should(Receive()) // -//- If there is something on the channel `c` ready to be read, then Expect(c).Should(Receive()) will pass and Ω(c).ShouldNot(Receive()) will fail. +// This will timeout if nothing gets sent to `c` (you can modify the timeout interval as you normally do with `Eventually`) // -//If you have a go-routine running in the background that will write to channel `c` you can: -// Eventually(c).Should(Receive()) +// A similar use-case is to assert that no go-routine writes to a channel (for a period of time). You can do this with `Consistently`: // -//This will timeout if nothing gets sent to `c` (you can modify the timeout interval as you normally do with `Eventually`) +// Consistently(c).ShouldNot(Receive()) // -//A similar use-case is to assert that no go-routine writes to a channel (for a period of time). 
You can do this with `Consistently`: -// Consistently(c).ShouldNot(Receive()) +// You can pass `Receive` a matcher. If you do so, it will match the received object against the matcher. For example: // -//You can pass `Receive` a matcher. If you do so, it will match the received object against the matcher. For example: -// Expect(c).Should(Receive(Equal("foo"))) +// Expect(c).Should(Receive(Equal("foo"))) // -//When given a matcher, `Receive` will always fail if there is nothing to be received on the channel. +// When given a matcher, `Receive` will always fail if there is nothing to be received on the channel. // -//Passing Receive a matcher is especially useful when paired with Eventually: +// Passing Receive a matcher is especially useful when paired with Eventually: // -// Eventually(c).Should(Receive(ContainSubstring("bar"))) +// Eventually(c).Should(Receive(ContainSubstring("bar"))) // -//will repeatedly attempt to pull values out of `c` until a value matching "bar" is received. +// will repeatedly attempt to pull values out of `c` until a value matching "bar" is received. // -//Finally, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type: -// var myThing thing -// Eventually(thingChan).Should(Receive(&myThing)) -// Expect(myThing.Sprocket).Should(Equal("foo")) -// Expect(myThing.IsValid()).Should(BeTrue()) +// Finally, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type: +// +// var myThing thing +// Eventually(thingChan).Should(Receive(&myThing)) +// Expect(myThing.Sprocket).Should(Equal("foo")) +// Expect(myThing.IsValid()).Should(BeTrue()) func Receive(args ...interface{}) types.GomegaMatcher { var arg interface{} if len(args) > 0 { @@ -147,27 +169,27 @@ func Receive(args ...interface{}) types.GomegaMatcher { } } -//BeSent succeeds if a value can be sent to actual. -//Actual must be a channel (and cannot be a receive-only channel) that can sent the type of the value passed into BeSent -- anything else is an error. -//In addition, actual must not be closed. +// BeSent succeeds if a value can be sent to actual. +// Actual must be a channel (and cannot be a receive-only channel) that can send the type of the value passed into BeSent -- anything else is an error. +// In addition, actual must not be closed. // -//BeSent never blocks: +// BeSent never blocks: // -//- If the channel `c` is not ready to receive then Expect(c).Should(BeSent("foo")) will fail immediately -//- If the channel `c` is eventually ready to receive then Eventually(c).Should(BeSent("foo")) will succeed.. presuming the channel becomes ready to receive before Eventually's timeout -//- If the channel `c` is closed then Expect(c).Should(BeSent("foo")) and Ω(c).ShouldNot(BeSent("foo")) will both fail immediately +// - If the channel `c` is not ready to receive then Expect(c).Should(BeSent("foo")) will fail immediately +// - If the channel `c` is eventually ready to receive then Eventually(c).Should(BeSent("foo")) will succeed, presuming the channel becomes ready to receive before Eventually's timeout +// - If the channel `c` is closed then Expect(c).Should(BeSent("foo")) and Ω(c).ShouldNot(BeSent("foo")) will both fail immediately // -//Of course, the value is actually sent to the channel.
-//BeSent succeeds if a value can be sent to actual.
-//Actual must be a channel (and cannot be a receive-only channel) that can sent the type of the value passed into BeSent -- anything else is an error.
-//In addition, actual must not be closed.
+// BeSent succeeds if a value can be sent to actual.
+// Actual must be a channel (and cannot be a receive-only channel) that can send the type of the value passed into BeSent -- anything else is an error.
+// In addition, actual must not be closed.
 //
-//BeSent never blocks:
+// BeSent never blocks:
 //
-//- If the channel `c` is not ready to receive then Expect(c).Should(BeSent("foo")) will fail immediately
-//- If the channel `c` is eventually ready to receive then Eventually(c).Should(BeSent("foo")) will succeed.. presuming the channel becomes ready to receive before Eventually's timeout
-//- If the channel `c` is closed then Expect(c).Should(BeSent("foo")) and Ω(c).ShouldNot(BeSent("foo")) will both fail immediately
+// - If the channel `c` is not ready to receive then Expect(c).Should(BeSent("foo")) will fail immediately
+// - If the channel `c` is eventually ready to receive then Eventually(c).Should(BeSent("foo")) will succeed, presuming the channel becomes ready to receive before Eventually's timeout
+// - If the channel `c` is closed then Expect(c).Should(BeSent("foo")) and Ω(c).ShouldNot(BeSent("foo")) will both fail immediately
 //
-//Of course, the value is actually sent to the channel. The point of `BeSent` is less to make an assertion about the availability of the channel (which is typically an implementation detail that your test should not be concerned with).
-//Rather, the point of `BeSent` is to make it possible to easily and expressively write tests that can timeout on blocked channel sends.
+// Of course, the value is actually sent to the channel. The point of `BeSent` is less to make an assertion about the availability of the channel (which is typically an implementation detail that your test should not be concerned with).
+// Rather, the point of `BeSent` is to make it possible to easily and expressively write tests that can time out on blocked channel sends.
 func BeSent(arg interface{}) types.GomegaMatcher {
 	return &matchers.BeSentMatcher{
 		Arg: arg,
 	}
 }

-//MatchRegexp succeeds if actual is a string or stringer that matches the
-//passed-in regexp. Optional arguments can be provided to construct a regexp
-//via fmt.Sprintf().
+// MatchRegexp succeeds if actual is a string or stringer that matches the
+// passed-in regexp. Optional arguments can be provided to construct a regexp
+// via fmt.Sprintf().
 func MatchRegexp(regexp string, args ...interface{}) types.GomegaMatcher {
 	return &matchers.MatchRegexpMatcher{
 		Regexp: regexp,
@@ -175,9 +197,9 @@ func MatchRegexp(regexp string, args ...interface{}) types.GomegaMatcher {
 	}
 }

-//ContainSubstring succeeds if actual is a string or stringer that contains the
-//passed-in substring. Optional arguments can be provided to construct the substring
-//via fmt.Sprintf().
+// ContainSubstring succeeds if actual is a string or stringer that contains the
+// passed-in substring. Optional arguments can be provided to construct the substring
+// via fmt.Sprintf().
 func ContainSubstring(substr string, args ...interface{}) types.GomegaMatcher {
 	return &matchers.ContainSubstringMatcher{
 		Substr: substr,
@@ -185,9 +207,9 @@ func ContainSubstring(substr string, args ...interface{}) types.GomegaMatcher {
 	}
 }

-//HavePrefix succeeds if actual is a string or stringer that contains the
-//passed-in string as a prefix. Optional arguments can be provided to construct
-//via fmt.Sprintf().
+// HavePrefix succeeds if actual is a string or stringer that contains the
+// passed-in string as a prefix. Optional arguments can be provided to construct
+// the prefix via fmt.Sprintf().
 func HavePrefix(prefix string, args ...interface{}) types.GomegaMatcher {
 	return &matchers.HavePrefixMatcher{
 		Prefix: prefix,
@@ -195,9 +217,9 @@ func HavePrefix(prefix string, args ...interface{}) types.GomegaMatcher {
 	}
 }

-//HaveSuffix succeeds if actual is a string or stringer that contains the
-//passed-in string as a suffix. Optional arguments can be provided to construct
-//via fmt.Sprintf().
+// HaveSuffix succeeds if actual is a string or stringer that contains the
+// passed-in string as a suffix. Optional arguments can be provided to construct
+// the suffix via fmt.Sprintf().
 func HaveSuffix(suffix string, args ...interface{}) types.GomegaMatcher {
 	return &matchers.HaveSuffixMatcher{
 		Suffix: suffix,
@@ -205,136 +227,191 @@ func HaveSuffix(suffix string, args ...interface{}) types.GomegaMatcher {
 	}
 }
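A small illustrative test for the string matchers above; the fmt.Sprintf-style trailing arguments are the part worth noticing (values here are hypothetical):

package strings_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

func TestStringMatchers(t *testing.T) {
	g := NewWithT(t)
	g.Expect("release-v1.2.3").To(HavePrefix("release-"))
	g.Expect("release-v1.2.3").To(HaveSuffix(".%d", 3)) // suffix built via fmt.Sprintf
	g.Expect("release-v1.2.3").To(MatchRegexp(`v\d+\.\d+\.\d+`))
	g.Expect("release-v1.2.3").To(ContainSubstring("v1.2"))
}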
-//MatchJSON succeeds if actual is a string or stringer of JSON that matches
-//the expected JSON. The JSONs are decoded and the resulting objects are compared via
-//reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter.
+// MatchJSON succeeds if actual is a string or stringer of JSON that matches
+// the expected JSON. The JSONs are decoded and the resulting objects are compared via
+// reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter.
 func MatchJSON(json interface{}) types.GomegaMatcher {
 	return &matchers.MatchJSONMatcher{
 		JSONToMatch: json,
 	}
 }

-//MatchXML succeeds if actual is a string or stringer of XML that matches
-//the expected XML. The XMLs are decoded and the resulting objects are compared via
-//reflect.DeepEqual so things like whitespaces shouldn't matter.
+// MatchXML succeeds if actual is a string or stringer of XML that matches
+// the expected XML. The XMLs are decoded and the resulting objects are compared via
+// reflect.DeepEqual so things like whitespace shouldn't matter.
 func MatchXML(xml interface{}) types.GomegaMatcher {
 	return &matchers.MatchXMLMatcher{
 		XMLToMatch: xml,
 	}
 }

-//MatchYAML succeeds if actual is a string or stringer of YAML that matches
-//the expected YAML. The YAML's are decoded and the resulting objects are compared via
-//reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter.
+// MatchYAML succeeds if actual is a string or stringer of YAML that matches
+// the expected YAML. The YAMLs are decoded and the resulting objects are compared via
+// reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter.
 func MatchYAML(yaml interface{}) types.GomegaMatcher {
 	return &matchers.MatchYAMLMatcher{
 		YAMLToMatch: yaml,
 	}
 }

-//BeEmpty succeeds if actual is empty. Actual must be of type string, array, map, chan, or slice.
+// BeEmpty succeeds if actual is empty. Actual must be of type string, array, map, chan, or slice.
 func BeEmpty() types.GomegaMatcher {
 	return &matchers.BeEmptyMatcher{}
 }

-//HaveLen succeeds if actual has the passed-in length. Actual must be of type string, array, map, chan, or slice.
+// HaveLen succeeds if actual has the passed-in length. Actual must be of type string, array, map, chan, or slice.
 func HaveLen(count int) types.GomegaMatcher {
 	return &matchers.HaveLenMatcher{
 		Count: count,
 	}
 }

-//HaveCap succeeds if actual has the passed-in capacity. Actual must be of type array, chan, or slice.
+// HaveCap succeeds if actual has the passed-in capacity. Actual must be of type array, chan, or slice.
 func HaveCap(count int) types.GomegaMatcher {
 	return &matchers.HaveCapMatcher{
 		Count: count,
 	}
 }

-//BeZero succeeds if actual is the zero value for its type or if actual is nil.
+// BeZero succeeds if actual is the zero value for its type or if actual is nil.
 func BeZero() types.GomegaMatcher {
 	return &matchers.BeZeroMatcher{}
 }
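A hypothetical sketch showing that formatting differences do not matter to MatchJSON and MatchYAML, since both sides are decoded before comparison:

package serialization_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

func TestDocumentMatching(t *testing.T) {
	g := NewWithT(t)
	// Key order and whitespace are irrelevant: both sides decode to the same object.
	g.Expect(`{"b": 2, "a": 1}`).To(MatchJSON(`{
		"a": 1,
		"b": 2
	}`))
	g.Expect("a: 1\nb: 2\n").To(MatchYAML("b: 2\na: 1"))
}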
-//ContainElement succeeds if actual contains the passed in element.
-//By default ContainElement() uses Equal() to perform the match, however a
-//matcher can be passed in instead:
-//	Expect([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubstring("Bar")))
+// ContainElement succeeds if actual contains the passed in element. By default
+// ContainElement() uses Equal() to perform the match, however a matcher can be
+// passed in instead:
+//
+//	Expect([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubstring("Bar")))
+//
+// Actual must be an array, slice or map. For maps, ContainElement searches
+// through the map's values.
 //
-//Actual must be an array, slice or map.
-//For maps, ContainElement searches through the map's values.
-func ContainElement(element interface{}) types.GomegaMatcher {
+// If you want to have a copy of the matching element(s) found you can pass a
+// pointer to a variable of the appropriate type. If the variable isn't a slice
+// or map, then exactly one match will be expected and returned. If the variable
+// is a slice or map, then at least one match is expected and all matches will be
+// stored in the variable.
+//
+//	var findings []string
+//	Expect([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubstring("Bar"), &findings))
+func ContainElement(element interface{}, result ...interface{}) types.GomegaMatcher {
 	return &matchers.ContainElementMatcher{
 		Element: element,
+		Result:  result,
 	}
 }

-//BeElementOf succeeds if actual is contained in the passed in elements.
-//BeElementOf() always uses Equal() to perform the match.
-//When the passed in elements are comprised of a single element that is either an Array or Slice, BeElementOf() behaves
-//as the reverse of ContainElement() that operates with Equal() to perform the match.
-//	Expect(2).Should(BeElementOf([]int{1, 2}))
-//	Expect(2).Should(BeElementOf([2]int{1, 2}))
-//Otherwise, BeElementOf() provides a syntactic sugar for Or(Equal(_), Equal(_), ...):
-//	Expect(2).Should(BeElementOf(1, 2))
+// BeElementOf succeeds if actual is contained in the passed in elements.
+// BeElementOf() always uses Equal() to perform the match.
+// When the passed in elements are comprised of a single element that is either an Array or Slice, BeElementOf() behaves
+// as the reverse of ContainElement() that operates with Equal() to perform the match.
+//
+//	Expect(2).Should(BeElementOf([]int{1, 2}))
+//	Expect(2).Should(BeElementOf([2]int{1, 2}))
+//
+// Otherwise, BeElementOf() provides a syntactic sugar for Or(Equal(_), Equal(_), ...):
+//
+//	Expect(2).Should(BeElementOf(1, 2))
 //
-//Actual must be typed.
+// Actual must be typed.
 func BeElementOf(elements ...interface{}) types.GomegaMatcher {
 	return &matchers.BeElementOfMatcher{
 		Elements: elements,
 	}
 }
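The new optional result argument to ContainElement can be exercised like this; a hypothetical test, not part of the vendored code:

package findings_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

func TestCollectMatches(t *testing.T) {
	g := NewWithT(t)

	// A slice pointer collects every element matching the sub-matcher.
	var matches []string
	g.Expect([]string{"Foo", "FooBar"}).To(ContainElement(ContainSubstring("Foo"), &matches))
	g.Expect(matches).To(ConsistOf("Foo", "FooBar"))

	// A scalar pointer expects exactly one match.
	var match string
	g.Expect([]string{"Foo", "FooBar"}).To(ContainElement("FooBar", &match))
	g.Expect(match).To(Equal("FooBar"))
}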
-//ConsistOf succeeds if actual contains precisely the elements passed into the matcher. The ordering of the elements does not matter.
-//By default ConsistOf() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples:
+// BeKeyOf succeeds if actual is contained in the keys of the passed in map.
+// BeKeyOf() always uses Equal() to perform the match between actual and the map keys.
 //
-//	Expect([]string{"Foo", "FooBar"}).Should(ConsistOf("FooBar", "Foo"))
-//	Expect([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Bar"), "Foo"))
-//	Expect([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Foo"), ContainSubstring("Foo")))
+//	Expect("foo").Should(BeKeyOf(map[string]bool{"foo": true, "bar": false}))
+func BeKeyOf(element interface{}) types.GomegaMatcher {
+	return &matchers.BeKeyOfMatcher{
+		Map: element,
+	}
+}
+
+// ConsistOf succeeds if actual contains precisely the elements passed into the matcher. The ordering of the elements does not matter.
+// By default ConsistOf() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples:
 //
-//Actual must be an array, slice or map. For maps, ConsistOf matches against the map's values.
+//	Expect([]string{"Foo", "FooBar"}).Should(ConsistOf("FooBar", "Foo"))
+//	Expect([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Bar"), "Foo"))
+//	Expect([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Foo"), ContainSubstring("Foo")))
 //
-//You typically pass variadic arguments to ConsistOf (as in the examples above). However, if you need to pass in a slice you can provided that it
-//is the only element passed in to ConsistOf:
+// Actual must be an array, slice or map. For maps, ConsistOf matches against the map's values.
 //
-//	Expect([]string{"Foo", "FooBar"}).Should(ConsistOf([]string{"FooBar", "Foo"}))
+// You typically pass variadic arguments to ConsistOf (as in the examples above). However, if you need to pass in a slice you can, provided that it
+// is the only element passed in to ConsistOf:
 //
-//Note that Go's type system does not allow you to write this as ConsistOf([]string{"FooBar", "Foo"}...) as []string and []interface{} are different types - hence the need for this special rule.
+//	Expect([]string{"Foo", "FooBar"}).Should(ConsistOf([]string{"FooBar", "Foo"}))
+//
+// Note that Go's type system does not allow you to write this as ConsistOf([]string{"FooBar", "Foo"}...) as []string and []interface{} are different types - hence the need for this special rule.
 func ConsistOf(elements ...interface{}) types.GomegaMatcher {
 	return &matchers.ConsistOfMatcher{
 		Elements: elements,
 	}
 }

-//ContainElements succeeds if actual contains the passed in elements. The ordering of the elements does not matter.
-//By default ContainElements() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples:
+// HaveExactElements succeeds if actual contains elements that precisely match the elements passed into the matcher. The ordering of the elements does matter.
+// By default HaveExactElements() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples:
+//
+//	Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements("Foo", "FooBar"))
+//	Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements("Foo", ContainSubstring("Bar")))
+//	Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements(ContainSubstring("Foo"), ContainSubstring("Foo")))
+//
+// Actual must be an array or slice.
+func HaveExactElements(elements ...interface{}) types.GomegaMatcher {
+	return &matchers.HaveExactElementsMatcher{
+		Elements: elements,
	}
+}
+
+// ContainElements succeeds if actual contains the passed in elements. The ordering of the elements does not matter.
+// By default ContainElements() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples:
 //
-//	Expect([]string{"Foo", "FooBar"}).Should(ContainElements("FooBar"))
-//	Expect([]string{"Foo", "FooBar"}).Should(ContainElements(ContainSubstring("Bar"), "Foo"))
+//	Expect([]string{"Foo", "FooBar"}).Should(ContainElements("FooBar"))
+//	Expect([]string{"Foo", "FooBar"}).Should(ContainElements(ContainSubstring("Bar"), "Foo"))
 //
-//Actual must be an array, slice or map.
-//For maps, ContainElements searches through the map's values.
+// Actual must be an array, slice or map.
+// For maps, ContainElements searches through the map's values.
 func ContainElements(elements ...interface{}) types.GomegaMatcher {
 	return &matchers.ContainElementsMatcher{
 		Elements: elements,
 	}
 }
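A hypothetical side-by-side of ConsistOf (order-insensitive) and the new HaveExactElements (order-sensitive):

package elements_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

func TestElementOrder(t *testing.T) {
	g := NewWithT(t)
	fruits := []string{"apple", "banana"}

	g.Expect(fruits).To(ConsistOf("banana", "apple"))         // any order
	g.Expect(fruits).To(HaveExactElements("apple", "banana")) // exact order
	g.Expect(fruits).NotTo(HaveExactElements("banana", "apple"))
}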
-//HaveKey succeeds if actual is a map with the passed in key.
-//By default HaveKey uses Equal() to perform the match, however a
-//matcher can be passed in instead:
-//	Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKey(MatchRegexp(`.+Foo$`)))
+// HaveEach succeeds if actual solely contains elements that match the passed in element.
+// Please note that if actual is empty, HaveEach always will fail.
+// By default HaveEach() uses Equal() to perform the match, however a
+// matcher can be passed in instead:
+//
+//	Expect([]string{"Foo", "FooBar"}).Should(HaveEach(ContainSubstring("Foo")))
+//
+// Actual must be an array, slice or map.
+// For maps, HaveEach searches through the map's values.
+func HaveEach(element interface{}) types.GomegaMatcher {
+	return &matchers.HaveEachMatcher{
+		Element: element,
+	}
+}
+
+// HaveKey succeeds if actual is a map with the passed in key.
+// By default HaveKey uses Equal() to perform the match, however a
+// matcher can be passed in instead:
+//
+//	Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKey(MatchRegexp(`.+Foo$`)))
 func HaveKey(key interface{}) types.GomegaMatcher {
 	return &matchers.HaveKeyMatcher{
 		Key: key,
 	}
 }

-//HaveKeyWithValue succeeds if actual is a map with the passed in key and value.
-//By default HaveKeyWithValue uses Equal() to perform the match, however a
-//matcher can be passed in instead:
-//	Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue("Foo", "Bar"))
-//	Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue(MatchRegexp(`.+Foo$`), "Bar"))
+// HaveKeyWithValue succeeds if actual is a map with the passed in key and value.
+// By default HaveKeyWithValue uses Equal() to perform the match, however a
+// matcher can be passed in instead:
+//
+//	Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue("Foo", "Bar"))
+//	Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue(MatchRegexp(`.+Foo$`), "Bar"))
 func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher {
 	return &matchers.HaveKeyWithValueMatcher{
 		Key: key,
@@ -342,27 +419,27 @@ func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher {
 	}
 }
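A short hypothetical test combining the map-oriented matchers above:

package maps_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

func TestInventory(t *testing.T) {
	g := NewWithT(t)
	stock := map[string]int{"apples": 4, "pears": 2}

	g.Expect(stock).To(HaveKey("apples"))
	g.Expect(stock).To(HaveKeyWithValue("pears", 2))
	g.Expect(stock).To(HaveEach(BeNumerically(">", 0))) // every value is positive
}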
-//HaveField succeeds if actual is a struct and the value at the passed in field
-//matches the passed in matcher. By default HaveField used Equal() to perform the match,
-//however a matcher can be passed in in stead.
-//
-//The field must be a string that resolves to the name of a field in the struct. Structs can be traversed
-//using the '.' delimiter. If the field ends with '()' a method named field is assumed to exist on the struct and is invoked.
-//Such methods must take no arguments and return a single value:
-//
-//	type Book struct {
-//		Title string
-//		Author Person
-//	}
-//	type Person struct {
-//		FirstName string
-//		LastName string
-//		DOB time.Time
-//	}
-//	Expect(book).To(HaveField("Title", "Les Miserables"))
-//	Expect(book).To(HaveField("Title", ContainSubstring("Les"))
-//	Expect(book).To(HaveField("Author.FirstName", Equal("Victor"))
-//	Expect(book).To(HaveField("Author.DOB.Year()", BeNumerically("<", 1900))
+// HaveField succeeds if actual is a struct and the value at the passed in field
+// matches the passed in matcher. By default HaveField uses Equal() to perform the match,
+// however a matcher can be passed in instead.
+//
+// The field must be a string that resolves to the name of a field in the struct. Structs can be traversed
+// using the '.' delimiter. If the field ends with '()' a method named field is assumed to exist on the struct and is invoked.
+// Such methods must take no arguments and return a single value:
+//
+//	type Book struct {
+//		Title string
+//		Author Person
+//	}
+//	type Person struct {
+//		FirstName string
+//		LastName string
+//		DOB time.Time
+//	}
+//	Expect(book).To(HaveField("Title", "Les Miserables"))
+//	Expect(book).To(HaveField("Title", ContainSubstring("Les")))
+//	Expect(book).To(HaveField("Author.FirstName", Equal("Victor")))
+//	Expect(book).To(HaveField("Author.DOB.Year()", BeNumerically("<", 1900)))
 func HaveField(field string, expected interface{}) types.GomegaMatcher {
 	return &matchers.HaveFieldMatcher{
 		Field: field,
@@ -370,6 +447,19 @@ func HaveField(field string, expected interface{}) types.GomegaMatcher {
 	}
 }

+// HaveExistingField succeeds if actual is a struct and the specified field
+// exists.
+//
+// HaveExistingField can be combined with HaveField in order to cover use cases
+// with optional fields. HaveField alone would trigger an error in such situations.
+//
+//	Expect(MrHarmless).NotTo(And(HaveExistingField("Title"), HaveField("Title", "Supervillain")))
+func HaveExistingField(field string) types.GomegaMatcher {
+	return &matchers.HaveExistingFieldMatcher{
+		Field: field,
+	}
+}
+
 // HaveValue applies the given matcher to the value of actual, optionally and
 // repeatedly dereferencing pointers or taking the concrete value of interfaces.
 // Thus, the matcher will always be applied to non-pointer and non-interface
@@ -381,26 +471,27 @@ func HaveField(field string, expected interface{}) types.GomegaMatcher {
 // be a pointer (as gstruct.PointTo does) but instead also accepts non-pointer
 // and even interface values.
 //
-//   actual := 42
-//   Expect(actual).To(HaveValue(42))
-//   Expect(&actual).To(HaveValue(42))
+//	actual := 42
+//	Expect(actual).To(HaveValue(Equal(42)))
+//	Expect(&actual).To(HaveValue(Equal(42)))
 func HaveValue(matcher types.GomegaMatcher) types.GomegaMatcher {
 	return &matchers.HaveValueMatcher{
 		Matcher: matcher,
 	}
 }

-//BeNumerically performs numerical assertions in a type-agnostic way.
-//Actual and expected should be numbers, though the specific type of
-//number is irrelevant (float32, float64, uint8, etc...).
+// BeNumerically performs numerical assertions in a type-agnostic way.
+// Actual and expected should be numbers, though the specific type of
+// number is irrelevant (float32, float64, uint8, etc...).
 //
-//There are six, self-explanatory, supported comparators:
-//	Expect(1.0).Should(BeNumerically("==", 1))
-//	Expect(1.0).Should(BeNumerically("~", 0.999, 0.01))
-//	Expect(1.0).Should(BeNumerically(">", 0.9))
-//	Expect(1.0).Should(BeNumerically(">=", 1.0))
-//	Expect(1.0).Should(BeNumerically("<", 3))
-//	Expect(1.0).Should(BeNumerically("<=", 1.0))
+// There are six, self-explanatory, supported comparators:
+//
+//	Expect(1.0).Should(BeNumerically("==", 1))
+//	Expect(1.0).Should(BeNumerically("~", 0.999, 0.01))
+//	Expect(1.0).Should(BeNumerically(">", 0.9))
+//	Expect(1.0).Should(BeNumerically(">=", 1.0))
+//	Expect(1.0).Should(BeNumerically("<", 3))
+//	Expect(1.0).Should(BeNumerically("<=", 1.0))
 func BeNumerically(comparator string, compareTo ...interface{}) types.GomegaMatcher {
 	return &matchers.BeNumericallyMatcher{
 		Comparator: comparator,
@@ -408,10 +499,11 @@ func BeNumerically(comparator string, compareTo ...interface{}) types.GomegaMatc
 	}
 }
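To illustrate the field-traversal syntax, here is a minimal sketch mirroring the Book example in the doc comment above (types and values are hypothetical):

package books_test

import (
	"testing"
	"time"

	. "github.com/onsi/gomega"
)

type Person struct {
	FirstName string
	DOB       time.Time
}

type Book struct {
	Title  string
	Author Person
}

func TestHaveField(t *testing.T) {
	g := NewWithT(t)
	book := Book{
		Title:  "Les Miserables",
		Author: Person{FirstName: "Victor", DOB: time.Date(1802, 2, 26, 0, 0, 0, 0, time.UTC)},
	}

	g.Expect(book).To(HaveField("Author.FirstName", "Victor"))
	g.Expect(book).To(HaveField("Author.DOB.Year()", BeNumerically("<", 1900)))
	g.Expect(book).To(HaveExistingField("Title"))
}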
-//BeTemporally compares time.Time's like BeNumerically
-//Actual and expected must be time.Time. The comparators are the same as for BeNumerically
-//	Expect(time.Now()).Should(BeTemporally(">", time.Time{}))
-//	Expect(time.Now()).Should(BeTemporally("~", time.Now(), time.Second))
+// BeTemporally compares time.Time values like BeNumerically.
+// Actual and expected must be time.Time. The comparators are the same as for BeNumerically.
+//
+//	Expect(time.Now()).Should(BeTemporally(">", time.Time{}))
+//	Expect(time.Now()).Should(BeTemporally("~", time.Now(), time.Second))
 func BeTemporally(comparator string, compareTo time.Time, threshold ...time.Duration) types.GomegaMatcher {
 	return &matchers.BeTemporallyMatcher{
 		Comparator: comparator,
@@ -420,58 +512,61 @@ func BeTemporally(comparator string, compareTo time.Time, threshold ...time.Dura
 	}
 }

-//BeAssignableToTypeOf succeeds if actual is assignable to the type of expected.
-//It will return an error when one of the values is nil.
-//	Expect(0).Should(BeAssignableToTypeOf(0))         // Same values
-//	Expect(5).Should(BeAssignableToTypeOf(-1))        // different values same type
-//	Expect("foo").Should(BeAssignableToTypeOf("bar")) // different values same type
-//	Expect(struct{ Foo string }{}).Should(BeAssignableToTypeOf(struct{ Foo string }{}))
+// BeAssignableToTypeOf succeeds if actual is assignable to the type of expected.
+// It will return an error when one of the values is nil.
+//
+//	Expect(0).Should(BeAssignableToTypeOf(0))         // Same values
+//	Expect(5).Should(BeAssignableToTypeOf(-1))        // different values same type
+//	Expect("foo").Should(BeAssignableToTypeOf("bar")) // different values same type
+//	Expect(struct{ Foo string }{}).Should(BeAssignableToTypeOf(struct{ Foo string }{}))
 func BeAssignableToTypeOf(expected interface{}) types.GomegaMatcher {
 	return &matchers.AssignableToTypeOfMatcher{
 		Expected: expected,
 	}
 }

-//Panic succeeds if actual is a function that, when invoked, panics.
-//Actual must be a function that takes no arguments and returns no results.
+// Panic succeeds if actual is a function that, when invoked, panics.
+// Actual must be a function that takes no arguments and returns no results.
 func Panic() types.GomegaMatcher {
 	return &matchers.PanicMatcher{}
 }

-//PanicWith succeeds if actual is a function that, when invoked, panics with a specific value.
-//Actual must be a function that takes no arguments and returns no results.
+// PanicWith succeeds if actual is a function that, when invoked, panics with a specific value.
+// Actual must be a function that takes no arguments and returns no results.
+//
+// By default PanicWith uses Equal() to perform the match, however a
+// matcher can be passed in instead:
 //
-//By default PanicWith uses Equal() to perform the match, however a
-//matcher can be passed in instead:
-//	Expect(fn).Should(PanicWith(MatchRegexp(`.+Foo$`)))
+//	Expect(fn).Should(PanicWith(MatchRegexp(`.+Foo$`)))
 func PanicWith(expected interface{}) types.GomegaMatcher {
 	return &matchers.PanicMatcher{Expected: expected}
 }

-//BeAnExistingFile succeeds if a file exists.
-//Actual must be a string representing the abs path to the file being checked.
+// BeAnExistingFile succeeds if a file exists.
+// Actual must be a string representing the abs path to the file being checked.
 func BeAnExistingFile() types.GomegaMatcher {
 	return &matchers.BeAnExistingFileMatcher{}
 }
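For example, a hypothetical sketch of Panic and PanicWith (note that the actual value must be a zero-argument function):

package panics_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

func TestPanics(t *testing.T) {
	g := NewWithT(t)
	boom := func() { panic("unexpected Foo") }

	g.Expect(boom).To(Panic())
	g.Expect(boom).To(PanicWith(MatchRegexp(`.+Foo$`))) // match the panic value
}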
-//BeARegularFile succeeds if a file exists and is a regular file.
-//Actual must be a string representing the abs path to the file being checked.
+// BeARegularFile succeeds if a file exists and is a regular file.
+// Actual must be a string representing the abs path to the file being checked.
 func BeARegularFile() types.GomegaMatcher {
 	return &matchers.BeARegularFileMatcher{}
 }

-//BeADirectory succeeds if a file exists and is a directory.
-//Actual must be a string representing the abs path to the file being checked.
+// BeADirectory succeeds if a file exists and is a directory.
+// Actual must be a string representing the abs path to the file being checked.
 func BeADirectory() types.GomegaMatcher {
 	return &matchers.BeADirectoryMatcher{}
 }

-//HaveHTTPStatus succeeds if the Status or StatusCode field of an HTTP response matches.
-//Actual must be either a *http.Response or *httptest.ResponseRecorder.
-//Expected must be either an int or a string.
-//	Expect(resp).Should(HaveHTTPStatus(http.StatusOK))                    // asserts that resp.StatusCode == 200
-//	Expect(resp).Should(HaveHTTPStatus("404 Not Found"))                  // asserts that resp.Status == "404 Not Found"
-//	Expect(resp).Should(HaveHTTPStatus(http.StatusOK, http.StatusNoContent)) // asserts that resp.StatusCode == 200 || resp.StatusCode == 204
+// HaveHTTPStatus succeeds if the Status or StatusCode field of an HTTP response matches.
+// Actual must be either a *http.Response or *httptest.ResponseRecorder.
+// Expected must be either an int or a string.
+//
+//	Expect(resp).Should(HaveHTTPStatus(http.StatusOK))                        // asserts that resp.StatusCode == 200
+//	Expect(resp).Should(HaveHTTPStatus("404 Not Found"))                      // asserts that resp.Status == "404 Not Found"
+//	Expect(resp).Should(HaveHTTPStatus(http.StatusOK, http.StatusNoContent)) // asserts that resp.StatusCode == 200 || resp.StatusCode == 204
 func HaveHTTPStatus(expected ...interface{}) types.GomegaMatcher {
 	return &matchers.HaveHTTPStatusMatcher{Expected: expected}
 }
@@ -494,63 +589,70 @@ func HaveHTTPBody(expected interface{}) types.GomegaMatcher {
 	return &matchers.HaveHTTPBodyMatcher{Expected: expected}
 }

-//And succeeds only if all of the given matchers succeed.
-//The matchers are tried in order, and will fail-fast if one doesn't succeed.
-//	Expect("hi").To(And(HaveLen(2), Equal("hi"))
+// And succeeds only if all of the given matchers succeed.
+// The matchers are tried in order, and will fail-fast if one doesn't succeed.
+//
+//	Expect("hi").To(And(HaveLen(2), Equal("hi")))
 //
-//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
+// And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
 func And(ms ...types.GomegaMatcher) types.GomegaMatcher {
 	return &matchers.AndMatcher{Matchers: ms}
 }

-//SatisfyAll is an alias for And().
-//	Expect("hi").Should(SatisfyAll(HaveLen(2), Equal("hi")))
+// SatisfyAll is an alias for And().
+//
+//	Expect("hi").Should(SatisfyAll(HaveLen(2), Equal("hi")))
 func SatisfyAll(matchers ...types.GomegaMatcher) types.GomegaMatcher {
 	return And(matchers...)
 }
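A hypothetical handler test exercising HaveHTTPStatus with an httptest recorder:

package web_test

import (
	"net/http"
	"net/http/httptest"
	"testing"

	. "github.com/onsi/gomega"
)

func TestHealthEndpoint(t *testing.T) {
	g := NewWithT(t)
	rec := httptest.NewRecorder()
	handler := func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNoContent) }

	handler(rec, httptest.NewRequest(http.MethodGet, "/healthz", nil))
	g.Expect(rec).To(HaveHTTPStatus(http.StatusOK, http.StatusNoContent)) // either status is acceptable
}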
-//Or succeeds if any of the given matchers succeed.
-//The matchers are tried in order and will return immediately upon the first successful match.
-//	Expect("hi").To(Or(HaveLen(3), HaveLen(2))
+// Or succeeds if any of the given matchers succeed.
+// The matchers are tried in order and will return immediately upon the first successful match.
+//
+//	Expect("hi").To(Or(HaveLen(3), HaveLen(2)))
 //
-//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
+// And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
 func Or(ms ...types.GomegaMatcher) types.GomegaMatcher {
 	return &matchers.OrMatcher{Matchers: ms}
 }

-//SatisfyAny is an alias for Or().
-//	Expect("hi").SatisfyAny(Or(HaveLen(3), HaveLen(2))
+// SatisfyAny is an alias for Or().
+//
+//	Expect("hi").To(SatisfyAny(HaveLen(3), HaveLen(2)))
 func SatisfyAny(matchers ...types.GomegaMatcher) types.GomegaMatcher {
 	return Or(matchers...)
 }

-//Not negates the given matcher; it succeeds if the given matcher fails.
-//	Expect(1).To(Not(Equal(2))
+// Not negates the given matcher; it succeeds if the given matcher fails.
 //
-//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
+//	Expect(1).To(Not(Equal(2)))
+//
+// And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
 func Not(matcher types.GomegaMatcher) types.GomegaMatcher {
 	return &matchers.NotMatcher{Matcher: matcher}
 }

-//WithTransform applies the `transform` to the actual value and matches it against `matcher`.
-//The given transform must be either a function of one parameter that returns one value or a
+// WithTransform applies the `transform` to the actual value and matches it against `matcher`.
+// The given transform must be either a function of one parameter that returns one value or a
 // function of one parameter that returns two values, where the second value must be of the
 // error type.
-//	var plus1 = func(i int) int { return i + 1 }
-//	Expect(1).To(WithTransform(plus1, Equal(2))
 //
-//	var failingplus1 = func(i int) (int, error) { return 42, "this does not compute" }
-//	Expect(1).To(WithTransform(failingplus1, Equal(2)))
+//	var plus1 = func(i int) int { return i + 1 }
+//	Expect(1).To(WithTransform(plus1, Equal(2)))
+//
+//	var failingplus1 = func(i int) (int, error) { return 42, errors.New("this does not compute") }
+//	Expect(1).To(WithTransform(failingplus1, Equal(2)))
 //
-//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
+// And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
 func WithTransform(transform interface{}, matcher types.GomegaMatcher) types.GomegaMatcher {
 	return matchers.NewWithTransformMatcher(transform, matcher)
 }
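A quick hypothetical use of WithTransform, including the two-value (error-returning) transform form described above:

package transform_test

import (
	"strconv"
	"testing"

	. "github.com/onsi/gomega"
)

func TestWithTransform(t *testing.T) {
	g := NewWithT(t)

	double := func(i int) int { return i * 2 }
	g.Expect(21).To(WithTransform(double, Equal(42)))

	// Transforms may also return (T, error); a non-nil error fails the assertion.
	g.Expect("42").To(WithTransform(strconv.Atoi, Equal(42)))
}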
-//Satisfy matches the actual value against the `predicate` function.
-//The given predicate must be a function of one paramter that returns bool.
-//	var isEven = func(i int) bool { return i%2 == 0 }
-//	Expect(2).To(Satisfy(isEven))
+// Satisfy matches the actual value against the `predicate` function.
+// The given predicate must be a function of one parameter that returns bool.
+//
+//	var isEven = func(i int) bool { return i%2 == 0 }
+//	Expect(2).To(Satisfy(isEven))
 func Satisfy(predicate interface{}) types.GomegaMatcher {
 	return matchers.NewSatisfyMatcher(predicate)
 }
diff --git a/api/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go b/api/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go
new file mode 100644
index 000000000..8ab4bb919
--- /dev/null
+++ b/api/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go
@@ -0,0 +1,49 @@
+package matchers
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/google/go-cmp/cmp"
+	"github.com/onsi/gomega/format"
+)
+
+type BeComparableToMatcher struct {
+	Expected interface{}
+	Options  cmp.Options
+}
+
+func (matcher *BeComparableToMatcher) Match(actual interface{}) (success bool, matchErr error) {
+	if actual == nil && matcher.Expected == nil {
+		return false, fmt.Errorf("Refusing to compare <nil> to <nil>.\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.")
+	}
+	// Shortcut for byte slices.
+	// Comparing long byte slices with reflect.DeepEqual is very slow,
+	// so use bytes.Equal if actual and expected are both byte slices.
+	if actualByteSlice, ok := actual.([]byte); ok {
+		if expectedByteSlice, ok := matcher.Expected.([]byte); ok {
+			return bytes.Equal(actualByteSlice, expectedByteSlice), nil
+		}
+	}
+
+	defer func() {
+		if r := recover(); r != nil {
+			success = false
+			if err, ok := r.(error); ok {
+				matchErr = err
+			} else if errMsg, ok := r.(string); ok {
+				matchErr = fmt.Errorf(errMsg)
+			}
+		}
+	}()
+
+	return cmp.Equal(actual, matcher.Expected, matcher.Options...), nil
+}
+
+func (matcher *BeComparableToMatcher) FailureMessage(actual interface{}) (message string) {
+	return cmp.Diff(matcher.Expected, actual, matcher.Options)
+}
+
+func (matcher *BeComparableToMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "not to equal", matcher.Expected)
+}
diff --git a/api/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go b/api/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go
new file mode 100644
index 000000000..449a291ef
--- /dev/null
+++ b/api/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go
@@ -0,0 +1,45 @@
+package matchers
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/onsi/gomega/format"
+)
+
+type BeKeyOfMatcher struct {
+	Map interface{}
+}
+
+func (matcher *BeKeyOfMatcher) Match(actual interface{}) (success bool, err error) {
+	if !isMap(matcher.Map) {
+		return false, fmt.Errorf("BeKeyOf matcher needs expected to be a map type")
+	}
+
+	if reflect.TypeOf(actual) == nil {
+		return false, fmt.Errorf("BeKeyOf matcher expects actual to be typed")
+	}
+
+	var lastError error
+	for _, key := range reflect.ValueOf(matcher.Map).MapKeys() {
+		matcher := &EqualMatcher{Expected: key.Interface()}
+		success, err := matcher.Match(actual)
+		if err != nil {
+			lastError = err
+			continue
+		}
+		if success {
+			return true, nil
+		}
+	}
+
+	return false, lastError
+}
+
+func (matcher *BeKeyOfMatcher) FailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "to be a key of", presentable(valuesOf(matcher.Map)))
+}
+
+func (matcher *BeKeyOfMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "not to be a key of", presentable(valuesOf(matcher.Map)))
+}
diff --git a/api/vendor/github.com/onsi/gomega/matchers/consist_of.go b/api/vendor/github.com/onsi/gomega/matchers/consist_of.go
index e8ef0dee1..f69037a4f 100644
--- a/api/vendor/github.com/onsi/gomega/matchers/consist_of.go
+++ b/api/vendor/github.com/onsi/gomega/matchers/consist_of.go
@@ -48,11 +48,13 @@ func neighbours(value, matcher interface{}) (bool, error) {
 func equalMatchersToElements(matchers []interface{}) (elements []interface{}) {
 	for _, matcher := range matchers {
-		equalMatcher, ok := matcher.(*EqualMatcher)
-		if ok {
-			matcher = equalMatcher.Expected
+		if equalMatcher, ok := matcher.(*EqualMatcher); ok {
+			elements = append(elements, equalMatcher.Expected)
+		} else if _, ok := matcher.(*BeNilMatcher); ok {
+			elements = append(elements, nil)
+		} else {
+			elements = append(elements, matcher)
 		}
-		elements = append(elements, matcher)
 	}
 	return
 }
@@ -72,11 +74,13 @@ func flatten(elems []interface{}) []interface{} {
 func matchers(expectedElems []interface{}) (matchers []interface{}) {
 	for _, e := range flatten(expectedElems) {
-		matcher, isMatcher := e.(omegaMatcher)
-		if !isMatcher {
-			matcher = &EqualMatcher{Expected: e}
+		if e == nil {
+			matchers = append(matchers, &BeNilMatcher{})
+		} else if matcher, isMatcher := e.(omegaMatcher); isMatcher {
+			matchers = append(matchers, matcher)
+		} else {
+			matchers = append(matchers, &EqualMatcher{Expected: e})
 		}
-		matchers = append(matchers, matcher)
 	}
 	return
 }
@@ -89,9 +93,14 @@ func presentable(elems []interface{}) interface{} {
 	}
 
 	sv := reflect.ValueOf(elems)
-	tt := sv.Index(0).Elem().Type()
+	firstEl := sv.Index(0)
+	if firstEl.IsNil() {
+		return elems
+	}
+	tt := firstEl.Elem().Type()
 	for i := 1; i < sv.Len(); i++ {
-		if sv.Index(i).Elem().Type() != tt {
+		el := sv.Index(i)
+		if el.IsNil() || (sv.Index(i).Elem().Type() != tt) {
 			return elems
 		}
 	}
diff --git a/api/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go b/api/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go
index 8d6c44c7a..3d45c9ebc 100644
--- a/api/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go
+++ b/api/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go
@@ -3,6 +3,7 @@
 package matchers
 
 import (
+	"errors"
 	"fmt"
 	"reflect"
 
@@ -11,6 +12,7 @@ import (
 
 type ContainElementMatcher struct {
 	Element interface{}
+	Result  []interface{}
 }
 
 func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, err error) {
@@ -18,6 +20,49 @@ func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, e
 		return false, fmt.Errorf("ContainElement matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1))
 	}
 
+	var actualT reflect.Type
+	var result reflect.Value
+	switch l := len(matcher.Result); {
+	case l > 1:
+		return false, errors.New("ContainElement matcher expects at most a single optional pointer to store its findings at")
+	case l == 1:
+		if reflect.ValueOf(matcher.Result[0]).Kind() != reflect.Ptr {
+			return false, fmt.Errorf("ContainElement matcher expects a non-nil pointer to store its findings at. Got\n%s",
+				format.Object(matcher.Result[0], 1))
+		}
+		actualT = reflect.TypeOf(actual)
+		resultReference := matcher.Result[0]
+		result = reflect.ValueOf(resultReference).Elem() // what ResultReference points to, to stash away our findings
+		switch result.Kind() {
+		case reflect.Array:
+			return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
Need *%s, got *%s", + reflect.SliceOf(actualT.Elem()).String(), result.Type().String()) + case reflect.Slice: + if !isArrayOrSlice(actual) { + return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", + reflect.MapOf(actualT.Key(), actualT.Elem()).String(), result.Type().String()) + } + if !actualT.Elem().AssignableTo(result.Type().Elem()) { + return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", + actualT.String(), result.Type().String()) + } + case reflect.Map: + if !isMap(actual) { + return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", + actualT.String(), result.Type().String()) + } + if !actualT.AssignableTo(result.Type()) { + return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", + actualT.String(), result.Type().String()) + } + default: + if !actualT.Elem().AssignableTo(result.Type()) { + return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", + actualT.Elem().String(), result.Type().String()) + } + } + } + elemMatcher, elementIsMatcher := matcher.Element.(omegaMatcher) if !elementIsMatcher { elemMatcher = &EqualMatcher{Expected: matcher.Element} @@ -25,30 +70,99 @@ func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, e value := reflect.ValueOf(actual) var valueAt func(int) interface{} + + var getFindings func() reflect.Value + var foundAt func(int) + if isMap(actual) { keys := value.MapKeys() valueAt = func(i int) interface{} { return value.MapIndex(keys[i]).Interface() } + if result.Kind() != reflect.Invalid { + fm := reflect.MakeMap(actualT) + getFindings = func() reflect.Value { + return fm + } + foundAt = func(i int) { + fm.SetMapIndex(keys[i], value.MapIndex(keys[i])) + } + } } else { valueAt = func(i int) interface{} { return value.Index(i).Interface() } + if result.Kind() != reflect.Invalid { + var f reflect.Value + if result.Kind() == reflect.Slice { + f = reflect.MakeSlice(result.Type(), 0, 0) + } else { + f = reflect.MakeSlice(reflect.SliceOf(result.Type()), 0, 0) + } + getFindings = func() reflect.Value { + return f + } + foundAt = func(i int) { + f = reflect.Append(f, value.Index(i)) + } + } } var lastError error for i := 0; i < value.Len(); i++ { - success, err := elemMatcher.Match(valueAt(i)) + elem := valueAt(i) + success, err := elemMatcher.Match(elem) if err != nil { lastError = err continue } if success { - return true, nil + if result.Kind() == reflect.Invalid { + return true, nil + } + foundAt(i) } } - return false, lastError + // when the expectation isn't interested in the findings except for success + // or non-success, then we're done here and return the last matcher error + // seen, if any, as well as non-success. + if result.Kind() == reflect.Invalid { + return false, lastError + } + + // pick up any findings the test is interested in as it specified a non-nil + // result reference. However, the expection always is that there are at + // least one or multiple findings. So, if a result is expected, but we had + // no findings, then this is an error. + findings := getFindings() + if findings.Len() == 0 { + return false, lastError + } + + // there's just a single finding and the result is neither a slice nor a map + // (so it's a scalar): pick the one and only finding and return it in the + // place the reference points to. 
+	if findings.Len() == 1 && !isArrayOrSlice(result.Interface()) && !isMap(result.Interface()) {
+		if isMap(actual) {
+			miter := findings.MapRange()
+			miter.Next()
+			result.Set(miter.Value())
+		} else {
+			result.Set(findings.Index(0))
+		}
+		return true, nil
+	}
+
+	// at least one or even multiple findings and the result references a
+	// slice or a map, so all we need to do is to store our findings where the
+	// reference points to.
+	if !findings.Type().AssignableTo(result.Type()) {
+		return false, fmt.Errorf("ContainElement cannot return multiple findings. Need *%s, got *%s",
+			findings.Type().String(), result.Type().String())
+	}
+	result.Set(findings)
+	return true, nil
 }
 
 func (matcher *ContainElementMatcher) FailureMessage(actual interface{}) (message string) {
diff --git a/api/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go b/api/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go
new file mode 100644
index 000000000..025b6e1ac
--- /dev/null
+++ b/api/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go
@@ -0,0 +1,65 @@
+package matchers
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/onsi/gomega/format"
+)
+
+type HaveEachMatcher struct {
+	Element interface{}
+}
+
+func (matcher *HaveEachMatcher) Match(actual interface{}) (success bool, err error) {
+	if !isArrayOrSlice(actual) && !isMap(actual) {
+		return false, fmt.Errorf("HaveEach matcher expects an array/slice/map. Got:\n%s",
+			format.Object(actual, 1))
+	}
+
+	elemMatcher, elementIsMatcher := matcher.Element.(omegaMatcher)
+	if !elementIsMatcher {
+		elemMatcher = &EqualMatcher{Expected: matcher.Element}
+	}
+
+	value := reflect.ValueOf(actual)
+	if value.Len() == 0 {
+		return false, fmt.Errorf("HaveEach matcher expects a non-empty array/slice/map. Got:\n%s",
+			format.Object(actual, 1))
+	}
+
+	var valueAt func(int) interface{}
+	if isMap(actual) {
+		keys := value.MapKeys()
+		valueAt = func(i int) interface{} {
+			return value.MapIndex(keys[i]).Interface()
+		}
+	} else {
+		valueAt = func(i int) interface{} {
+			return value.Index(i).Interface()
+		}
+	}
+
+	// at this point actual is known to be non-empty; every element must
+	// match for HaveEach to succeed.
+	for i := 0; i < value.Len(); i++ {
+		success, err := elemMatcher.Match(valueAt(i))
+		if err != nil {
+			return false, err
+		}
+		if !success {
+			return false, nil
+		}
+	}
+
+	return true, nil
+}
+
+// FailureMessage returns a suitable failure message.
+func (matcher *HaveEachMatcher) FailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "to contain element matching", matcher.Element)
+}
+
+// NegatedFailureMessage returns a suitable negated failure message.
+func (matcher *HaveEachMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "not to contain element matching", matcher.Element)
+}
diff --git a/api/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go b/api/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go
new file mode 100644
index 000000000..dca5b9446
--- /dev/null
+++ b/api/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go
@@ -0,0 +1,88 @@
+package matchers
+
+import (
+	"fmt"
+
+	"github.com/onsi/gomega/format"
+)
+
+type mismatchFailure struct {
+	failure string
+	index   int
+}
+
+type HaveExactElementsMatcher struct {
+	Elements         []interface{}
+	mismatchFailures []mismatchFailure
+	missingIndex     int
+	extraIndex       int
+}
+
+func (matcher *HaveExactElementsMatcher) Match(actual interface{}) (success bool, err error) {
+	matcher.resetState()
+
+	if isMap(actual) {
+		return false, fmt.Errorf("HaveExactElements matcher does not support maps")
+	}
+
+	matchers := matchers(matcher.Elements)
+	values := valuesOf(actual)
+
+	lenMatchers := len(matchers)
+	lenValues := len(values)
+
+	for i := 0; i < lenMatchers || i < lenValues; i++ {
+		if i >= lenMatchers {
+			matcher.extraIndex = i
+			continue
+		}
+
+		if i >= lenValues {
+			matcher.missingIndex = i
+			return
+		}
+
+		elemMatcher := matchers[i].(omegaMatcher)
+		match, err := elemMatcher.Match(values[i])
+		if err != nil {
+			matcher.mismatchFailures = append(matcher.mismatchFailures, mismatchFailure{
+				index:   i,
+				failure: err.Error(),
+			})
+		} else if !match {
+			matcher.mismatchFailures = append(matcher.mismatchFailures, mismatchFailure{
+				index:   i,
+				failure: elemMatcher.FailureMessage(values[i]),
+			})
+		}
+	}
+
+	return matcher.missingIndex+matcher.extraIndex+len(matcher.mismatchFailures) == 0, nil
+}
+
+func (matcher *HaveExactElementsMatcher) FailureMessage(actual interface{}) (message string) {
+	message = format.Message(actual, "to have exact elements with", presentable(matcher.Elements))
+	if matcher.missingIndex > 0 {
+		message = fmt.Sprintf("%s\nthe missing elements start from index %d", message, matcher.missingIndex)
+	}
+	if matcher.extraIndex > 0 {
+		message = fmt.Sprintf("%s\nthe extra elements start from index %d", message, matcher.extraIndex)
+	}
+	if len(matcher.mismatchFailures) != 0 {
+		message = fmt.Sprintf("%s\nthe mismatch indexes were:", message)
+	}
+	for _, mismatch := range matcher.mismatchFailures {
+		message = fmt.Sprintf("%s\n%d: %s", message, mismatch.index, mismatch.failure)
+	}
+	return
+}
+
+func (matcher *HaveExactElementsMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return format.Message(actual, "not to contain elements", presentable(matcher.Elements))
+}
+
+func (matcher *HaveExactElementsMatcher) resetState() {
+	matcher.mismatchFailures = nil
+	matcher.missingIndex = 0
+	matcher.extraIndex = 0
+}
diff --git a/api/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go b/api/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go
new file mode 100644
index 000000000..b57018745
--- /dev/null
+++ b/api/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go
@@ -0,0 +1,36 @@
+package matchers
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/onsi/gomega/format"
+)
+
+type HaveExistingFieldMatcher struct {
+	Field string
+}
+
+func (matcher *HaveExistingFieldMatcher) Match(actual interface{}) (success bool, err error) {
+	// we don't care about the field's actual value, just about any error in
+	// trying to find the field (or method).
+	_, err = extractField(actual, matcher.Field, "HaveExistingField")
+	if err == nil {
+		return true, nil
+	}
+	var mferr missingFieldError
+	if errors.As(err, &mferr) {
+		// missing field errors aren't errors in this context, but instead
+		// unsuccessful matches.
+		return false, nil
+	}
+	return false, err
+}
+
+func (matcher *HaveExistingFieldMatcher) FailureMessage(actual interface{}) (message string) {
+	return fmt.Sprintf("Expected\n%s\nto have field '%s'", format.Object(actual, 1), matcher.Field)
+}
+
+func (matcher *HaveExistingFieldMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return fmt.Sprintf("Expected\n%s\nnot to have field '%s'", format.Object(actual, 1), matcher.Field)
+}
diff --git a/api/vendor/github.com/onsi/gomega/matchers/have_field.go b/api/vendor/github.com/onsi/gomega/matchers/have_field.go
index e1fe934d5..6989f78c4 100644
--- a/api/vendor/github.com/onsi/gomega/matchers/have_field.go
+++ b/api/vendor/github.com/onsi/gomega/matchers/have_field.go
@@ -8,7 +8,16 @@ import (
 	"github.com/onsi/gomega/format"
 )
 
-func extractField(actual interface{}, field string) (interface{}, error) {
+// missingFieldError represents a missing field extraction error that
+// HaveExistingFieldMatcher can ignore, as opposed to other, severe field
+// extraction errors, such as nil pointers, et cetera.
+type missingFieldError string
+
+func (e missingFieldError) Error() string {
+	return string(e)
+}
+
+func extractField(actual interface{}, field string, matchername string) (interface{}, error) {
 	fields := strings.SplitN(field, ".", 2)
 	actualValue := reflect.ValueOf(actual)
 
@@ -16,36 +25,39 @@ func extractField(actual interface{}, field string) (interface{}, error) {
 		actualValue = actualValue.Elem()
 	}
 	if actualValue == (reflect.Value{}) {
-		return nil, fmt.Errorf("HaveField encountered nil while dereferencing a pointer of type %T.", actual)
+		return nil, fmt.Errorf("%s encountered nil while dereferencing a pointer of type %T.", matchername, actual)
 	}
 
 	if actualValue.Kind() != reflect.Struct {
-		return nil, fmt.Errorf("HaveField encountered:\n%s\nWhich is not a struct.", format.Object(actual, 1))
+		return nil, fmt.Errorf("%s encountered:\n%s\nWhich is not a struct.", matchername, format.Object(actual, 1))
 	}
 
 	var extractedValue reflect.Value
 
 	if strings.HasSuffix(fields[0], "()") {
 		extractedValue = actualValue.MethodByName(strings.TrimSuffix(fields[0], "()"))
+		if extractedValue == (reflect.Value{}) && actualValue.CanAddr() {
+			extractedValue = actualValue.Addr().MethodByName(strings.TrimSuffix(fields[0], "()"))
+		}
 		if extractedValue == (reflect.Value{}) {
-			return nil, fmt.Errorf("HaveField could not find method named '%s' in struct of type %T.", fields[0], actual)
+			return nil, missingFieldError(fmt.Sprintf("%s could not find method named '%s' in struct of type %T.", matchername, fields[0], actual))
 		}
 		t := extractedValue.Type()
 		if t.NumIn() != 0 || t.NumOut() != 1 {
-			return nil, fmt.Errorf("HaveField found an invalid method named '%s' in struct of type %T.\nMethods must take no arguments and return exactly one value.", fields[0], actual)
+			return nil, fmt.Errorf("%s found an invalid method named '%s' in struct of type %T.\nMethods must take no arguments and return exactly one value.", matchername, fields[0], actual)
 		}
 		extractedValue = extractedValue.Call([]reflect.Value{})[0]
 	} else {
 		extractedValue = actualValue.FieldByName(fields[0])
 		if extractedValue == (reflect.Value{}) {
-			return nil, fmt.Errorf("HaveField could not find field named '%s' in struct:\n%s", fields[0], format.Object(actual, 1))
+			return nil, missingFieldError(fmt.Sprintf("%s could not find field named '%s' in struct:\n%s", matchername, fields[0], format.Object(actual, 1)))
 		}
 	}
 
 	if len(fields) == 1 {
 		return extractedValue.Interface(), nil
 	} else {
-		return extractField(extractedValue.Interface(), fields[1])
+		return extractField(extractedValue.Interface(), fields[1], matchername)
 	}
 }
 
@@ -58,7 +70,7 @@ type HaveFieldMatcher struct {
 }
 
 func (matcher *HaveFieldMatcher) Match(actual interface{}) (success bool, err error) {
-	matcher.extractedField, err = extractField(actual, matcher.Field)
+	matcher.extractedField, err = extractField(actual, matcher.Field, "HaveField")
 	if err != nil {
 		return false, err
 	}
diff --git a/api/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go b/api/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go
index 5bcfdd2ad..22a1b6730 100644
--- a/api/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go
+++ b/api/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go
@@ -31,5 +31,5 @@ func (matcher *HaveOccurredMatcher) FailureMessage(actual interface{}) (message
 }
 
 func (matcher *HaveOccurredMatcher) NegatedFailureMessage(actual interface{}) (message string) {
-	return fmt.Sprintf("Unexpected error:\n%s\n%s\n%s", format.Object(actual, 1), format.IndentString(actual.(error).Error(), 1), "occurred")
+	return fmt.Sprintf("Unexpected error:\n%s\n%s", format.Object(actual, 1), "occurred")
 }
diff --git a/api/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go b/api/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go
index c8993a86d..827475ea5 100644
--- a/api/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go
+++ b/api/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go
@@ -25,7 +25,17 @@ func (matcher *MatchErrorMatcher) Match(actual interface{}) (success bool, err e
 	expected := matcher.Expected
 
 	if isError(expected) {
-		return reflect.DeepEqual(actualErr, expected) || errors.Is(actualErr, expected.(error)), nil
+		// first try the built-in errors.Is
+		if errors.Is(actualErr, expected.(error)) {
+			return true, nil
+		}
+		// if not, try DeepEqual along the error chain
+		for unwrapped := actualErr; unwrapped != nil; unwrapped = errors.Unwrap(unwrapped) {
+			if reflect.DeepEqual(unwrapped, expected) {
+				return true, nil
+			}
+		}
+		return false, nil
 	}
 
 	if isString(expected) {
diff --git a/api/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go b/api/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go
index 0c83c2b63..2cb6b47db 100644
--- a/api/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go
+++ b/api/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go
@@ -5,7 +5,7 @@ import (
 	"strings"
 
 	"github.com/onsi/gomega/format"
-	"gopkg.in/yaml.v2"
+	"gopkg.in/yaml.v3"
 )
 
 type MatchYAMLMatcher struct {
diff --git a/api/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go b/api/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go
index 721ed5529..327350f7b 100644
--- a/api/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go
+++ b/api/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go
@@ -1,11 +1,16 @@
 package matchers
 
 import (
+	"errors"
 	"fmt"
 
 	"github.com/onsi/gomega/format"
 )
 
+type formattedGomegaError interface {
+	FormattedGomegaError() string
+}
+
 type SucceedMatcher struct {
 }
 
@@ -25,7 +30,11 @@ func (matcher *SucceedMatcher) Match(actual interface{}) (success bool, err erro
 }
 
 func (matcher *SucceedMatcher) FailureMessage(actual interface{}) (message string) {
-	return fmt.Sprintf("Expected success, but got an error:\n%s\n%s", format.Object(actual, 1), format.IndentString(actual.(error).Error(), 1))
+	var fgErr formattedGomegaError
+	if errors.As(actual.(error), &fgErr) {
+		return fgErr.FormattedGomegaError()
+	}
+	return fmt.Sprintf("Expected success, but got an error:\n%s", format.Object(actual, 1))
 }
 
 func (matcher *SucceedMatcher) NegatedFailureMessage(actual interface{}) (message string) {
diff --git a/api/vendor/github.com/onsi/gomega/tools b/api/vendor/github.com/onsi/gomega/tools
deleted file mode 100644
index e4195cf36..000000000
--- a/api/vendor/github.com/onsi/gomega/tools
+++ /dev/null
@@ -1,8 +0,0 @@
-//go:build tools
-// +build tools
-
-package main
-
-import (
-	_ "github.com/onsi/ginkgo/v2/ginkgo"
-)
diff --git a/api/vendor/github.com/onsi/gomega/types/types.go b/api/vendor/github.com/onsi/gomega/types/types.go
index c315ef065..7c7adb941 100644
--- a/api/vendor/github.com/onsi/gomega/types/types.go
+++ b/api/vendor/github.com/onsi/gomega/types/types.go
@@ -1,12 +1,13 @@
 package types
 
 import (
+	"context"
 	"time"
 )
 
 type GomegaFailHandler func(message string, callerSkip ...int)
 
-//A simple *testing.T interface wrapper
+// A simple *testing.T interface wrapper
 type GomegaTestingT interface {
 	Helper()
 	Fatalf(format string, args ...interface{})
@@ -18,11 +19,11 @@ type Gomega interface {
 	Expect(actual interface{}, extra ...interface{}) Assertion
 	ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Assertion
 
-	Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion
-	EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion
+	Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion
+	EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion
 
-	Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion
-	ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion
+	Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion
+	ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion
 
 	SetDefaultEventuallyTimeout(time.Duration)
 	SetDefaultEventuallyPollingInterval(time.Duration)
@@ -30,9 +31,9 @@ type Gomega interface {
 	SetDefaultConsistentlyPollingInterval(time.Duration)
 }
 
-//All Gomega matchers must implement the GomegaMatcher interface
+// All Gomega matchers must implement the GomegaMatcher interface
 //
-//For details on writing custom matchers, check out: http://onsi.github.io/gomega/#adding-your-own-matchers
+// For details on writing custom matchers, check out: http://onsi.github.io/gomega/#adding-your-own-matchers
 type GomegaMatcher interface {
 	Match(actual interface{}) (success bool, err error)
 	FailureMessage(actual interface{}) (message string)
@@ -70,6 +71,11 @@ type AsyncAssertion interface {
 	WithOffset(offset int) AsyncAssertion
 	WithTimeout(interval time.Duration) AsyncAssertion
 	WithPolling(interval time.Duration) AsyncAssertion
+	Within(timeout time.Duration) AsyncAssertion
+	ProbeEvery(interval time.Duration) AsyncAssertion
+	WithContext(ctx context.Context) AsyncAssertion
+	WithArguments(argsToForward ...interface{}) AsyncAssertion
+	MustPassRepeatedly(count int) AsyncAssertion
 }
 
 // Assertions are returned by Ω and Expect and enable assertions against Gomega matchers
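The AsyncAssertion methods added above (Within, ProbeEvery, WithContext) can be chained like this; a hypothetical test, assuming Gomega's context-aware polling behaves as documented:

package async_test

import (
	"context"
	"testing"
	"time"

	. "github.com/onsi/gomega"
)

func TestEventuallyWithContext(t *testing.T) {
	g := NewWithT(t)
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	counter := 0
	g.Eventually(func() int {
		counter++ // polled repeatedly until the matcher passes or the deadline hits
		return counter
	}).WithContext(ctx).Within(500 * time.Millisecond).ProbeEvery(10 * time.Millisecond).Should(BeNumerically(">", 3))
}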
index 15167cd74..000000000 --- a/api/vendor/golang.org/x/net/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/api/vendor/golang.org/x/net/CONTRIBUTORS b/api/vendor/golang.org/x/net/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/api/vendor/golang.org/x/net/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/api/vendor/golang.org/x/net/context/context.go b/api/vendor/golang.org/x/net/context/context.go new file mode 100644 index 000000000..cf66309c4 --- /dev/null +++ b/api/vendor/golang.org/x/net/context/context.go @@ -0,0 +1,56 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package context defines the Context type, which carries deadlines, +// cancelation signals, and other request-scoped values across API boundaries +// and between processes. +// As of Go 1.7 this package is available in the standard library under the +// name context. https://golang.org/pkg/context. +// +// Incoming requests to a server should create a Context, and outgoing calls to +// servers should accept a Context. The chain of function calls between them +// must propagate the Context, optionally replacing it with a modified copy created +// using WithDeadline, WithTimeout, WithCancel, or WithValue. +// +// Programs that use Contexts should follow these rules to keep interfaces +// consistent across packages and enable static analysis tools to check context +// propagation: +// +// Do not store Contexts inside a struct type; instead, pass a Context +// explicitly to each function that needs it. The Context should be the first +// parameter, typically named ctx: +// +// func DoSomething(ctx context.Context, arg Arg) error { +// // ... use ctx ... +// } +// +// Do not pass a nil Context, even if a function permits it. Pass context.TODO +// if you are unsure about which Context to use. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +// +// The same Context may be passed to functions running in different goroutines; +// Contexts are safe for simultaneous use by multiple goroutines. +// +// See http://blog.golang.org/context for example code for a server that uses +// Contexts. +package context // import "golang.org/x/net/context" + +// Background returns a non-nil, empty Context. It is never canceled, has no +// values, and has no deadline. It is typically used by the main function, +// initialization, and tests, and as the top-level Context for incoming +// requests. +func Background() Context { + return background +} + +// TODO returns a non-nil, empty Context. Code should use context.TODO when +// it's unclear which Context to use or it is not yet available (because the +// surrounding function has not yet been extended to accept a Context +// parameter). TODO is recognized by static analysis tools that determine +// whether Contexts are propagated correctly in a program.
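Since the package documentation above is dense, a small, self-contained sketch of the intended call pattern may help (DoSomething mirrors the illustrative name used in the docs; this is not part of the vendored file):

    package main

    import (
        "fmt"

        "golang.org/x/net/context"
    )

    func DoSomething(ctx context.Context) error {
        // ... use ctx for cancelation signals and request-scoped values ...
        return nil
    }

    func main() {
        // Background is the root of a context tree: never canceled, no deadline.
        if err := DoSomething(context.Background()); err != nil {
            fmt.Println(err)
        }
        // TODO marks call sites that have not yet been plumbed for a real
        // Context; static analysis tools can flag these for follow-up.
        _ = DoSomething(context.TODO())
    }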
+func TODO() Context { + return todo +} diff --git a/api/vendor/golang.org/x/net/context/go17.go b/api/vendor/golang.org/x/net/context/go17.go new file mode 100644 index 000000000..2cb9c408f --- /dev/null +++ b/api/vendor/golang.org/x/net/context/go17.go @@ -0,0 +1,73 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.7 +// +build go1.7 + +package context + +import ( + "context" // standard library's context, as of Go 1.7 + "time" +) + +var ( + todo = context.TODO() + background = context.Background() +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = context.Canceled + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = context.DeadlineExceeded + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + ctx, f := context.WithCancel(parent) + return ctx, f +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + ctx, f := context.WithDeadline(parent, deadline) + return ctx, f +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return context.WithValue(parent, key, val) +} diff --git a/api/vendor/golang.org/x/net/context/go19.go b/api/vendor/golang.org/x/net/context/go19.go new file mode 100644 index 000000000..64d31ecc3 --- /dev/null +++ b/api/vendor/golang.org/x/net/context/go19.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
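Because the go1.7 build of this package (above) simply forwards to the standard library, contexts created through it interoperate directly with std contexts. A short illustrative sketch, not part of the diff:

    package main

    import (
        stdctx "context"
        "fmt"
        "time"

        netctx "golang.org/x/net/context"
    )

    func main() {
        ctx, cancel := netctx.WithTimeout(netctx.Background(), 50*time.Millisecond)
        defer cancel() // release the timer promptly, as the docs advise

        // On Go >= 1.7 the wrappers delegate to package context, so the value
        // also satisfies the standard library's context.Context interface.
        var std stdctx.Context = ctx
        <-std.Done()
        fmt.Println(std.Err()) // context deadline exceeded
    }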
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.9 +// +build go1.9 + +package context + +import "context" // standard library's context, as of Go 1.7 + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context = context.Context + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc = context.CancelFunc diff --git a/api/vendor/golang.org/x/net/context/pre_go17.go b/api/vendor/golang.org/x/net/context/pre_go17.go new file mode 100644 index 000000000..7b6b68511 --- /dev/null +++ b/api/vendor/golang.org/x/net/context/pre_go17.go @@ -0,0 +1,301 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.7 +// +build !go1.7 + +package context + +import ( + "errors" + "fmt" + "sync" + "time" +) + +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case background: + return "context.Background" + case todo: + return "context.TODO" + } + return "unknown empty Context" +} + +var ( + background = new(emptyCtx) + todo = new(emptyCtx) +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = errors.New("context canceled") + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = errors.New("context deadline exceeded") + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + c := newCancelCtx(parent) + propagateCancel(parent, c) + return c, func() { c.cancel(true, Canceled) } +} + +// newCancelCtx returns an initialized cancelCtx. +func newCancelCtx(parent Context) *cancelCtx { + return &cancelCtx{ + Context: parent, + done: make(chan struct{}), + } +} + +// propagateCancel arranges for child to be canceled when parent is. 
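The function below, together with parentCancelCtx, is what makes cancellation flow down the context tree on pre-go1.7 builds. The observable contract it implements is standard context behavior, sketched here for illustration:

    package main

    import (
        "fmt"

        "golang.org/x/net/context"
    )

    func main() {
        parent, cancelParent := context.WithCancel(context.Background())
        child, cancelChild := context.WithCancel(parent)
        defer cancelChild()

        // Canceling the parent also closes the child's Done channel: the child
        // either registered itself in the parent's children map, or a goroutine
        // watches the parent's Done channel on its behalf.
        cancelParent()
        <-child.Done()
        fmt.Println(child.Err()) // context canceled
    }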
+func propagateCancel(parent Context, child canceler) { + if parent.Done() == nil { + return // parent is never canceled + } + if p, ok := parentCancelCtx(parent); ok { + p.mu.Lock() + if p.err != nil { + // parent has already been canceled + child.cancel(false, p.err) + } else { + if p.children == nil { + p.children = make(map[canceler]bool) + } + p.children[child] = true + } + p.mu.Unlock() + } else { + go func() { + select { + case <-parent.Done(): + child.cancel(false, parent.Err()) + case <-child.Done(): + } + }() + } +} + +// parentCancelCtx follows a chain of parent references until it finds a +// *cancelCtx. This function understands how each of the concrete types in this +// package represents its parent. +func parentCancelCtx(parent Context) (*cancelCtx, bool) { + for { + switch c := parent.(type) { + case *cancelCtx: + return c, true + case *timerCtx: + return c.cancelCtx, true + case *valueCtx: + parent = c.Context + default: + return nil, false + } + } +} + +// removeChild removes a context from its parent. +func removeChild(parent Context, child canceler) { + p, ok := parentCancelCtx(parent) + if !ok { + return + } + p.mu.Lock() + if p.children != nil { + delete(p.children, child) + } + p.mu.Unlock() +} + +// A canceler is a context type that can be canceled directly. The +// implementations are *cancelCtx and *timerCtx. +type canceler interface { + cancel(removeFromParent bool, err error) + Done() <-chan struct{} +} + +// A cancelCtx can be canceled. When canceled, it also cancels any children +// that implement canceler. +type cancelCtx struct { + Context + + done chan struct{} // closed by the first cancel call. + + mu sync.Mutex + children map[canceler]bool // set to nil by the first cancel call + err error // set to non-nil by the first cancel call +} + +func (c *cancelCtx) Done() <-chan struct{} { + return c.done +} + +func (c *cancelCtx) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *cancelCtx) String() string { + return fmt.Sprintf("%v.WithCancel", c.Context) +} + +// cancel closes c.done, cancels each of c's children, and, if +// removeFromParent is true, removes c from its parent's children. +func (c *cancelCtx) cancel(removeFromParent bool, err error) { + if err == nil { + panic("context: internal error: missing cancel error") + } + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return // already canceled + } + c.err = err + close(c.done) + for child := range c.children { + // NOTE: acquiring the child's lock while holding parent's lock. + child.cancel(false, err) + } + c.children = nil + c.mu.Unlock() + + if removeFromParent { + removeChild(c.Context, c) + } +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { + // The current deadline is already sooner than the new one. 
+ return WithCancel(parent) + } + c := &timerCtx{ + cancelCtx: newCancelCtx(parent), + deadline: deadline, + } + propagateCancel(parent, c) + d := deadline.Sub(time.Now()) + if d <= 0 { + c.cancel(true, DeadlineExceeded) // deadline has already passed + return c, func() { c.cancel(true, Canceled) } + } + c.mu.Lock() + defer c.mu.Unlock() + if c.err == nil { + c.timer = time.AfterFunc(d, func() { + c.cancel(true, DeadlineExceeded) + }) + } + return c, func() { c.cancel(true, Canceled) } +} + +// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to +// implement Done and Err. It implements cancel by stopping its timer then +// delegating to cancelCtx.cancel. +type timerCtx struct { + *cancelCtx + timer *time.Timer // Under cancelCtx.mu. + + deadline time.Time +} + +func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { + return c.deadline, true +} + +func (c *timerCtx) String() string { + return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) +} + +func (c *timerCtx) cancel(removeFromParent bool, err error) { + c.cancelCtx.cancel(false, err) + if removeFromParent { + // Remove this timerCtx from its parent cancelCtx's children. + removeChild(c.cancelCtx.Context, c) + } + c.mu.Lock() + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } + c.mu.Unlock() +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return &valueCtx{parent, key, val} +} + +// A valueCtx carries a key-value pair. It implements Value for that key and +// delegates all other calls to the embedded Context. +type valueCtx struct { + Context + key, val interface{} +} + +func (c *valueCtx) String() string { + return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) +} + +func (c *valueCtx) Value(key interface{}) interface{} { + if c.key == key { + return c.val + } + return c.Context.Value(key) +} diff --git a/api/vendor/golang.org/x/net/context/pre_go19.go b/api/vendor/golang.org/x/net/context/pre_go19.go new file mode 100644 index 000000000..1f9715341 --- /dev/null +++ b/api/vendor/golang.org/x/net/context/pre_go19.go @@ -0,0 +1,110 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.9 +// +build !go1.9 + +package context + +import "time" + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. 
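Tying the deadline and value plumbing above together, a short illustrative sketch (the key type and the request-ID value are hypothetical, following the unexported-key pattern the package documentation recommends):

    package main

    import (
        "fmt"
        "time"

        "golang.org/x/net/context"
    )

    // key is an unexported type so keys from different packages cannot collide.
    type key int

    const requestIDKey key = 0

    func main() {
        ctx := context.WithValue(context.Background(), requestIDKey, "req-42")
        ctx, cancel := context.WithDeadline(ctx, time.Now().Add(10*time.Millisecond))
        defer cancel() // stops the timer even if the deadline never fires

        // Value lookups delegate up the chain of wrappers to the root.
        fmt.Println(ctx.Value(requestIDKey)) // req-42

        <-ctx.Done()
        fmt.Println(ctx.Err()) // context deadline exceeded
    }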
+type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + // + // WithCancel arranges for Done to be closed when cancel is called; + // WithDeadline arranges for Done to be closed when the deadline + // expires; WithTimeout arranges for Done to be closed when the timeout + // elapses. + // + // Done is provided for use in select statements: + // + // // Stream generates values with DoSomething and sends them to out + // // until DoSomething returns an error or ctx.Done is closed. + // func Stream(ctx context.Context, out chan<- Value) error { + // for { + // v, err := DoSomething(ctx) + // if err != nil { + // return err + // } + // select { + // case <-ctx.Done(): + // return ctx.Err() + // case out <- v: + // } + // } + // } + // + // See http://blog.golang.org/pipelines for more examples of how to use + // a Done channel for cancelation. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key return the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + // + // A key identifies a specific value in a Context. Functions that wish + // to store values in Context typically allocate a key in a global + // variable then use that key as the argument to context.WithValue and + // Context.Value. A key can be any type that supports equality; + // packages should define keys as an unexported type to avoid + // collisions. + // + // Packages that define a Context key should provide type-safe accessors + // for the values stored using that key: + // + // // Package user defines a User type that's stored in Contexts. + // package user + // + // import "golang.org/x/net/context" + // + // // User is the type of value stored in the Contexts. + // type User struct {...} + // + // // key is an unexported type for keys defined in this package. + // // This prevents collisions with keys defined in other packages. + // type key int + // + // // userKey is the key for user.User values in Contexts. It is + // // unexported; clients use user.NewContext and user.FromContext + // // instead of using this key directly. + // var userKey key = 0 + // + // // NewContext returns a new Context that carries value u. + // func NewContext(ctx context.Context, u *User) context.Context { + // return context.WithValue(ctx, userKey, u) + // } + // + // // FromContext returns the User value stored in ctx, if any. + // func FromContext(ctx context.Context) (*User, bool) { + // u, ok := ctx.Value(userKey).(*User) + // return u, ok + // } + Value(key interface{}) interface{} +} + +// A CancelFunc tells an operation to abandon its work.
+// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc func() diff --git a/api/vendor/golang.org/x/net/html/doc.go b/api/vendor/golang.org/x/net/html/doc.go index 822ed42a0..2466ae3d9 100644 --- a/api/vendor/golang.org/x/net/html/doc.go +++ b/api/vendor/golang.org/x/net/html/doc.go @@ -92,6 +92,27 @@ example, to process each anchor node in depth-first order: The relevant specifications include: https://html.spec.whatwg.org/multipage/syntax.html and https://html.spec.whatwg.org/multipage/syntax.html#tokenization + +# Security Considerations + +Care should be taken when parsing and interpreting HTML, whether full documents +or fragments, within the framework of the HTML specification, especially with +regard to untrusted inputs. + +This package provides both a tokenizer and a parser, which implement the +tokenization, and tokenization and tree construction stages of the WHATWG HTML +parsing specification respectively. While the tokenizer parses and normalizes +individual HTML tokens, only the parser constructs the DOM tree from the +tokenized HTML, as described in the tree construction stage of the +specification, dynamically modifying or extending the document's DOM tree. + +If your use case requires semantically well-formed HTML documents, as defined by +the WHATWG specification, the parser should be used rather than the tokenizer. + +In security contexts, if trust decisions are being made using the tokenized or +parsed content, the input must be re-serialized (for instance by using Render or +Token.String) in order for those trust decisions to hold, as the process of +tokenization or parsing may alter the content. */ package html // import "golang.org/x/net/html" diff --git a/api/vendor/golang.org/x/net/html/escape.go b/api/vendor/golang.org/x/net/html/escape.go index d85613962..04c6bec21 100644 --- a/api/vendor/golang.org/x/net/html/escape.go +++ b/api/vendor/golang.org/x/net/html/escape.go @@ -193,6 +193,87 @@ func lower(b []byte) []byte { return b } +// escapeComment is like func escape but escapes its input bytes less often. +// Per https://github.com/golang/go/issues/58246 some HTML comments are (1) +// meaningful and (2) contain angle brackets that we'd like to avoid escaping +// unless we have to. +// +// "We have to" includes the '&' byte, since that introduces other escapes. +// +// It also includes those bytes (not including EOF) that would otherwise end +// the comment. Per the summary table at the bottom of comment_test.go, this is +// the '>' byte that, per above, we'd like to avoid escaping unless we have to. +// +// Studying the summary table (and T actions in its '>' column) closely, we +// only need to escape in states 43, 44, 49, 51 and 52. State 43 is at the +// start of the comment data. State 52 is after a '!'. The other three states +// are after a '-'. +// +// Our algorithm is thus to escape every '&' and to escape '>' if and only if: +// - The '>' is after a '!' or '-' (in the unescaped data) or +// - The '>' is at the start of the comment data (after the opening "<!--"). +func escapeComment(w writer, s string) error { + // When modifying this function, consider manually increasing the + // maxSuffixLen constant in func TestComments, from 6 to e.g. 9 or more. + // That increase should be temporary, not committed, as it exponentially + // affects the test running time. + if len(s) == 0 { + return nil + } + + // Loop: + // - Grow j such that s[i:j] does not need escaping. + // - If s[j] does need escaping, output s[i:j] and an escaped s[j], + // resetting i and j to point past that s[j] byte. + i := 0 + for j := 0; j < len(s); j++ { + escaped := "" + switch s[j] { + case '&': + escaped = "&amp;" + + case '>': + if j > 0 { + if prev := s[j-1]; (prev != '!') && (prev != '-') { + continue + } + } + escaped = "&gt;" + + default: + continue + } + + if i < j { + if _, err := w.WriteString(s[i:j]); err != nil { + return err + } + } + if _, err := w.WriteString(escaped); err != nil { + return err + } + i = j + 1 + } + + if i < len(s) { + if _, err := w.WriteString(s[i:]); err != nil { + return err + } + } + return nil +} + +// escapeCommentString is to EscapeString as escapeComment is to escape. +func escapeCommentString(s string) string { + if strings.IndexAny(s, "&>") == -1 { + return s + } + var buf bytes.Buffer + escapeComment(&buf, s) + return buf.String() +} + const escapedChars = "&'<>\"\r" func escape(w writer, s string) error { diff --git a/api/vendor/golang.org/x/net/html/render.go b/api/vendor/golang.org/x/net/html/render.go --- a/api/vendor/golang.org/x/net/html/render.go +++ b/api/vendor/golang.org/x/net/html/render.go @@ -85,7 +85,7 @@ func render1(w writer, n *Node) error { if _, err := w.WriteString("<!--"); err != nil { return err } - if _, err := w.WriteString(n.Data); err != nil { + if err := escapeComment(w, n.Data); err != nil { return err } if _, err := w.WriteString("-->"); err != nil { @@ -96,7 +96,7 @@ func render1(w writer, n *Node) error { if _, err := w.WriteString("<!DOCTYPE "); err != nil { return err } - if _, err := w.WriteString(n.Data); err != nil { + if err := escape(w, n.Data); err != nil { return err } if n.Attr != nil { @@ -185,8 +185,7 @@ func render1(w writer, n *Node) error { } // Render any child nodes. - switch n.Data { - case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "xmp": + if childTextNodesAreLiteral(n) { for c := n.FirstChild; c != nil; c = c.NextSibling { if c.Type == TextNode { if _, err := w.WriteString(c.Data); err != nil { @@ -252,6 +251,27 @@ func render1(w writer, n *Node) error { return w.WriteByte('>') } +func childTextNodesAreLiteral(n *Node) bool { + // Per WHATWG HTML 13.3, if the parent of the current node is a style, + // script, xmp, iframe, noembed, noframes, or plaintext element, and the + // current node is a text node, append the value of the node's data + // literally.
The specification is not explicit about it, but we only + // enforce this if we are in the HTML namespace (i.e. when the namespace is + // ""). + // NOTE: we also always include noscript elements, although the + // specification states that they should only be rendered as such if + // scripting is enabled for the node (which is not something we track). + if n.Namespace != "" { + return false + } + switch n.Data { + case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "xmp": + return true + default: + return false + } +} + // writeQuoted writes s to w surrounded by quotes. Normally it will use double // quotes, but if s contains a double quote, it will use single quotes. // It is used for writing the identifiers in a doctype declaration. diff --git a/api/vendor/golang.org/x/net/html/token.go b/api/vendor/golang.org/x/net/html/token.go index 877709f99..de67f938a 100644 --- a/api/vendor/golang.org/x/net/html/token.go +++ b/api/vendor/golang.org/x/net/html/token.go @@ -110,9 +110,9 @@ func (t Token) String() string { case SelfClosingTagToken: return "<" + t.tagString() + "/>" case CommentToken: - return "<!--" + t.Data + "-->" + return "<!--" + escapeCommentString(t.Data) + "-->" case DoctypeToken: - return "<!DOCTYPE " + t.Data + ">" + return "<!DOCTYPE " + EscapeString(t.Data) + ">" } return "Invalid(" + strconv.Itoa(int(t.Type)) + ")" } @@ -598,6 +598,11 @@ scriptDataDoubleEscapeEnd: // readComment reads the next comment token starting with "<!--". The opening // "<!--" has already been consumed. func (z *Tokenizer) readComment() { + // When modifying this function, consider manually increasing the + // maxSuffixLen constant in func TestComments, from 6 to e.g. 9 or more. + // That increase should be temporary, not committed, as it exponentially + // affects the test running time. + z.data.start = z.raw.end defer func() { if z.data.end < z.data.start { @@ -605,14 +610,13 @@ func (z *Tokenizer) readComment() { z.data.end = z.data.start } }() - for dashCount := 2; ; { + + var dashCount int + beginning := true + for { c := z.readByte() if z.err != nil { - // Ignore up to two dashes at EOF. - if dashCount > 2 { - dashCount = 2 - } - z.data.end = z.raw.end - dashCount + z.data.end = z.calculateAbruptCommentDataEnd() return } switch c { @@ -620,7 +624,7 @@ func (z *Tokenizer) readComment() { dashCount++ continue case '>': - if dashCount >= 2 { + if dashCount >= 2 || beginning { z.data.end = z.raw.end - len("-->") return } @@ -628,17 +632,50 @@ func (z *Tokenizer) readComment() { if dashCount >= 2 { c = z.readByte() if z.err != nil { - z.data.end = z.raw.end + z.data.end = z.calculateAbruptCommentDataEnd() return - } - if c == '>' { + } else if c == '>' { z.data.end = z.raw.end - len("--!>") return + } else if c == '-' { + dashCount = 1 + beginning = false + continue } } } dashCount = 0 + beginning = false + } +} + +func (z *Tokenizer) calculateAbruptCommentDataEnd() int { + raw := z.Raw() + const prefixLen = len("<!--")
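To see the comment handling above end to end, a short illustrative sketch (not part of the diff) that tokenizes a comment cut off at EOF, exercising the abrupt-end trimming, and then re-serializes comment data with Token.String, which escapes a '>' that follows '-' or '!' per escapeComment's rules:

    package main

    import (
        "fmt"
        "strings"

        "golang.org/x/net/html"
    )

    func main() {
        // "<!--a--" hits EOF after two dashes; calculateAbruptCommentDataEnd
        // trims the trailing "--" so the comment data is just "a".
        z := html.NewTokenizer(strings.NewReader("<!--a--"))
        for {
            tt := z.Next()
            if tt == html.ErrorToken {
                break
            }
            fmt.Printf("%v %q\n", tt, z.Token().Data) // Comment "a"
        }

        // Data that could prematurely close a comment is escaped when the
        // token is turned back into markup.
        tok := html.Token{Type: html.CommentToken, Data: "--!>"}
        fmt.Println(tok.String()) // <!----!&gt;--> per the escaping rules above
    }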