From 9d3553b020b02387a32bc05f64ad647d767feab5 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Fri, 21 Jul 2023 16:53:10 -0400 Subject: [PATCH 001/121] HTTP Grafana dashboard Signed-off-by: Alan Cha --- abn/service.go | 16 +- action/run.go | 1 + base/collect_grpc.go | 11 +- base/collect_http.go | 112 +++++++++- base/experiment.go | 8 + charts/iter8/templates/_experiment.tpl | 3 + charts/iter8/templates/_k-job.tpl | 3 + charts/iter8/values.yaml | 10 +- cmd/controllers.go | 8 +- cmd/controllers_test.go | 1 - cmd/klaunch.go | 2 +- go.mod | 8 +- go.sum | 18 +- metrics/server.go | 270 +++++++++++++++++++++---- metrics/server_test.go | 162 +++++++++++++++ storage/badgerdb/simple.go | 38 ++++ storage/badgerdb/simple_test.go | 49 +++++ storage/interface.go | 8 +- 18 files changed, 652 insertions(+), 76 deletions(-) diff --git a/abn/service.go b/abn/service.go index 1fc39ce89..844e87f4d 100644 --- a/abn/service.go +++ b/abn/service.go @@ -24,8 +24,11 @@ import ( ) const ( - // metricsDirEnv is the environment variable identifying the directory with metrics storage - metricsDirEnv = "METRICS_DIR" + // MetricsDirEnv is the environment variable identifying the directory with metrics storage + MetricsDirEnv = "METRICS_DIR" + + configEnv = "ABN_CONFIG_FILE" + defaultPortNumber = 50051 ) var ( @@ -85,11 +88,6 @@ func (server *abnServer) WriteMetric(ctx context.Context, metricMsg *pb.MetricVa ) } -const ( - configEnv = "ABN_CONFIG_FILE" - defaultPortNumber = 50051 -) - // abnConfig defines the configuration of the controllers type abnConfig struct { // Port is port number on which the abn gRPC service should listen @@ -119,8 +117,8 @@ func LaunchGRPCServer(opts []grpc.ServerOption, stopCh <-chan struct{}) error { grpcServer := grpc.NewServer(opts...) pb.RegisterABNServer(grpcServer, newServer()) - // configure metricsClient if needed - MetricsClient, err = badgerdb.GetClient(badger.DefaultOptions(os.Getenv(metricsDirEnv)), badgerdb.AdditionalOptions{}) + // configure MetricsClient if needed + MetricsClient, err = badgerdb.GetClient(badger.DefaultOptions(os.Getenv(MetricsDirEnv)), badgerdb.AdditionalOptions{}) if err != nil { log.Logger.Error("Unable to configure metrics storage client ", err) return err diff --git a/action/run.go b/action/run.go index c9e2a7df5..8a3f01d80 100644 --- a/action/run.go +++ b/action/run.go @@ -32,5 +32,6 @@ func (rOpts *RunOpts) KubeRun() error { if err := rOpts.KubeDriver.InitKube(); err != nil { return err } + return base.RunExperiment(rOpts.ReuseResult, rOpts.KubeDriver) } diff --git a/base/collect_grpc.go b/base/collect_grpc.go index 60afcedc5..bdc08cdca 100644 --- a/base/collect_grpc.go +++ b/base/collect_grpc.go @@ -1,7 +1,9 @@ package base import ( + "encoding/json" "fmt" + "os" "time" "github.com/bojand/ghz/runner" @@ -96,7 +98,7 @@ func (t *collectGRPCTask) resultForVersion() (map[string]*runner.Report, error) // merge endpoint options with baseline options if err := mergo.Merge(&endpoint, t.With.Config); err != nil { - log.Logger.Error(fmt.Sprintf("could not merge Fortio options for endpoint \"%s\"", endpointID)) + log.Logger.Error(fmt.Sprintf("could not merge ghz options for endpoint \"%s\"", endpointID)) return nil, err } eOpts := runner.WithConfig(&endpoint) // endpoint options @@ -121,6 +123,13 @@ func (t *collectGRPCTask) resultForVersion() (map[string]*runner.Report, error) return results, err } + igrJSON, _ := json.Marshal(igr) + f, _ := os.Create("ghz.json") + defer f.Close() + f.Write(igrJSON) + + f.Sync() + results[gRPCMetricPrefix] = igr } diff --git 
a/base/collect_http.go b/base/collect_http.go index 5dbe91fcb..8abe872a2 100644 --- a/base/collect_http.go +++ b/base/collect_http.go @@ -1,8 +1,12 @@ package base import ( + "bytes" + "encoding/json" "fmt" "io" + "net/http" + "net/url" "os" "time" @@ -59,6 +63,15 @@ type collectHTTPInputs struct { Endpoints map[string]endpoint `json:"endpoints" yaml:"endpoints"` } +// FortioResult +type FortioResult struct { + // key is the endpoint + EndpointResults map[string]fhttp.HTTPRunnerResults + + // TODO: add type + Summary interface{} +} + const ( // CollectHTTPTaskName is the name of this task which performs load generation and metrics collection. CollectHTTPTaskName = "http" @@ -82,6 +95,10 @@ const ( // prefix used in latency percentile metric names // example: latency-p75.0 is the 75th percentile latency builtInHTTPLatencyPercentilePrefix = "latency-p" + + // TODO: move elsewhere, abn/service seems to produce cyclical dependency + // MetricsServerURL is the URL of the metrics server + MetricsServerURL = "METRICS_SERVER_URL" ) var ( @@ -97,6 +114,7 @@ func (t *collectHTTPTask) errorCode(code int) bool { if code == -1 { return true } + // HTTP errors for _, lims := range t.With.ErrorRanges { // if no lower limit (check upper) @@ -218,14 +236,59 @@ func getFortioOptions(c endpoint) (*fhttp.HTTPRunnerOptions, error) { return fo, nil } +// TODO: rename to /performanceResult +// putResultToMetricsService +func putResultToMetricsService(metricsServerURL, namespace, experiment string, data interface{}) error { + // handle URL and URL parameters + u, _ := url.ParseRequestURI(metricsServerURL + "/result") + params := url.Values{} + params.Add("namespace", namespace) + params.Add("experiment", experiment) + u.RawQuery = params.Encode() + urlStr := fmt.Sprintf("%v", u) + + log.Logger.Trace(fmt.Sprintf("PUT request URL: %s", urlStr)) + + // handle payload + dataBytes, err := json.Marshal(data) + if err != nil { + log.Logger.Error("cannot JSON marshal data for metrics server request: ", err) + return err + } + + // create request + req, err := http.NewRequest(http.MethodPut, urlStr, bytes.NewBuffer(dataBytes)) + if err != nil { + log.Logger.Error("cannot create new HTTP request metrics server: ", err) + return err + } + + req.Header.Set("Content-Type", "application/json") + + log.Logger.Trace("sending request") + + // send request + client := &http.Client{} + _, err = client.Do(req) + if err != nil { + log.Logger.Error("could not send request to metrics server: ", err) + return err + } + + log.Logger.Trace("sent request") + + return nil +} + // getFortioResults collects Fortio run results // func (t *collectHTTPTask) getFortioResults() (*fhttp.HTTPRunnerResults, error) { // key is the metric prefix -func (t *collectHTTPTask) getFortioResults() (map[string]*fhttp.HTTPRunnerResults, error) { +// key is the endpoint +func (t *collectHTTPTask) getFortioResults() (map[string]fhttp.HTTPRunnerResults, error) { // the main idea is to run Fortio with proper options var err error - results := map[string]*fhttp.HTTPRunnerResults{} + results := map[string]fhttp.HTTPRunnerResults{} if len(t.With.Endpoints) > 0 { log.Logger.Trace("multiple endpoints") for endpointID, endpoint := range t.With.Endpoints { @@ -254,7 +317,12 @@ func (t *collectHTTPTask) getFortioResults() (map[string]*fhttp.HTTPRunnerResult continue } - results[httpMetricPrefix+"-"+endpointID] = ifr + // TODO: does ifr need to be a pointer? 
+ // results[httpMetricPrefix+"-"+endpointID] = ifr + results[endpoint.URL] = *ifr + + // TODO: namespace and experiment name + // putData(metricsServerURL, exp.Metadata.Namespace, exp.Metadata.Name, ifr) } } else { fo, err := getFortioOptions(t.With.endpoint) @@ -273,12 +341,21 @@ func (t *collectHTTPTask) getFortioResults() (map[string]*fhttp.HTTPRunnerResult return results, err } - results[httpMetricPrefix] = ifr + // TODO: does ifr need to be a pointer? + // results[httpMetricPrefix] = ifr + results[t.With.endpoint.URL] = *ifr } return results, err } +type fortioResult struct { + // key is the endpoint + EndpointResults map[string]fhttp.HTTPRunnerResults + + Summary interface{} +} + // run executes this task func (t *collectHTTPTask) run(exp *Experiment) error { err := t.validateInputs() @@ -294,6 +371,12 @@ func (t *collectHTTPTask) run(exp *Experiment) error { return err } + // TODO: warmup option + // // ignore results if warmup + // if t.With.Warmup != nil && *t.With.Warmup { + // log.Logger.Debug("warmup: ignoring results") + // return nil + // } // ignore results if warmup if t.With.Warmup != nil && *t.With.Warmup { log.Logger.Debug("warmup: ignoring results") @@ -308,6 +391,7 @@ func (t *collectHTTPTask) run(exp *Experiment) error { } in := exp.Result.Insights + // TODO: delete for provider, data := range data { // request count m := provider + "/" + builtInHTTPRequestCountID @@ -419,6 +503,26 @@ func (t *collectHTTPTask) run(exp *Experiment) error { } } + // push data to metrics service + fortioResult := fortioResult{ + EndpointResults: data, + Summary: exp.Result.Insights, + } + + // get URL of metrics server from environment variable + metricsServerURL, ok := os.LookupEnv(MetricsServerURL) + if !ok { + errorMessage := "could not look up METRICS_SERVER_URL environment variable" + log.Logger.Error(errorMessage) + return fmt.Errorf(errorMessage) + } + + // TODO: remove + fortioResultBytes, _ := json.Marshal(fortioResult) + log.Logger.Trace(string(fortioResultBytes)) + + putResultToMetricsService(metricsServerURL, exp.Metadata.Namespace, exp.Metadata.Name, fortioResult) + return nil } diff --git a/base/experiment.go b/base/experiment.go index 92e4a4289..0c8519e54 100644 --- a/base/experiment.go +++ b/base/experiment.go @@ -33,8 +33,16 @@ type Task interface { // ExperimentSpec specifies the set of tasks in this experiment type ExperimentSpec []Task +type ExperimentMetadata struct { + Name string + + Namespace string +} + // Experiment struct containing spec and result type Experiment struct { + Metadata ExperimentMetadata + // Spec is the sequence of tasks that constitute this experiment Spec ExperimentSpec `json:"spec" yaml:"spec"` diff --git a/charts/iter8/templates/_experiment.tpl b/charts/iter8/templates/_experiment.tpl index 9ba716932..f49b1374b 100644 --- a/charts/iter8/templates/_experiment.tpl +++ b/charts/iter8/templates/_experiment.tpl @@ -2,6 +2,9 @@ {{- if not .Values.tasks }} {{- fail ".Values.tasks is empty" }} {{- end }} +metadata: + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} spec: {{- range .Values.tasks }} {{- if eq "assess" . 
}} diff --git a/charts/iter8/templates/_k-job.tpl b/charts/iter8/templates/_k-job.tpl index aac373cdc..59b906ca8 100644 --- a/charts/iter8/templates/_k-job.tpl +++ b/charts/iter8/templates/_k-job.tpl @@ -33,6 +33,9 @@ spec: - ALL runAsNonRoot: true runAsUser: 1001040000 + env: + - name: METRICS_SERVER_URL + value: "{{ .Values.metricsServerURL }}" restartPolicy: Never backoffLimit: 0 {{- end }} diff --git a/charts/iter8/values.yaml b/charts/iter8/values.yaml index 0344d00d0..27990b3e4 100644 --- a/charts/iter8/values.yaml +++ b/charts/iter8/values.yaml @@ -9,11 +9,17 @@ runner: none logLevel: info -### Resource limits +abnmetrics: + endpoint: iter8-abn:50051 + +### resources are the resource limits for the pods resources: requests: memory: "64Mi" cpu: "250m" limits: memory: "128Mi" - cpu: "500m" \ No newline at end of file + cpu: "500m" + +### metricsServerURL is the URL to the Metrics server +metricsServerURL: http://iter8.default:8080 \ No newline at end of file diff --git a/cmd/controllers.go b/cmd/controllers.go index 134c67fc1..ffce71f5b 100644 --- a/cmd/controllers.go +++ b/cmd/controllers.go @@ -52,16 +52,16 @@ func newControllersCmd(stopCh <-chan struct{}, client k8sclient.Interface) *cobr var err error client, err = k8sclient.New(settings) if err != nil { - log.Logger.Error("could not obtain Kube client ... ") + log.Logger.Error("could not obtain Kube client... ") return err } } if err := controllers.Start(stopCh, client); err != nil { - log.Logger.Error("controllers did not start ... ") + log.Logger.Error("controllers did not start... ") return err } - log.Logger.Debug("started controllers ... ") + log.Logger.Debug("started controllers... ") // launch gRPC server to respond to frontend requests go func() { @@ -84,7 +84,7 @@ func newControllersCmd(stopCh <-chan struct{}, client k8sclient.Interface) *cobr sigCh := make(chan os.Signal, 1) signal.Notify(sigCh, syscall.SIGTERM, os.Interrupt) <-sigCh - log.Logger.Warn("SIGTERM ... ") + log.Logger.Warn("SIGTERM... ") } return nil diff --git a/cmd/controllers_test.go b/cmd/controllers_test.go index 969fcbbd5..085a2ec75 100644 --- a/cmd/controllers_test.go +++ b/cmd/controllers_test.go @@ -25,5 +25,4 @@ func TestControllers(t *testing.T) { cmd := newControllersCmd(ctx.Done(), kubeClient) err := cmd.RunE(cmd, nil) assert.NoError(t, err) - } diff --git a/cmd/klaunch.go b/cmd/klaunch.go index d6e22277e..1ba923832 100644 --- a/cmd/klaunch.go +++ b/cmd/klaunch.go @@ -26,7 +26,7 @@ Use the dry option to simulate a Kubernetes experiment. This creates the manifes The launch command creates the 'charts' subdirectory under the current working directory, downloads the Iter8 experiment chart, and places it under 'charts'. This behavior can be controlled using various launch flags. -This command supports setting values using the same mechanisms as in Helm. Please see https://helm.sh/docs/chart_template_guide/values_files/ for more detailed descriptions. In particular, this command supports the --set, --set-file, --set-string, and -f (--values) options all of which have the same behavior as in Helm. +This command supports setting values using the same mechanisms as in Helm. Please see https://helm.sh/docs/chart_template_guide/values_files/ for more detailed descriptions. In particular, this command supports the --set, --set-file, --set-string, and -f (--values) options all of which have the same behavior as in Helm. 
` // newKLaunchCmd creates the Kubernetes launch command diff --git a/go.mod b/go.mod index 45dc38f41..af256ecaa 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ retract ( ) require ( - fortio.org/fortio v1.54.0 + fortio.org/fortio v1.57.3 github.com/Masterminds/sprig v2.22.0+incompatible github.com/antonmedv/expr v1.12.5 github.com/bojand/ghz v0.114.0 @@ -40,7 +40,7 @@ require ( golang.org/x/sys v0.10.0 golang.org/x/text v0.11.0 gonum.org/v1/plot v0.13.0 - google.golang.org/grpc v1.56.1 + google.golang.org/grpc v1.56.2 google.golang.org/protobuf v1.31.0 helm.sh/helm/v3 v3.11.2 k8s.io/api v0.26.3 @@ -53,8 +53,8 @@ require ( cloud.google.com/go/compute v1.19.1 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect fortio.org/dflag v1.5.2 // indirect - fortio.org/log v1.3.0 // indirect - fortio.org/sets v1.0.2 // indirect + fortio.org/log v1.7.0 // indirect + fortio.org/sets v1.0.3 // indirect fortio.org/version v1.0.2 // indirect git.sr.ht/~sbinet/gg v0.4.1 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect diff --git a/go.sum b/go.sum index 78afe72e3..1f64ef671 100644 --- a/go.sum +++ b/go.sum @@ -41,15 +41,15 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -fortio.org/assert v1.1.4 h1:Za1RaG+OjsTMpQS3J3UCvTF6wc4+IOHCz+jAOU37Y4o= +fortio.org/assert v1.2.0 h1:XscfvR8yp4xW7OMCvNbCsieRFDxlwdEcb69+JZRp6LA= fortio.org/dflag v1.5.2 h1:F9XVRj4Qr2IbJP7BMj7XZc9wB0Q/RZ61Ool+4YPVad8= fortio.org/dflag v1.5.2/go.mod h1:ppb/A8u+KKg+qUUYZNYuvRnXuVb8IsdHb/XGzsmjkN8= -fortio.org/fortio v1.54.0 h1:2jn8yTd6hcIEoKY4CjI0lI6XxTWVxsMYF2bMiWOmv+Y= -fortio.org/fortio v1.54.0/go.mod h1:SRaZbikL31UoAkw0On2hwpvHrQ0rRVnsAz3UGVNvMRw= -fortio.org/log v1.3.0 h1:bESPvuQGKejw7rrx41Sg3GoF+tsrB7oC08PxBs5/AM0= -fortio.org/log v1.3.0/go.mod h1:u/8/2lyczXq52aT5Nw6reD+3cR6m/EbS2jBiIYhgiTU= -fortio.org/sets v1.0.2 h1:gSWZFg9rgzl1zJfI/93lDJKBFw8WZ3Uxe3oQ5uDM4T4= -fortio.org/sets v1.0.2/go.mod h1:xVjulHr0FhlmReSymI+AhDtQ4FgjiazQ3JmuNpYFMs8= +fortio.org/fortio v1.57.3 h1:kdPlBiws3cFsLcssZxCt2opFmHj14C3yPBokFhMWzmg= +fortio.org/fortio v1.57.3/go.mod h1:ykSkArQICajFCvasfgrpE82Fc4sQ+f9Pm1dKIvducaA= +fortio.org/log v1.7.0 h1:4MbU81zqe/3RYuHpXADNgJwd2KEMAwmMUtuF5qtZTug= +fortio.org/log v1.7.0/go.mod h1:u/8/2lyczXq52aT5Nw6reD+3cR6m/EbS2jBiIYhgiTU= +fortio.org/sets v1.0.3 h1:HzewdGjH69YmyW06yzplL35lGr+X4OcqQt0qS6jbaO4= +fortio.org/sets v1.0.3/go.mod h1:QZVj0r6KP/ZD9ebySW9SgxVNy/NjghUfyHW9NN+WU+4= fortio.org/version v1.0.2 h1:8NwxdX58aoeKx7T5xAPO0xlUu1Hpk42nRz5s6e6eKZ0= fortio.org/version v1.0.2/go.mod h1:2JQp9Ax+tm6QKiGuzR5nJY63kFeANcgrZ0osoQFDVm0= git.sr.ht/~sbinet/cmpimg v0.1.0 h1:E0zPRk2muWuCqSKSVZIWsgtU9pjsw3eKHi8VmQeScxo= @@ -1131,8 +1131,8 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.56.1 h1:z0dNfjIl0VpaZ9iSVjA6daGatAYwPGstTjt5vkRMFkQ= -google.golang.org/grpc v1.56.1/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= 
+google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI= +google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/metrics/server.go b/metrics/server.go index 89b2343e8..c5b07f400 100644 --- a/metrics/server.go +++ b/metrics/server.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "io" "math" "net/http" "reflect" @@ -17,6 +18,9 @@ import ( "github.com/iter8-tools/iter8/storage" "github.com/montanaflynn/stats" "gonum.org/v1/plot/plotter" + + "fortio.org/fortio/fhttp" + fstats "fortio.org/fortio/stats" ) const ( @@ -30,6 +34,55 @@ type metricsConfig struct { Port *int `json:"port,omitempty"` } +// versionSummarizedMetric adds version to summary data +type versionSummarizedMetric struct { + Version int + storage.SummarizedMetric +} + +// grafanaHistogram represents the histogram in the Grafana Iter8 dashboard +type grafanaHistogram []grafanaHistogramBucket + +// grafanaHistogramBucket represents a bucket in the histogram in the Grafana Iter8 dashboard +type grafanaHistogramBucket struct { + // Version is the version of the application + Version string + + // Bucket is the bucket of the histogram + // For example: 8-12 + Bucket string + + // Value is the number of points in this bucket + Value float64 +} + +// metricSummary is result for a metric +type metricSummary struct { + HistogramsOverTransactions *grafanaHistogram + HistogramsOverUsers *grafanaHistogram + SummaryOverTransactions []*versionSummarizedMetric + SummaryOverUsers []*versionSummarizedMetric +} + +// httpEndpointPanel is the data needed to produce a single panel for +type httpEndpointPanel struct { + Durations grafanaHistogram + Statistics storage.SummarizedMetric + + ErrorDurations grafanaHistogram `json:"Error durations"` + ErrorStatistics storage.SummarizedMetric `json:"Error statistics"` + + ReturnCodes map[int]int64 `json:"Return codes"` +} + +type httpDashboard struct { + // key is the endpoint + Endpoints map[string]httpEndpointPanel + + // TODO: add type + Summary interface{} +} + var allRoutemaps controllers.AllRouteMapsInterface = &controllers.DefaultRoutemaps{} // Start starts the HTTP server @@ -48,6 +101,8 @@ func Start(stopCh <-chan struct{}) error { // configure endpoints http.HandleFunc("/metrics", getMetrics) + http.HandleFunc("/result", putResult) + http.HandleFunc("/HTTPGrafana", getHTTPGrafana) // configure HTTP server server := &http.Server{ @@ -73,36 +128,6 @@ func Start(stopCh <-chan struct{}) error { return nil } -// VersionSummarizedMetric adds version to summary data -type VersionSummarizedMetric struct { - Version int - storage.SummarizedMetric -} - -// GrafanaHistogram represents the histogram in the Grafana Iter8 dashboard -type GrafanaHistogram []GrafanaHistogramBucket - -// GrafanaHistogramBucket represents a bucket in the histogram in the Grafana Iter8 dashboard -type GrafanaHistogramBucket struct { - // Version is the version of the application - Version string - - // Bucket is the bucket of the histogram - // For example: 8-12 - Bucket string - - // Value is the number of points in this bucket - Value float64 -} - -// MetricSummary is result for a metric -type MetricSummary 
struct { - HistogramsOverTransactions *GrafanaHistogram - HistogramsOverUsers *GrafanaHistogram - SummaryOverTransactions []*VersionSummarizedMetric - SummaryOverUsers []*VersionSummarizedMetric -} - // getMetrics handles GET /metrics with query parameter application=namespace/name func getMetrics(w http.ResponseWriter, r *http.Request) { log.Logger.Trace("getMetrics called") @@ -131,7 +156,7 @@ func getMetrics(w http.ResponseWriter, r *http.Request) { log.Logger.Tracef("getMetrics found routemap %v", rm) // initialize result - result := make(map[string]*MetricSummary, 0) + result := make(map[string]*metricSummary, 0) byMetricOverTransactions := make(map[string](map[string][]float64), 0) byMetricOverUsers := make(map[string](map[string][]float64), 0) @@ -156,11 +181,11 @@ func getMetrics(w http.ResponseWriter, r *http.Request) { _, ok := result[metric] if !ok { // no entry for metric result; create empty entry - result[metric] = &MetricSummary{ + result[metric] = &metricSummary{ HistogramsOverTransactions: nil, HistogramsOverUsers: nil, - SummaryOverTransactions: []*VersionSummarizedMetric{}, - SummaryOverUsers: []*VersionSummarizedMetric{}, + SummaryOverTransactions: []*versionSummarizedMetric{}, + SummaryOverUsers: []*versionSummarizedMetric{}, } } @@ -171,7 +196,7 @@ func getMetrics(w http.ResponseWriter, r *http.Request) { log.Logger.Debugf("unable to compute summaried metrics over transactions for application %s (version %d; signature %s)", application, v, *signature) continue } else { - entry.SummaryOverTransactions = append(entry.SummaryOverTransactions, &VersionSummarizedMetric{ + entry.SummaryOverTransactions = append(entry.SummaryOverTransactions, &versionSummarizedMetric{ Version: v, SummarizedMetric: smT, }) @@ -182,14 +207,13 @@ func getMetrics(w http.ResponseWriter, r *http.Request) { log.Logger.Debugf("unable to compute summaried metrics over users for application %s (version %d; signature %s)", application, v, *signature) continue } - entry.SummaryOverUsers = append(entry.SummaryOverUsers, &VersionSummarizedMetric{ + entry.SummaryOverUsers = append(entry.SummaryOverUsers, &versionSummarizedMetric{ Version: v, SummarizedMetric: smU, }) result[metric] = entry // copy data into structure for histogram calculation (to be done later) - // over transaction data vStr := fmt.Sprintf("%d", v) // over transaction data _, ok = byMetricOverTransactions[metric] @@ -290,7 +314,7 @@ func calculateSummarizedMetric(data []float64) (storage.SummarizedMetric, error) // For example: "-0.24178488465151116 - 0.24782423875427073" -> "-0.242 - 0.248" // // TODO: defaults for numBuckets/decimalPlace? 
-func calculateHistogram(versionMetrics map[string][]float64, numBuckets int, decimalPlace float64) (GrafanaHistogram, error) { +func calculateHistogram(versionMetrics map[string][]float64, numBuckets int, decimalPlace float64) (grafanaHistogram, error) { if numBuckets == 0 { numBuckets = 10 } @@ -322,7 +346,7 @@ func calculateHistogram(versionMetrics map[string][]float64, numBuckets int, dec return nil, fmt.Errorf("cannot create version maximum: %e", err) } - grafanaHistogram := GrafanaHistogram{} + grafanaHistogram := grafanaHistogram{} for version, metrics := range versionMetrics { // convert the raw values to the gonum plot values @@ -347,7 +371,7 @@ func calculateHistogram(versionMetrics map[string][]float64, numBuckets int, dec count-- } - grafanaHistogram = append(grafanaHistogram, GrafanaHistogramBucket{ + grafanaHistogram = append(grafanaHistogram, grafanaHistogramBucket{ Version: version, Bucket: bucketLabel(bin.Min, bin.Max, decimalPlace), Value: count, @@ -370,3 +394,169 @@ func roundDecimal(x float64, decimalPlace float64) float64 { func bucketLabel(min, max float64, decimalPlace float64) string { return fmt.Sprintf("%s - %s", strconv.FormatFloat(roundDecimal(min, decimalPlace), 'f', -1, 64), strconv.FormatFloat(roundDecimal(max, decimalPlace), 'f', -1, 64)) } + +// calculateHistogram creates histograms based on Fortio result +func getFortioCalculateHistogram(fortioHistogram []fstats.Bucket, decimalPlace float64) grafanaHistogram { + grafanaHistogram := grafanaHistogram{} + + for _, bucket := range fortioHistogram { + grafanaHistogram = append(grafanaHistogram, grafanaHistogramBucket{ + Version: "0", + Bucket: bucketLabel(bucket.Start*1000, bucket.End*1000, decimalPlace), + Value: float64(bucket.Count), + }) + } + + return grafanaHistogram +} + +func getFortioHistogramStats(fortioHistogram *fstats.HistogramData, decimalPlace float64) storage.SummarizedMetric { + return storage.SummarizedMetric{ + Count: uint64(fortioHistogram.Count), + Mean: fortioHistogram.Avg * 1000, + StdDev: fortioHistogram.StdDev * 1000, + Min: fortioHistogram.Min * 1000, + Max: fortioHistogram.Max * 1000, + } +} + +func getFortioEndpointPanel(httpRunnerResults *fhttp.HTTPRunnerResults) httpEndpointPanel { + result := httpEndpointPanel{} + if httpRunnerResults.DurationHistogram != nil { + result.Durations = getFortioCalculateHistogram(httpRunnerResults.DurationHistogram.Data, 1) + result.Statistics = getFortioHistogramStats(httpRunnerResults.DurationHistogram, 1) + } + + if httpRunnerResults.ErrorsDurationHistogram != nil { + result.ErrorDurations = getFortioCalculateHistogram(httpRunnerResults.ErrorsDurationHistogram.Data, 1) + result.ErrorStatistics = getFortioHistogramStats(httpRunnerResults.ErrorsDurationHistogram, 1) + } + + result.ReturnCodes = httpRunnerResults.RetCodes + + return result +} + +func getFortioDashboard(fortioResult util.FortioResult) httpDashboard { + // add endpoint results + dashboard := httpDashboard{ + Endpoints: map[string]httpEndpointPanel{}, + } + + for endpoint, endpointResult := range fortioResult.EndpointResults { + // TODO: endpointResult := endpointResult? 
+ dashboard.Endpoints[endpoint] = getFortioEndpointPanel(&endpointResult) + } + + // add summary + dashboard.Summary = fortioResult.Summary + + return dashboard +} + +// putResult handles PUT /result with query parameter application=namespace/name +func putResult(w http.ResponseWriter, r *http.Request) { + log.Logger.Trace("putResult called") + defer log.Logger.Trace("putResult completed") + + // verify method + if r.Method != http.MethodPut { + http.Error(w, "expected PUT", http.StatusMethodNotAllowed) + return + } + + // verify request (query parameter) + // Key: kt-result::my-namespace::my-experiment-name::my-endpoint + // Should namespace and experiment name come from application? + namespace := r.URL.Query().Get("namespace") + if namespace == "" { + http.Error(w, "no namespace specified", http.StatusBadRequest) + } + + experiment := r.URL.Query().Get("experiment") + if experiment == "" { + http.Error(w, "no experiment specified", http.StatusBadRequest) + } + + log.Logger.Tracef("putResult called for namespace %s and experiment %s", namespace, experiment) + + defer r.Body.Close() + body, err := io.ReadAll(r.Body) + if err != nil { + errorMessage := fmt.Sprintf("cannot read request body: %e", err) + log.Logger.Error(errorMessage) + http.Error(w, errorMessage, http.StatusBadRequest) + return + } + + // TODO: 201 for new resource, 200 for update + err = abn.MetricsClient.SetResult(namespace, experiment, body) + if err != nil { + errorMessage := fmt.Sprintf("cannot store result in storage client: %s: %e", string(body), err) + log.Logger.Error(errorMessage) + http.Error(w, errorMessage, http.StatusBadRequest) + return + } +} + +// getHTTPGrafana handles GET /getHTTPGrafana with query parameter application=namespace/name +func getHTTPGrafana(w http.ResponseWriter, r *http.Request) { + log.Logger.Trace("getHTTPGrafana called") + defer log.Logger.Trace("getHTTPGrafana completed") + + // verify method + if r.Method != http.MethodGet { + http.Error(w, "expected GET", http.StatusMethodNotAllowed) + return + } + + // verify request (query parameter) + // Key: kt-result::my-namespace::my-experiment-name::my-endpoint + // Should namespace and experiment name come from application? + namespace := r.URL.Query().Get("namespace") + if namespace == "" { + http.Error(w, "no namespace specified", http.StatusBadRequest) + return + } + + experiment := r.URL.Query().Get("experiment") + if experiment == "" { + http.Error(w, "no experiment specified", http.StatusBadRequest) + return + } + + log.Logger.Tracef("getHTTPGrafana called for namespace %s and experiment %s", namespace, experiment) + + // get result from metrics client + result, err := abn.MetricsClient.GetResult(namespace, experiment) + if err != nil { + errorMessage := fmt.Sprintf("cannot get result with namespace %s, experiment %s", namespace, experiment) + log.Logger.Error(errorMessage) + http.Error(w, errorMessage, http.StatusBadRequest) + return + } + + // TODO: should these functions belong in collect_http.go? Or be somewhere closeby? 
+ // These functions are only for the purpose of processing the results of collect_http.go + fortioResult := util.FortioResult{} + err = json.Unmarshal(result, &fortioResult) + if err != nil { + errorMessage := fmt.Sprintf("cannot JSON unmarshal result into FortioResult: \"%s\"", string(result)) + log.Logger.Error(errorMessage) + http.Error(w, errorMessage, http.StatusBadRequest) + return + } + + // JSON marshal the dashboard + dashboardBytes, err := json.Marshal(getFortioDashboard(fortioResult)) + if err != nil { + errorMessage := "cannot JSON marshal HTTP dashboard" + log.Logger.Error(errorMessage) + http.Error(w, errorMessage, http.StatusInternalServerError) + return + } + + // finally, send response + w.Header().Add("Content-Type", "application/json") + _, _ = w.Write(dashboardBytes) +} diff --git a/metrics/server_test.go b/metrics/server_test.go index d0b040400..95101af05 100644 --- a/metrics/server_test.go +++ b/metrics/server_test.go @@ -13,6 +13,7 @@ import ( "testing" "time" + fstats "fortio.org/fortio/stats" "github.com/dgraph-io/badger/v4" "github.com/iter8-tools/iter8/abn" util "github.com/iter8-tools/iter8/base" @@ -336,3 +337,164 @@ func getTestRM(namespace, name string) *testroutemap { } } + +func TestFortioCalculateHistogram(t *testing.T) { + data := []fstats.Bucket{ + { + Interval: fstats.Interval{ + Start: 0.005229875, + End: 0.006, + }, + Percent: 2, + Count: 2, + }, + { + Interval: fstats.Interval{ + Start: 0.006, + End: 0.007, + }, + Percent: 5, + Count: 3, + }, + { + Interval: fstats.Interval{ + Start: 0.007, + End: 0.008, + }, + Percent: 6, + Count: 1, + }, + { + Interval: fstats.Interval{ + Start: 0.009000000000000001, + End: 0.01, + }, + Percent: 7, + Count: 1, + }, + { + Interval: fstats.Interval{ + Start: 0.01, + End: 0.011, + }, + Percent: 12, + Count: 5, + }, + { + Interval: fstats.Interval{ + Start: 0.011, + End: 0.012, + }, + Percent: 15, + Count: 3, + }, + { + Interval: fstats.Interval{ + Start: 0.012, + End: 0.014, + }, + Percent: 22, + Count: 7, + }, + { + Interval: fstats.Interval{ + Start: 0.014, + End: 0.016, + }, + Percent: 26, + Count: 4, + }, + { + Interval: fstats.Interval{ + Start: 0.016, + End: 0.018000000000000002, + }, + Percent: 37, + Count: 11, + }, + { + Interval: fstats.Interval{ + Start: 0.018000000000000002, + End: 0.02, + }, + Percent: 42, + Count: 5, + }, + { + Interval: fstats.Interval{ + Start: 0.02, + End: 0.025, + }, + Percent: 57, + Count: 15, + }, + { + Interval: fstats.Interval{ + Start: 0.025, + End: 0.03, + }, + Percent: 70, + Count: 13, + }, + { + Interval: fstats.Interval{ + Start: 0.03, + End: 0.035, + }, + Percent: 79, + Count: 9, + }, + { + Interval: fstats.Interval{ + Start: 0.035, + End: 0.04, + }, + Percent: 86, + Count: 7, + }, + { + Interval: fstats.Interval{ + Start: 0.04, + End: 0.045, + }, + Percent: 95, + Count: 9, + }, + { + Interval: fstats.Interval{ + Start: 0.045, + End: 0.05, + }, + Percent: 97, + Count: 2, + }, + { + Interval: fstats.Interval{ + Start: 0.05, + End: 0.051404375, + }, + Percent: 100, + Count: 3, + }, + } + + histogram := getFortioCalculateHistogram(data, 1) + + histogramJSON, _ := json.Marshal(histogram) + fmt.Println(string(histogramJSON)) +} + +func TestFortioHistogramStats(t *testing.T) { + result := "{\"EndpointResults\":{\"http://httpbin.default/get\":{\"RunType\":\"HTTP\",\"Labels\":\"\",\"StartTime\":\"2023-07-21T14:00:40.134434969Z\",\"RequestedQPS\":\"8\",\"RequestedDuration\":\"exactly 100 
calls\",\"ActualQPS\":7.975606391552989,\"ActualDuration\":12538231589,\"NumThreads\":4,\"Version\":\"1.57.3\",\"DurationHistogram\":{\"Count\":100,\"Min\":0.004223875,\"Max\":0.040490042,\"Sum\":1.5977100850000001,\"Avg\":0.015977100850000002,\"StdDev\":0.008340658047253256,\"Data\":[{\"Start\":0.004223875,\"End\":0.005,\"Percent\":5,\"Count\":5},{\"Start\":0.005,\"End\":0.006,\"Percent\":10,\"Count\":5},{\"Start\":0.006,\"End\":0.007,\"Percent\":14,\"Count\":4},{\"Start\":0.007,\"End\":0.008,\"Percent\":19,\"Count\":5},{\"Start\":0.008,\"End\":0.009000000000000001,\"Percent\":24,\"Count\":5},{\"Start\":0.009000000000000001,\"End\":0.01,\"Percent\":28,\"Count\":4},{\"Start\":0.01,\"End\":0.011,\"Percent\":33,\"Count\":5},{\"Start\":0.011,\"End\":0.012,\"Percent\":36,\"Count\":3},{\"Start\":0.012,\"End\":0.014,\"Percent\":48,\"Count\":12},{\"Start\":0.014,\"End\":0.016,\"Percent\":55,\"Count\":7},{\"Start\":0.016,\"End\":0.018000000000000002,\"Percent\":65,\"Count\":10},{\"Start\":0.018000000000000002,\"End\":0.02,\"Percent\":74,\"Count\":9},{\"Start\":0.02,\"End\":0.025,\"Percent\":85,\"Count\":11},{\"Start\":0.025,\"End\":0.03,\"Percent\":93,\"Count\":8},{\"Start\":0.03,\"End\":0.035,\"Percent\":98,\"Count\":5},{\"Start\":0.035,\"End\":0.04,\"Percent\":99,\"Count\":1},{\"Start\":0.04,\"End\":0.040490042,\"Percent\":100,\"Count\":1}],\"Percentiles\":[{\"Percentile\":50,\"Value\":0.014571428571428572},{\"Percentile\":75,\"Value\":0.020454545454545454},{\"Percentile\":90,\"Value\":0.028125},{\"Percentile\":95,\"Value\":0.032},{\"Percentile\":99,\"Value\":0.04},{\"Percentile\":99.9,\"Value\":0.0404410378}]},\"ErrorsDurationHistogram\":{\"Count\":0,\"Min\":0,\"Max\":0,\"Sum\":0,\"Avg\":0,\"StdDev\":0,\"Data\":null},\"Exactly\":100,\"Jitter\":false,\"Uniform\":false,\"NoCatchUp\":false,\"RunID\":0,\"AccessLoggerInfo\":\"\",\"ID\":\"2023-07-21-140040\",\"RetCodes\":{\"200\":100},\"IPCountMap\":{\"10.96.108.76:80\":4},\"Insecure\":false,\"MTLS\":false,\"CACert\":\"\",\"Cert\":\"\",\"Key\":\"\",\"UnixDomainSocket\":\"\",\"URL\":\"http://httpbin.default/get\",\"NumConnections\":1,\"Compression\":false,\"DisableFastClient\":false,\"HTTP10\":false,\"H2\":false,\"DisableKeepAlive\":false,\"AllowHalfClose\":false,\"FollowRedirects\":false,\"Resolve\":\"\",\"HTTPReqTimeOut\":3000000000,\"UserCredentials\":\"\",\"ContentType\":\"\",\"Payload\":null,\"MethodOverride\":\"\",\"LogErrors\":false,\"SequentialWarmup\":false,\"ConnReuseRange\":[0,0],\"NoResolveEachConn\":false,\"Offset\":0,\"Resolution\":0.001,\"Sizes\":{\"Count\":100,\"Min\":413,\"Max\":413,\"Sum\":41300,\"Avg\":413,\"StdDev\":0,\"Data\":[{\"Start\":413,\"End\":413,\"Percent\":100,\"Count\":100}]},\"HeaderSizes\":{\"Count\":100,\"Min\":230,\"Max\":230,\"Sum\":23000,\"Avg\":230,\"StdDev\":0,\"Data\":[{\"Start\":230,\"End\":230,\"Percent\":100,\"Count\":100}]},\"Sockets\":[1,1,1,1],\"SocketCount\":4,\"ConnectionStats\":{\"Count\":4,\"Min\":0.001385875,\"Max\":0.001724375,\"Sum\":0.006404583,\"Avg\":0.00160114575,\"StdDev\":0.00013101857565508474,\"Data\":[{\"Start\":0.001385875,\"End\":0.001724375,\"Percent\":100,\"Count\":4}],\"Percentiles\":[{\"Percentile\":50,\"Value\":0.0014987083333333332},{\"Percentile\":75,\"Value\":0.0016115416666666667},{\"Percentile\":90,\"Value\":0.0016792416666666667},{\"Percentile\":95,\"Value\":0.0017018083333333333},{\"Percentile\":99,\"Value\":0.0017198616666666668},{\"Percentile\":99.9,\"Value\":0.0017239236666666668}]},\"AbortOn\":0}},\"Summary\":{\"numVersions\":1,\"versionNames\":null,\"metricsInfo\":{\"http/
latency\":{\"description\":\"Latency Histogram\",\"units\":\"msec\",\"type\":\"Histogram\"},\"http://httpbin.default/get/error-count\":{\"description\":\"number of responses that were errors\",\"type\":\"Counter\"},\"http://httpbin.default/get/error-rate\":{\"description\":\"fraction of responses that were errors\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-max\":{\"description\":\"maximum of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-mean\":{\"description\":\"mean of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-min\":{\"description\":\"minimum of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p50\":{\"description\":\"50-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p75\":{\"description\":\"75-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p90\":{\"description\":\"90-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p95\":{\"description\":\"95-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p99\":{\"description\":\"99-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p99.9\":{\"description\":\"99.9-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-stddev\":{\"description\":\"standard deviation of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/request-count\":{\"description\":\"number of requests sent\",\"type\":\"Counter\"}},\"nonHistMetricValues\":[{\"http://httpbin.default/get/error-count\":[0],\"http://httpbin.default/get/error-rate\":[0],\"http://httpbin.default/get/latency-max\":[40.490041999999995],\"http://httpbin.default/get/latency-mean\":[15.977100850000001],\"http://httpbin.default/get/latency-min\":[4.2238750000000005],\"http://httpbin.default/get/latency-p50\":[14.571428571428571],\"http://httpbin.default/get/latency-p75\":[20.454545454545453],\"http://httpbin.default/get/latency-p90\":[28.125],\"http://httpbin.default/get/latency-p95\":[32],\"http://httpbin.default/get/latency-p99\":[40],\"http://httpbin.default/get/latency-p99.9\":[40.441037800000004],\"http://httpbin.default/get/latency-stddev\":[8.340658047253257],\"http://httpbin.default/get/request-count\":[100]}],\"histMetricValues\":[{\"http/latency\":[{\"lower\":4.2238750000000005,\"upper\":5,\"count\":5},{\"lower\":5,\"upper\":6,\"count\":5},{\"lower\":6,\"upper\":7,\"count\":4},{\"lower\":7,\"upper\":8,\"count\":5},{\"lower\":8,\"upper\":9.000000000000002,\"count\":5},{\"lower\":9.000000000000002,\"upper\":10,\"count\":4},{\"lower\":10,\"upper\":11,\"count\":5},{\"lower\":11,\"upper\":12,\"count\":3},{\"lower\":12,\"upper\":14,\"count\":12},{\"lower\":14,\"upper\":16,\"count\":7},{\"lower\":16,\"upper\":18.000000000000004,\"count\":10},{\"lower\":18.000000000000004,\"upper\":20,\"count\":9},{\"lower\":20,\"upper\":25,\"count\":11},{\"lower\":25,\"upper\":30,\"count\":8},{\"lower\":30,\"upper\":35,\"count\":5},{\"lower\":35,\"upper\":40,\"count\":1},{\"lower\":40,\"upper\":40.490041999999995,\"count\":1}]}],\"SummaryMetricValues\":[{}]}}" + + fortioResult := 
util.FortioResult{} + err := json.Unmarshal([]byte(result), &fortioResult) + assert.NoError(t, err) + + dashboard := getFortioDashboard(fortioResult) + assert.NotNil(t, dashboard) + dashboardBytes, err := json.Marshal(dashboard) + assert.NoError(t, err) + + assert.Equal(t, "{\"Endpoints\":{\"http://httpbin.default/get\":{\"Durations\":[{\"Version\":\"0\",\"Bucket\":\"4.2 - 5\",\"Value\":5},{\"Version\":\"0\",\"Bucket\":\"5 - 6\",\"Value\":5},{\"Version\":\"0\",\"Bucket\":\"6 - 7\",\"Value\":4},{\"Version\":\"0\",\"Bucket\":\"7 - 8\",\"Value\":5},{\"Version\":\"0\",\"Bucket\":\"8 - 9\",\"Value\":5},{\"Version\":\"0\",\"Bucket\":\"9 - 10\",\"Value\":4},{\"Version\":\"0\",\"Bucket\":\"10 - 11\",\"Value\":5},{\"Version\":\"0\",\"Bucket\":\"11 - 12\",\"Value\":3},{\"Version\":\"0\",\"Bucket\":\"12 - 14\",\"Value\":12},{\"Version\":\"0\",\"Bucket\":\"14 - 16\",\"Value\":7},{\"Version\":\"0\",\"Bucket\":\"16 - 18\",\"Value\":10},{\"Version\":\"0\",\"Bucket\":\"18 - 20\",\"Value\":9},{\"Version\":\"0\",\"Bucket\":\"20 - 25\",\"Value\":11},{\"Version\":\"0\",\"Bucket\":\"25 - 30\",\"Value\":8},{\"Version\":\"0\",\"Bucket\":\"30 - 35\",\"Value\":5},{\"Version\":\"0\",\"Bucket\":\"35 - 40\",\"Value\":1},{\"Version\":\"0\",\"Bucket\":\"40 - 40.4\",\"Value\":1}],\"Statistics\":{\"Count\":100,\"Mean\":15.977100850000001,\"StdDev\":8.340658047253257,\"Min\":4.2238750000000005,\"Max\":40.490041999999995},\"Error durations\":[],\"Error statistics\":{\"Count\":0,\"Mean\":0,\"StdDev\":0,\"Min\":0,\"Max\":0},\"Return codes\":{\"200\":100}}},\"Summary\":{\"SummaryMetricValues\":[{}],\"histMetricValues\":[{\"http/latency\":[{\"count\":5,\"lower\":4.2238750000000005,\"upper\":5},{\"count\":5,\"lower\":5,\"upper\":6},{\"count\":4,\"lower\":6,\"upper\":7},{\"count\":5,\"lower\":7,\"upper\":8},{\"count\":5,\"lower\":8,\"upper\":9.000000000000002},{\"count\":4,\"lower\":9.000000000000002,\"upper\":10},{\"count\":5,\"lower\":10,\"upper\":11},{\"count\":3,\"lower\":11,\"upper\":12},{\"count\":12,\"lower\":12,\"upper\":14},{\"count\":7,\"lower\":14,\"upper\":16},{\"count\":10,\"lower\":16,\"upper\":18.000000000000004},{\"count\":9,\"lower\":18.000000000000004,\"upper\":20},{\"count\":11,\"lower\":20,\"upper\":25},{\"count\":8,\"lower\":25,\"upper\":30},{\"count\":5,\"lower\":30,\"upper\":35},{\"count\":1,\"lower\":35,\"upper\":40},{\"count\":1,\"lower\":40,\"upper\":40.490041999999995}]}],\"metricsInfo\":{\"http/latency\":{\"description\":\"Latency Histogram\",\"type\":\"Histogram\",\"units\":\"msec\"},\"http://httpbin.default/get/error-count\":{\"description\":\"number of responses that were errors\",\"type\":\"Counter\"},\"http://httpbin.default/get/error-rate\":{\"description\":\"fraction of responses that were errors\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-max\":{\"description\":\"maximum of observed latency values\",\"type\":\"Gauge\",\"units\":\"msec\"},\"http://httpbin.default/get/latency-mean\":{\"description\":\"mean of observed latency values\",\"type\":\"Gauge\",\"units\":\"msec\"},\"http://httpbin.default/get/latency-min\":{\"description\":\"minimum of observed latency values\",\"type\":\"Gauge\",\"units\":\"msec\"},\"http://httpbin.default/get/latency-p50\":{\"description\":\"50-th percentile of observed latency values\",\"type\":\"Gauge\",\"units\":\"msec\"},\"http://httpbin.default/get/latency-p75\":{\"description\":\"75-th percentile of observed latency values\",\"type\":\"Gauge\",\"units\":\"msec\"},\"http://httpbin.default/get/latency-p90\":{\"description\":\"90-th percentile of 
observed latency values\",\"type\":\"Gauge\",\"units\":\"msec\"},\"http://httpbin.default/get/latency-p95\":{\"description\":\"95-th percentile of observed latency values\",\"type\":\"Gauge\",\"units\":\"msec\"},\"http://httpbin.default/get/latency-p99\":{\"description\":\"99-th percentile of observed latency values\",\"type\":\"Gauge\",\"units\":\"msec\"},\"http://httpbin.default/get/latency-p99.9\":{\"description\":\"99.9-th percentile of observed latency values\",\"type\":\"Gauge\",\"units\":\"msec\"},\"http://httpbin.default/get/latency-stddev\":{\"description\":\"standard deviation of observed latency values\",\"type\":\"Gauge\",\"units\":\"msec\"},\"http://httpbin.default/get/request-count\":{\"description\":\"number of requests sent\",\"type\":\"Counter\"}},\"nonHistMetricValues\":[{\"http://httpbin.default/get/error-count\":[0],\"http://httpbin.default/get/error-rate\":[0],\"http://httpbin.default/get/latency-max\":[40.490041999999995],\"http://httpbin.default/get/latency-mean\":[15.977100850000001],\"http://httpbin.default/get/latency-min\":[4.2238750000000005],\"http://httpbin.default/get/latency-p50\":[14.571428571428571],\"http://httpbin.default/get/latency-p75\":[20.454545454545453],\"http://httpbin.default/get/latency-p90\":[28.125],\"http://httpbin.default/get/latency-p95\":[32],\"http://httpbin.default/get/latency-p99\":[40],\"http://httpbin.default/get/latency-p99.9\":[40.441037800000004],\"http://httpbin.default/get/latency-stddev\":[8.340658047253257],\"http://httpbin.default/get/request-count\":[100]}],\"numVersions\":1,\"versionNames\":null}}", string(dashboardBytes)) +} diff --git a/storage/badgerdb/simple.go b/storage/badgerdb/simple.go index aeb550af4..768b190cc 100644 --- a/storage/badgerdb/simple.go +++ b/storage/badgerdb/simple.go @@ -312,3 +312,41 @@ func (cl Client) GetMetrics(applicationName string, version int, signature strin return &metrics, nil } + +func getResultKey(namespace, experiment string) string { + // getResultKey() is just getUserPrefix() with the user appended at the end + return fmt.Sprintf("kt-result::%s::%s", namespace, experiment) +} + +// SetResult sets the result of a particular HTTP/gRPC run for a particular namespace and experiment name +// the data is []byte in order to make this function reusable for HTTP and gRPC +func (cl Client) SetResult(namespace, experiment string, data []byte) error { + key := getResultKey(namespace, experiment) + + return cl.db.Update(func(txn *badger.Txn) error { + e := badger.NewEntry([]byte(key), data).WithTTL(cl.additionalOptions.TTL) + err := txn.SetEntry(e) + return err + }) +} + +// GetMetrics returns the result of a particular HTTP/gRPC run for a particular namespace and experiment name +// the data is []byte in order to make this function reusable for HTTP and gRPC +func (cl Client) GetResult(namespace, experiment string) ([]byte, error) { + var valCopy []byte + err := cl.db.View(func(txn *badger.Txn) error { + item, err := txn.Get([]byte(getResultKey(namespace, experiment))) + if err != nil { + return err + } + + valCopy, err = item.ValueCopy(nil) + if err != nil { + return err + } + + return nil + }) + + return valCopy, err +} diff --git a/storage/badgerdb/simple_test.go b/storage/badgerdb/simple_test.go index 1afdf47a0..4d8f3329d 100644 --- a/storage/badgerdb/simple_test.go +++ b/storage/badgerdb/simple_test.go @@ -272,3 +272,52 @@ func TestGetMetrics(t *testing.T) { assert.NoError(t, err) assert.Equal(t, "{}", string(jsonMetrics)) } + +func TestSetResult(t *testing.T) { + tempDirPath := t.TempDir() 
+ + client, err := GetClient(badger.DefaultOptions(tempDirPath), AdditionalOptions{}) + assert.NoError(t, err) + + namespace := "my-namespace" + experiment := "my-experiment" + data := "hello world" + + err = client.SetResult(namespace, experiment, []byte(data)) + assert.NoError(t, err) + + // get result + err = client.db.View(func(txn *badger.Txn) error { + key := getResultKey(namespace, experiment) + item, err := txn.Get([]byte(key)) + assert.NoError(t, err) + assert.NotNil(t, item) + + err = item.Value(func(val []byte) error { + assert.Equal(t, data, string(val)) + return nil + }) + assert.NoError(t, err) + + return nil + }) + assert.NoError(t, err) +} + +func TestGetResult(t *testing.T) { + tempDirPath := t.TempDir() + + client, err := GetClient(badger.DefaultOptions(tempDirPath), AdditionalOptions{}) + assert.NoError(t, err) + + namespace := "my-namespace" + experiment := "my-experiment" + data := "hello world" + + err = client.SetResult(namespace, experiment, []byte(data)) + assert.NoError(t, err) + + result, err := client.GetResult(namespace, experiment) + assert.NoError(t, err) + assert.Equal(t, data, string(result)) +} diff --git a/storage/interface.go b/storage/interface.go index 0f44b6471..a44ed7ce1 100644 --- a/storage/interface.go +++ b/storage/interface.go @@ -37,7 +37,7 @@ type VersionMetrics map[string]struct { // Interface enables interaction with a storage entity // Can be mocked in unit tests with fake implementation type Interface interface { - // Returns a nested map of the metrics data for a particular application, version, and signature + // returns a nested map of the metrics data for a particular application, version, and signature // Example: // { // "my-metric": { @@ -63,4 +63,10 @@ type Interface interface { // Example key: kt-users::my-app::0::my-signature::my-user -> true SetUser(applicationName string, version int, signature, user string) error + + // returns the HTTP/gRPC results for a particular namespace and experiment + GetResult(namespace, experiment string) ([]byte, error) + + // Example key: kt-result::my-namespace::my-experiment-name -> per endpoint JSON data + summary + SetResult(namespace, experiment string, data []byte) error } From a007cb9c7b5e2a1a73cf747a069dac4fd2c0cd18 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Mon, 24 Jul 2023 12:50:38 -0400 Subject: [PATCH 002/121] Clean up Signed-off-by: Alan Cha --- abn/service_test.go | 2 +- base/collect_grpc.go | 9 --------- base/collect_http.go | 4 +++- base/experiment.go | 4 ++++ metrics/server.go | 12 ++++++++++-- storage/badgerdb/simple.go | 2 +- 6 files changed, 19 insertions(+), 14 deletions(-) diff --git a/abn/service_test.go b/abn/service_test.go index 78fd5f9f8..a21e12528 100644 --- a/abn/service_test.go +++ b/abn/service_test.go @@ -236,7 +236,7 @@ func TestLaunchGRPCServer(t *testing.T) { defer cancel() // define METRICS_DIR - err := os.Setenv(metricsDirEnv, t.TempDir()) + err := os.Setenv(MetricsDirEnv, t.TempDir()) assert.NoError(t, err) configFile := filepath.Clean(util.CompletePath("../testdata", "abninputs/config.yaml")) diff --git a/base/collect_grpc.go b/base/collect_grpc.go index bdc08cdca..1bb89c2fc 100644 --- a/base/collect_grpc.go +++ b/base/collect_grpc.go @@ -1,9 +1,7 @@ package base import ( - "encoding/json" "fmt" - "os" "time" "github.com/bojand/ghz/runner" @@ -123,13 +121,6 @@ func (t *collectGRPCTask) resultForVersion() (map[string]*runner.Report, error) return results, err } - igrJSON, _ := json.Marshal(igr) - f, _ := os.Create("ghz.json") - defer f.Close() - 
f.Write(igrJSON) - - f.Sync() - results[gRPCMetricPrefix] = igr } diff --git a/base/collect_http.go b/base/collect_http.go index 8abe872a2..8c2e870df 100644 --- a/base/collect_http.go +++ b/base/collect_http.go @@ -521,7 +521,9 @@ func (t *collectHTTPTask) run(exp *Experiment) error { fortioResultBytes, _ := json.Marshal(fortioResult) log.Logger.Trace(string(fortioResultBytes)) - putResultToMetricsService(metricsServerURL, exp.Metadata.Namespace, exp.Metadata.Name, fortioResult) + if err = putResultToMetricsService(metricsServerURL, exp.Metadata.Namespace, exp.Metadata.Name, fortioResult); err != nil { + return err + } return nil } diff --git a/base/experiment.go b/base/experiment.go index 0c8519e54..b05453bcd 100644 --- a/base/experiment.go +++ b/base/experiment.go @@ -33,9 +33,13 @@ type Task interface { // ExperimentSpec specifies the set of tasks in this experiment type ExperimentSpec []Task +// ExperimentMetadata species the name and namespace of the experiment +// Used in http and grpc tasks to send the name and namespace to the metrics server type ExperimentMetadata struct { + // Name is the name of the experiment Name string + // Namespace is the namespace the experiment was deployed in Namespace string } diff --git a/metrics/server.go b/metrics/server.go index c5b07f400..9805827c0 100644 --- a/metrics/server.go +++ b/metrics/server.go @@ -444,7 +444,7 @@ func getFortioDashboard(fortioResult util.FortioResult) httpDashboard { } for endpoint, endpointResult := range fortioResult.EndpointResults { - // TODO: endpointResult := endpointResult? + endpointResult := endpointResult dashboard.Endpoints[endpoint] = getFortioEndpointPanel(&endpointResult) } @@ -480,7 +480,15 @@ func putResult(w http.ResponseWriter, r *http.Request) { log.Logger.Tracef("putResult called for namespace %s and experiment %s", namespace, experiment) - defer r.Body.Close() + defer func() { + err := r.Body.Close() + if err != nil { + errorMessage := fmt.Sprintf("cannot close request body: %e", err) + log.Logger.Error(errorMessage) + http.Error(w, errorMessage, http.StatusBadRequest) + return + } + }() body, err := io.ReadAll(r.Body) if err != nil { errorMessage := fmt.Sprintf("cannot read request body: %e", err) diff --git a/storage/badgerdb/simple.go b/storage/badgerdb/simple.go index 768b190cc..53ce60a36 100644 --- a/storage/badgerdb/simple.go +++ b/storage/badgerdb/simple.go @@ -330,7 +330,7 @@ func (cl Client) SetResult(namespace, experiment string, data []byte) error { }) } -// GetMetrics returns the result of a particular HTTP/gRPC run for a particular namespace and experiment name +// GetResult returns the result of a particular HTTP/gRPC run for a particular namespace and experiment name // the data is []byte in order to make this function reusable for HTTP and gRPC func (cl Client) GetResult(namespace, experiment string) ([]byte, error) { var valCopy []byte From 0fa6ddc1fc2fd468ac8a13cd9344d96d064489c8 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Mon, 24 Jul 2023 14:42:56 -0400 Subject: [PATCH 003/121] Add grafana option to HTTP task Signed-off-by: Alan Cha --- base/collect_http.go | 60 +++++++++++++++++++++++---------------- base/test_helpers.go | 1 - driver/kubedriver_test.go | 4 ++- 3 files changed, 38 insertions(+), 27 deletions(-) diff --git a/base/collect_http.go b/base/collect_http.go index 8c2e870df..433871b4a 100644 --- a/base/collect_http.go +++ b/base/collect_http.go @@ -61,6 +61,11 @@ type collectHTTPInputs struct { // Endpoints is used to define multiple endpoints to test Endpoints 
map[string]endpoint `json:"endpoints" yaml:"endpoints"` + + // TODO: remove + // Determines if Grafana dashboard should be created + // dasboard vs report/assess tasks + grafana bool } // FortioResult @@ -318,11 +323,11 @@ func (t *collectHTTPTask) getFortioResults() (map[string]fhttp.HTTPRunnerResults } // TODO: does ifr need to be a pointer? - // results[httpMetricPrefix+"-"+endpointID] = ifr - results[endpoint.URL] = *ifr - - // TODO: namespace and experiment name - // putData(metricsServerURL, exp.Metadata.Namespace, exp.Metadata.Name, ifr) + resultsKey := httpMetricPrefix + "-" + endpointID + if t.With.grafana { + resultsKey = endpoint.URL + } + results[resultsKey] = *ifr } } else { fo, err := getFortioOptions(t.With.endpoint) @@ -338,12 +343,15 @@ func (t *collectHTTPTask) getFortioResults() (map[string]fhttp.HTTPRunnerResults ifr, err := fhttp.RunHTTPTest(fo) if err != nil { log.Logger.WithStackTrace(err.Error()).Error("fortio failed") - return results, err + return nil, err } // TODO: does ifr need to be a pointer? - // results[httpMetricPrefix] = ifr - results[t.With.endpoint.URL] = *ifr + resultsKey := httpMetricPrefix + if t.With.grafana { + resultsKey = t.With.endpoint.URL + } + results[resultsKey] = *ifr } return results, err @@ -503,26 +511,28 @@ func (t *collectHTTPTask) run(exp *Experiment) error { } } - // push data to metrics service - fortioResult := fortioResult{ - EndpointResults: data, - Summary: exp.Result.Insights, - } + if t.With.grafana { + // push data to metrics service + fortioResult := fortioResult{ + EndpointResults: data, + Summary: exp.Result.Insights, + } - // get URL of metrics server from environment variable - metricsServerURL, ok := os.LookupEnv(MetricsServerURL) - if !ok { - errorMessage := "could not look up METRICS_SERVER_URL environment variable" - log.Logger.Error(errorMessage) - return fmt.Errorf(errorMessage) - } + // get URL of metrics server from environment variable + metricsServerURL, ok := os.LookupEnv(MetricsServerURL) + if !ok { + errorMessage := "could not look up METRICS_SERVER_URL environment variable" + log.Logger.Error(errorMessage) + return fmt.Errorf(errorMessage) + } - // TODO: remove - fortioResultBytes, _ := json.Marshal(fortioResult) - log.Logger.Trace(string(fortioResultBytes)) + // TODO: remove + fortioResultBytes, _ := json.Marshal(fortioResult) + log.Logger.Trace(string(fortioResultBytes)) - if err = putResultToMetricsService(metricsServerURL, exp.Metadata.Namespace, exp.Metadata.Name, fortioResult); err != nil { - return err + if err = putResultToMetricsService(metricsServerURL, exp.Metadata.Namespace, exp.Metadata.Name, fortioResult); err != nil { + return err + } } return nil diff --git a/base/test_helpers.go b/base/test_helpers.go index e82bb5c80..00cc8afde 100644 --- a/base/test_helpers.go +++ b/base/test_helpers.go @@ -34,7 +34,6 @@ func (m *mockDriver) GetRevision() int { // CreateExperimentYaml creates an experiment.yaml file from a template and a URL func CreateExperimentYaml(t *testing.T, template string, url string, output string) { - values := struct { URL string }{ diff --git a/driver/kubedriver_test.go b/driver/kubedriver_test.go index 773290f05..9c32555bd 100644 --- a/driver/kubedriver_test.go +++ b/driver/kubedriver_test.go @@ -67,6 +67,8 @@ func TestKOps(t *testing.T) { func TestKubeRun(t *testing.T) { _ = os.Chdir(t.TempDir()) + err := os.Setenv(base.MetricsServerURL, "http://iter8.default:8080") + assert.NoError(t, err) // create and configure HTTP endpoint for testing mux, addr := 
fhttp.DynamicHTTPServer(false) @@ -100,7 +102,7 @@ func TestKubeRun(t *testing.T) { }, }, metav1.CreateOptions{}) - err := base.RunExperiment(false, kd) + err = base.RunExperiment(false, kd) assert.NoError(t, err) // sanity check -- handler was called assert.True(t, verifyHandlerCalled) From c6f30b4610167e0121d731a82281b70741df0b6c Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Mon, 24 Jul 2023 16:11:58 -0400 Subject: [PATCH 004/121] Fix some tests Signed-off-by: Alan Cha --- base/collect_http.go | 17 +++++------------ metrics/server.go | 25 ++++++++++++------------- metrics/server_test.go | 10 +++++++--- 3 files changed, 24 insertions(+), 28 deletions(-) diff --git a/base/collect_http.go b/base/collect_http.go index 433871b4a..4c4c93389 100644 --- a/base/collect_http.go +++ b/base/collect_http.go @@ -68,13 +68,13 @@ type collectHTTPInputs struct { grafana bool } -// FortioResult +// FortioResult is the raw data sent to the metrics server +// This data will be transformed into httpDashboard when getHTTPGrafana is called type FortioResult struct { // key is the endpoint EndpointResults map[string]fhttp.HTTPRunnerResults - // TODO: add type - Summary interface{} + Summary Insights } const ( @@ -357,13 +357,6 @@ func (t *collectHTTPTask) getFortioResults() (map[string]fhttp.HTTPRunnerResults return results, err } -type fortioResult struct { - // key is the endpoint - EndpointResults map[string]fhttp.HTTPRunnerResults - - Summary interface{} -} - // run executes this task func (t *collectHTTPTask) run(exp *Experiment) error { err := t.validateInputs() @@ -513,9 +506,9 @@ func (t *collectHTTPTask) run(exp *Experiment) error { if t.With.grafana { // push data to metrics service - fortioResult := fortioResult{ + fortioResult := FortioResult{ EndpointResults: data, - Summary: exp.Result.Insights, + Summary: *exp.Result.Insights, } // get URL of metrics server from environment variable diff --git a/metrics/server.go b/metrics/server.go index 9805827c0..1b28edcc5 100644 --- a/metrics/server.go +++ b/metrics/server.go @@ -12,6 +12,7 @@ import ( "time" "github.com/iter8-tools/iter8/abn" + "github.com/iter8-tools/iter8/base" util "github.com/iter8-tools/iter8/base" "github.com/iter8-tools/iter8/base/log" "github.com/iter8-tools/iter8/controllers" @@ -79,8 +80,7 @@ type httpDashboard struct { // key is the endpoint Endpoints map[string]httpEndpointPanel - // TODO: add type - Summary interface{} + Summary base.Insights } var allRoutemaps controllers.AllRouteMapsInterface = &controllers.DefaultRoutemaps{} @@ -395,8 +395,7 @@ func bucketLabel(min, max float64, decimalPlace float64) string { return fmt.Sprintf("%s - %s", strconv.FormatFloat(roundDecimal(min, decimalPlace), 'f', -1, 64), strconv.FormatFloat(roundDecimal(max, decimalPlace), 'f', -1, 64)) } -// calculateHistogram creates histograms based on Fortio result -func getFortioCalculateHistogram(fortioHistogram []fstats.Bucket, decimalPlace float64) grafanaHistogram { +func getHTTPHistogram(fortioHistogram []fstats.Bucket, decimalPlace float64) grafanaHistogram { grafanaHistogram := grafanaHistogram{} for _, bucket := range fortioHistogram { @@ -410,7 +409,7 @@ func getFortioCalculateHistogram(fortioHistogram []fstats.Bucket, decimalPlace f return grafanaHistogram } -func getFortioHistogramStats(fortioHistogram *fstats.HistogramData, decimalPlace float64) storage.SummarizedMetric { +func getHTTPStatistics(fortioHistogram *fstats.HistogramData, decimalPlace float64) storage.SummarizedMetric { return storage.SummarizedMetric{ Count: 
uint64(fortioHistogram.Count), Mean: fortioHistogram.Avg * 1000, @@ -420,16 +419,16 @@ func getFortioHistogramStats(fortioHistogram *fstats.HistogramData, decimalPlace } } -func getFortioEndpointPanel(httpRunnerResults *fhttp.HTTPRunnerResults) httpEndpointPanel { +func getHTTPEndpointPanel(httpRunnerResults *fhttp.HTTPRunnerResults) httpEndpointPanel { result := httpEndpointPanel{} if httpRunnerResults.DurationHistogram != nil { - result.Durations = getFortioCalculateHistogram(httpRunnerResults.DurationHistogram.Data, 1) - result.Statistics = getFortioHistogramStats(httpRunnerResults.DurationHistogram, 1) + result.Durations = getHTTPHistogram(httpRunnerResults.DurationHistogram.Data, 1) + result.Statistics = getHTTPStatistics(httpRunnerResults.DurationHistogram, 1) } if httpRunnerResults.ErrorsDurationHistogram != nil { - result.ErrorDurations = getFortioCalculateHistogram(httpRunnerResults.ErrorsDurationHistogram.Data, 1) - result.ErrorStatistics = getFortioHistogramStats(httpRunnerResults.ErrorsDurationHistogram, 1) + result.ErrorDurations = getHTTPHistogram(httpRunnerResults.ErrorsDurationHistogram.Data, 1) + result.ErrorStatistics = getHTTPStatistics(httpRunnerResults.ErrorsDurationHistogram, 1) } result.ReturnCodes = httpRunnerResults.RetCodes @@ -437,7 +436,7 @@ func getFortioEndpointPanel(httpRunnerResults *fhttp.HTTPRunnerResults) httpEndp return result } -func getFortioDashboard(fortioResult util.FortioResult) httpDashboard { +func getHTTPDashboardHelper(fortioResult util.FortioResult) httpDashboard { // add endpoint results dashboard := httpDashboard{ Endpoints: map[string]httpEndpointPanel{}, @@ -445,7 +444,7 @@ func getFortioDashboard(fortioResult util.FortioResult) httpDashboard { for endpoint, endpointResult := range fortioResult.EndpointResults { endpointResult := endpointResult - dashboard.Endpoints[endpoint] = getFortioEndpointPanel(&endpointResult) + dashboard.Endpoints[endpoint] = getHTTPEndpointPanel(&endpointResult) } // add summary @@ -556,7 +555,7 @@ func getHTTPGrafana(w http.ResponseWriter, r *http.Request) { } // JSON marshal the dashboard - dashboardBytes, err := json.Marshal(getFortioDashboard(fortioResult)) + dashboardBytes, err := json.Marshal(getHTTPDashboardHelper(fortioResult)) if err != nil { errorMessage := "cannot JSON marshal HTTP dashboard" log.Logger.Error(errorMessage) diff --git a/metrics/server_test.go b/metrics/server_test.go index 95101af05..c29ab3054 100644 --- a/metrics/server_test.go +++ b/metrics/server_test.go @@ -478,7 +478,7 @@ func TestFortioCalculateHistogram(t *testing.T) { }, } - histogram := getFortioCalculateHistogram(data, 1) + histogram := getHTTPHistogram(data, 1) histogramJSON, _ := json.Marshal(histogram) fmt.Println(string(histogramJSON)) @@ -491,10 +491,14 @@ func TestFortioHistogramStats(t *testing.T) { err := json.Unmarshal([]byte(result), &fortioResult) assert.NoError(t, err) - dashboard := getFortioDashboard(fortioResult) + dashboard := getHTTPDashboardHelper(fortioResult) assert.NotNil(t, dashboard) dashboardBytes, err := json.Marshal(dashboard) assert.NoError(t, err) - assert.Equal(t, "{\"Endpoints\":{\"http://httpbin.default/get\":{\"Durations\":[{\"Version\":\"0\",\"Bucket\":\"4.2 - 5\",\"Value\":5},{\"Version\":\"0\",\"Bucket\":\"5 - 6\",\"Value\":5},{\"Version\":\"0\",\"Bucket\":\"6 - 7\",\"Value\":4},{\"Version\":\"0\",\"Bucket\":\"7 - 8\",\"Value\":5},{\"Version\":\"0\",\"Bucket\":\"8 - 9\",\"Value\":5},{\"Version\":\"0\",\"Bucket\":\"9 - 10\",\"Value\":4},{\"Version\":\"0\",\"Bucket\":\"10 - 
11\",\"Value\":5},{\"Version\":\"0\",\"Bucket\":\"11 - 12\",\"Value\":3},{\"Version\":\"0\",\"Bucket\":\"12 - 14\",\"Value\":12},{\"Version\":\"0\",\"Bucket\":\"14 - 16\",\"Value\":7},{\"Version\":\"0\",\"Bucket\":\"16 - 18\",\"Value\":10},{\"Version\":\"0\",\"Bucket\":\"18 - 20\",\"Value\":9},{\"Version\":\"0\",\"Bucket\":\"20 - 25\",\"Value\":11},{\"Version\":\"0\",\"Bucket\":\"25 - 30\",\"Value\":8},{\"Version\":\"0\",\"Bucket\":\"30 - 35\",\"Value\":5},{\"Version\":\"0\",\"Bucket\":\"35 - 40\",\"Value\":1},{\"Version\":\"0\",\"Bucket\":\"40 - 40.4\",\"Value\":1}],\"Statistics\":{\"Count\":100,\"Mean\":15.977100850000001,\"StdDev\":8.340658047253257,\"Min\":4.2238750000000005,\"Max\":40.490041999999995},\"Error durations\":[],\"Error statistics\":{\"Count\":0,\"Mean\":0,\"StdDev\":0,\"Min\":0,\"Max\":0},\"Return codes\":{\"200\":100}}},\"Summary\":{\"SummaryMetricValues\":[{}],\"histMetricValues\":[{\"http/latency\":[{\"count\":5,\"lower\":4.2238750000000005,\"upper\":5},{\"count\":5,\"lower\":5,\"upper\":6},{\"count\":4,\"lower\":6,\"upper\":7},{\"count\":5,\"lower\":7,\"upper\":8},{\"count\":5,\"lower\":8,\"upper\":9.000000000000002},{\"count\":4,\"lower\":9.000000000000002,\"upper\":10},{\"count\":5,\"lower\":10,\"upper\":11},{\"count\":3,\"lower\":11,\"upper\":12},{\"count\":12,\"lower\":12,\"upper\":14},{\"count\":7,\"lower\":14,\"upper\":16},{\"count\":10,\"lower\":16,\"upper\":18.000000000000004},{\"count\":9,\"lower\":18.000000000000004,\"upper\":20},{\"count\":11,\"lower\":20,\"upper\":25},{\"count\":8,\"lower\":25,\"upper\":30},{\"count\":5,\"lower\":30,\"upper\":35},{\"count\":1,\"lower\":35,\"upper\":40},{\"count\":1,\"lower\":40,\"upper\":40.490041999999995}]}],\"metricsInfo\":{\"http/latency\":{\"description\":\"Latency Histogram\",\"type\":\"Histogram\",\"units\":\"msec\"},\"http://httpbin.default/get/error-count\":{\"description\":\"number of responses that were errors\",\"type\":\"Counter\"},\"http://httpbin.default/get/error-rate\":{\"description\":\"fraction of responses that were errors\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-max\":{\"description\":\"maximum of observed latency values\",\"type\":\"Gauge\",\"units\":\"msec\"},\"http://httpbin.default/get/latency-mean\":{\"description\":\"mean of observed latency values\",\"type\":\"Gauge\",\"units\":\"msec\"},\"http://httpbin.default/get/latency-min\":{\"description\":\"minimum of observed latency values\",\"type\":\"Gauge\",\"units\":\"msec\"},\"http://httpbin.default/get/latency-p50\":{\"description\":\"50-th percentile of observed latency values\",\"type\":\"Gauge\",\"units\":\"msec\"},\"http://httpbin.default/get/latency-p75\":{\"description\":\"75-th percentile of observed latency values\",\"type\":\"Gauge\",\"units\":\"msec\"},\"http://httpbin.default/get/latency-p90\":{\"description\":\"90-th percentile of observed latency values\",\"type\":\"Gauge\",\"units\":\"msec\"},\"http://httpbin.default/get/latency-p95\":{\"description\":\"95-th percentile of observed latency values\",\"type\":\"Gauge\",\"units\":\"msec\"},\"http://httpbin.default/get/latency-p99\":{\"description\":\"99-th percentile of observed latency values\",\"type\":\"Gauge\",\"units\":\"msec\"},\"http://httpbin.default/get/latency-p99.9\":{\"description\":\"99.9-th percentile of observed latency values\",\"type\":\"Gauge\",\"units\":\"msec\"},\"http://httpbin.default/get/latency-stddev\":{\"description\":\"standard deviation of observed latency 
values\",\"type\":\"Gauge\",\"units\":\"msec\"},\"http://httpbin.default/get/request-count\":{\"description\":\"number of requests sent\",\"type\":\"Counter\"}},\"nonHistMetricValues\":[{\"http://httpbin.default/get/error-count\":[0],\"http://httpbin.default/get/error-rate\":[0],\"http://httpbin.default/get/latency-max\":[40.490041999999995],\"http://httpbin.default/get/latency-mean\":[15.977100850000001],\"http://httpbin.default/get/latency-min\":[4.2238750000000005],\"http://httpbin.default/get/latency-p50\":[14.571428571428571],\"http://httpbin.default/get/latency-p75\":[20.454545454545453],\"http://httpbin.default/get/latency-p90\":[28.125],\"http://httpbin.default/get/latency-p95\":[32],\"http://httpbin.default/get/latency-p99\":[40],\"http://httpbin.default/get/latency-p99.9\":[40.441037800000004],\"http://httpbin.default/get/latency-stddev\":[8.340658047253257],\"http://httpbin.default/get/request-count\":[100]}],\"numVersions\":1,\"versionNames\":null}}", string(dashboardBytes)) + assert.Equal( + t, + "{\"Endpoints\":{\"http://httpbin.default/get\":{\"Durations\":[{\"Version\":\"0\",\"Bucket\":\"4.2 - 5\",\"Value\":5},{\"Version\":\"0\",\"Bucket\":\"5 - 6\",\"Value\":5},{\"Version\":\"0\",\"Bucket\":\"6 - 7\",\"Value\":4},{\"Version\":\"0\",\"Bucket\":\"7 - 8\",\"Value\":5},{\"Version\":\"0\",\"Bucket\":\"8 - 9\",\"Value\":5},{\"Version\":\"0\",\"Bucket\":\"9 - 10\",\"Value\":4},{\"Version\":\"0\",\"Bucket\":\"10 - 11\",\"Value\":5},{\"Version\":\"0\",\"Bucket\":\"11 - 12\",\"Value\":3},{\"Version\":\"0\",\"Bucket\":\"12 - 14\",\"Value\":12},{\"Version\":\"0\",\"Bucket\":\"14 - 16\",\"Value\":7},{\"Version\":\"0\",\"Bucket\":\"16 - 18\",\"Value\":10},{\"Version\":\"0\",\"Bucket\":\"18 - 20\",\"Value\":9},{\"Version\":\"0\",\"Bucket\":\"20 - 25\",\"Value\":11},{\"Version\":\"0\",\"Bucket\":\"25 - 30\",\"Value\":8},{\"Version\":\"0\",\"Bucket\":\"30 - 35\",\"Value\":5},{\"Version\":\"0\",\"Bucket\":\"35 - 40\",\"Value\":1},{\"Version\":\"0\",\"Bucket\":\"40 - 40.4\",\"Value\":1}],\"Statistics\":{\"Count\":100,\"Mean\":15.977100850000001,\"StdDev\":8.340658047253257,\"Min\":4.2238750000000005,\"Max\":40.490041999999995},\"Error durations\":[],\"Error statistics\":{\"Count\":0,\"Mean\":0,\"StdDev\":0,\"Min\":0,\"Max\":0},\"Return codes\":{\"200\":100}}},\"Summary\":{\"numVersions\":1,\"versionNames\":null,\"metricsInfo\":{\"http/latency\":{\"description\":\"Latency Histogram\",\"units\":\"msec\",\"type\":\"Histogram\"},\"http://httpbin.default/get/error-count\":{\"description\":\"number of responses that were errors\",\"type\":\"Counter\"},\"http://httpbin.default/get/error-rate\":{\"description\":\"fraction of responses that were errors\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-max\":{\"description\":\"maximum of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-mean\":{\"description\":\"mean of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-min\":{\"description\":\"minimum of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p50\":{\"description\":\"50-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p75\":{\"description\":\"75-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p90\":{\"description\":\"90-th percentile of observed latency 
values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p95\":{\"description\":\"95-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p99\":{\"description\":\"99-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p99.9\":{\"description\":\"99.9-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-stddev\":{\"description\":\"standard deviation of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/request-count\":{\"description\":\"number of requests sent\",\"type\":\"Counter\"}},\"nonHistMetricValues\":[{\"http://httpbin.default/get/error-count\":[0],\"http://httpbin.default/get/error-rate\":[0],\"http://httpbin.default/get/latency-max\":[40.490041999999995],\"http://httpbin.default/get/latency-mean\":[15.977100850000001],\"http://httpbin.default/get/latency-min\":[4.2238750000000005],\"http://httpbin.default/get/latency-p50\":[14.571428571428571],\"http://httpbin.default/get/latency-p75\":[20.454545454545453],\"http://httpbin.default/get/latency-p90\":[28.125],\"http://httpbin.default/get/latency-p95\":[32],\"http://httpbin.default/get/latency-p99\":[40],\"http://httpbin.default/get/latency-p99.9\":[40.441037800000004],\"http://httpbin.default/get/latency-stddev\":[8.340658047253257],\"http://httpbin.default/get/request-count\":[100]}],\"histMetricValues\":[{\"http/latency\":[{\"lower\":4.2238750000000005,\"upper\":5,\"count\":5},{\"lower\":5,\"upper\":6,\"count\":5},{\"lower\":6,\"upper\":7,\"count\":4},{\"lower\":7,\"upper\":8,\"count\":5},{\"lower\":8,\"upper\":9.000000000000002,\"count\":5},{\"lower\":9.000000000000002,\"upper\":10,\"count\":4},{\"lower\":10,\"upper\":11,\"count\":5},{\"lower\":11,\"upper\":12,\"count\":3},{\"lower\":12,\"upper\":14,\"count\":12},{\"lower\":14,\"upper\":16,\"count\":7},{\"lower\":16,\"upper\":18.000000000000004,\"count\":10},{\"lower\":18.000000000000004,\"upper\":20,\"count\":9},{\"lower\":20,\"upper\":25,\"count\":11},{\"lower\":25,\"upper\":30,\"count\":8},{\"lower\":30,\"upper\":35,\"count\":5},{\"lower\":35,\"upper\":40,\"count\":1},{\"lower\":40,\"upper\":40.490041999999995,\"count\":1}]}],\"SummaryMetricValues\":[{}]}}", + string(dashboardBytes), + ) } From d07a5d9e972a97f26a0e708fbea29937d9908570 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Mon, 24 Jul 2023 16:26:18 -0400 Subject: [PATCH 005/121] More clean up Signed-off-by: Alan Cha --- base/collect_http.go | 18 +++++++----------- metrics/server.go | 8 ++++---- 2 files changed, 11 insertions(+), 15 deletions(-) diff --git a/base/collect_http.go b/base/collect_http.go index 4c4c93389..5893cb1ad 100644 --- a/base/collect_http.go +++ b/base/collect_http.go @@ -62,9 +62,9 @@ type collectHTTPInputs struct { // Endpoints is used to define multiple endpoints to test Endpoints map[string]endpoint `json:"endpoints" yaml:"endpoints"` - // TODO: remove // Determines if Grafana dashboard should be created // dasboard vs report/assess tasks + // TODO: remove grafana bool } @@ -101,9 +101,11 @@ const ( // example: latency-p75.0 is the 75th percentile latency builtInHTTPLatencyPercentilePrefix = "latency-p" - // TODO: move elsewhere, abn/service seems to produce cyclical dependency // MetricsServerURL is the URL of the metrics server + // TODO: move elsewhere, abn/service seems to produce cyclical dependency, also needed by gRPC 
MetricsServerURL = "METRICS_SERVER_URL" + + PerformanceResultPath = "/performanceResult" ) var ( @@ -241,11 +243,9 @@ func getFortioOptions(c endpoint) (*fhttp.HTTPRunnerOptions, error) { return fo, nil } -// TODO: rename to /performanceResult -// putResultToMetricsService -func putResultToMetricsService(metricsServerURL, namespace, experiment string, data interface{}) error { +func putPerformanceResultToMetricsService(metricsServerURL, namespace, experiment string, data interface{}) error { // handle URL and URL parameters - u, _ := url.ParseRequestURI(metricsServerURL + "/result") + u, _ := url.ParseRequestURI(metricsServerURL + PerformanceResultPath) params := url.Values{} params.Add("namespace", namespace) params.Add("experiment", experiment) @@ -519,11 +519,7 @@ func (t *collectHTTPTask) run(exp *Experiment) error { return fmt.Errorf(errorMessage) } - // TODO: remove - fortioResultBytes, _ := json.Marshal(fortioResult) - log.Logger.Trace(string(fortioResultBytes)) - - if err = putResultToMetricsService(metricsServerURL, exp.Metadata.Namespace, exp.Metadata.Name, fortioResult); err != nil { + if err = putPerformanceResultToMetricsService(metricsServerURL, exp.Metadata.Namespace, exp.Metadata.Name, fortioResult); err != nil { return err } } diff --git a/metrics/server.go b/metrics/server.go index 1b28edcc5..6d07835b0 100644 --- a/metrics/server.go +++ b/metrics/server.go @@ -101,8 +101,8 @@ func Start(stopCh <-chan struct{}) error { // configure endpoints http.HandleFunc("/metrics", getMetrics) - http.HandleFunc("/result", putResult) - http.HandleFunc("/HTTPGrafana", getHTTPGrafana) + http.HandleFunc(base.PerformanceResultPath, putResult) + http.HandleFunc("/httpDashboard", getHTTPDashboard) // configure HTTP server server := &http.Server{ @@ -506,8 +506,8 @@ func putResult(w http.ResponseWriter, r *http.Request) { } } -// getHTTPGrafana handles GET /getHTTPGrafana with query parameter application=namespace/name -func getHTTPGrafana(w http.ResponseWriter, r *http.Request) { +// getHTTPDashboard handles GET /getHTTPDashboard with query parameter application=namespace/name +func getHTTPDashboard(w http.ResponseWriter, r *http.Request) { log.Logger.Trace("getHTTPGrafana called") defer log.Logger.Trace("getHTTPGrafana completed") From 2c416bfee8a6ea142b729e474a42e0ef6dbba5d9 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Mon, 24 Jul 2023 21:31:02 -0400 Subject: [PATCH 006/121] Fix import Signed-off-by: Alan Cha --- metrics/server.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/metrics/server.go b/metrics/server.go index 6d07835b0..39aff18de 100644 --- a/metrics/server.go +++ b/metrics/server.go @@ -12,7 +12,6 @@ import ( "time" "github.com/iter8-tools/iter8/abn" - "github.com/iter8-tools/iter8/base" util "github.com/iter8-tools/iter8/base" "github.com/iter8-tools/iter8/base/log" "github.com/iter8-tools/iter8/controllers" @@ -80,7 +79,7 @@ type httpDashboard struct { // key is the endpoint Endpoints map[string]httpEndpointPanel - Summary base.Insights + Summary util.Insights } var allRoutemaps controllers.AllRouteMapsInterface = &controllers.DefaultRoutemaps{} @@ -101,7 +100,7 @@ func Start(stopCh <-chan struct{}) error { // configure endpoints http.HandleFunc("/metrics", getMetrics) - http.HandleFunc(base.PerformanceResultPath, putResult) + http.HandleFunc(util.PerformanceResultPath, putResult) http.HandleFunc("/httpDashboard", getHTTPDashboard) // configure HTTP server From 25c4a07761e0e48ed7b61ea99b8ba6a264295931 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Wed, 
26 Jul 2023 09:11:56 -0400 Subject: [PATCH 007/121] Improve custom metrics tests Signed-off-by: Alan Cha --- base/custom_metrics_test.go | 100 +++++++++++++++++++----------------- 1 file changed, 52 insertions(+), 48 deletions(-) diff --git a/base/custom_metrics_test.go b/base/custom_metrics_test.go index b1e04cad5..797ec9e37 100644 --- a/base/custom_metrics_test.go +++ b/base/custom_metrics_test.go @@ -1,7 +1,6 @@ package base import ( - "errors" "io" "net/http" "net/url" @@ -89,7 +88,7 @@ const ( "}[0s])) by (le))" ) -func getCustomMetricsTask(t *testing.T, providerName string, providerURL string) *customMetricsTask { +func getCustomMetricsTask(providerName string, providerURL string) *customMetricsTask { // valid collect database task... should succeed ct := &customMetricsTask{ TaskMeta: TaskMeta{ @@ -99,10 +98,6 @@ func getCustomMetricsTask(t *testing.T, providerName string, providerURL string) Templates: map[string]string{providerName: providerURL}, }, } - - httpmock.Activate() - t.Cleanup(httpmock.DeactivateAndReset) - httpmock.RegisterNoResponder(httpmock.InitialTransport.RoundTrip) return ct } @@ -157,7 +152,8 @@ func TestIstioProm(t *testing.T) { tplString := string(dat) _ = os.Chdir(t.TempDir()) - ct := getCustomMetricsTask(t, "istio-prom", istioPromProviderURL) + startHTTPMock(t) + ct := getCustomMetricsTask("istio-prom", istioPromProviderURL) ct.With.Values = map[string]interface{}{"latencyPercentiles": []string{"90"}} ct.With.VersionValues = []map[string]interface{}{{ "labels": map[string]interface{}{ @@ -169,11 +165,11 @@ func TestIstioProm(t *testing.T) { }} // mock provider URL - httpmock.RegisterResponder("GET", istioPromProviderURL, + httpmock.RegisterResponder(http.MethodGet, istioPromProviderURL, httpmock.NewStringResponder(200, tplString)) // mock Istio Prometheus server - httpmock.RegisterResponder("GET", "http://prometheus.istio-system:9090/api/v1/query", + httpmock.RegisterResponder(http.MethodGet, "http://prometheus.istio-system:9090/api/v1/query", func(req *http.Request) (*http.Response, error) { queryParam := strings.TrimSpace(req.URL.Query().Get("query")) @@ -264,7 +260,7 @@ func TestIstioProm(t *testing.T) { }`), nil } - return nil, errors.New("") + return nil, nil }) exp := &Experiment{ @@ -293,14 +289,15 @@ func TestNaN(t *testing.T) { tplString := string(dat) _ = os.Chdir(t.TempDir()) - ct := getCustomMetricsTask(t, "nan", "http://url") + startHTTPMock(t) + ct := getCustomMetricsTask("nan", "http://url") // mock provider URL - httpmock.RegisterResponder("GET", "http://url", + httpmock.RegisterResponder(http.MethodGet, "http://url", httpmock.NewStringResponder(200, tplString)) // mock provider - httpmock.RegisterResponder("GET", "http://url/query", + httpmock.RegisterResponder(http.MethodGet, "http://url/query", func(req *http.Request) (*http.Response, error) { queryParam := strings.TrimSpace(req.URL.Query().Get("query")) t.Logf("queryParam = %s", queryParam) @@ -312,7 +309,7 @@ func TestNaN(t *testing.T) { return httpmock.NewStringResponse(200, `{"value": "NaN"}`), nil } - return nil, errors.New("") + return nil, nil }) // experiment @@ -341,14 +338,15 @@ func TestCEOneVersion(t *testing.T) { tplString := string(dat) _ = os.Chdir(t.TempDir()) - ct := getCustomMetricsTask(t, testCE, cePromProviderURL) + startHTTPMock(t) + ct := getCustomMetricsTask(testCE, cePromProviderURL) // mock provider URL - httpmock.RegisterResponder("GET", istioPromProviderURL, + httpmock.RegisterResponder(http.MethodGet, istioPromProviderURL, httpmock.NewStringResponder(200, 
tplString)) // request-count - httpmock.RegisterResponder("GET", testCEPromURL+queryString+url.QueryEscape(testCERequestCount), + httpmock.RegisterResponder(http.MethodGet, testCEPromURL+queryString+url.QueryEscape(testCERequestCount), httpmock.NewStringResponder(200, `{ "status": "success", "data": { @@ -366,7 +364,7 @@ func TestCEOneVersion(t *testing.T) { }`)) // error-count - httpmock.RegisterResponder("GET", testCEPromURL+queryString+url.QueryEscape(testCEErrorCount), + httpmock.RegisterResponder(http.MethodGet, testCEPromURL+queryString+url.QueryEscape(testCEErrorCount), httpmock.NewStringResponder(200, `{ "status": "success", "data": { @@ -384,7 +382,7 @@ func TestCEOneVersion(t *testing.T) { }`)) // error-rate - httpmock.RegisterResponder("GET", testCEPromURL+queryString+url.QueryEscape(testCEErrorRate), + httpmock.RegisterResponder(http.MethodGet, testCEPromURL+queryString+url.QueryEscape(testCEErrorRate), httpmock.NewStringResponder(200, `{ "status": "success", "data": { @@ -427,10 +425,11 @@ func TestCEVersionValues(t *testing.T) { tplString := string(dat) _ = os.Chdir(t.TempDir()) - ct := getCustomMetricsTask(t, testCE, cePromProviderURL) + startHTTPMock(t) + ct := getCustomMetricsTask(testCE, cePromProviderURL) // mock provider URL - httpmock.RegisterResponder("GET", istioPromProviderURL, + httpmock.RegisterResponder(http.MethodGet, istioPromProviderURL, httpmock.NewStringResponder(200, tplString)) ct.With.VersionValues = []map[string]interface{}{{ @@ -438,7 +437,7 @@ func TestCEVersionValues(t *testing.T) { }} // request-count - httpmock.RegisterResponder("GET", testCEPromURL+queryString+url.QueryEscape(testCERequestCountWithRevisionName), + httpmock.RegisterResponder(http.MethodGet, testCEPromURL+queryString+url.QueryEscape(testCERequestCountWithRevisionName), httpmock.NewStringResponder(200, `{ "status": "success", "data": { @@ -456,7 +455,7 @@ func TestCEVersionValues(t *testing.T) { }`)) // error-count - httpmock.RegisterResponder("GET", testCEPromURL+queryString+url.QueryEscape(testCEErrorCountWithRevisionName), + httpmock.RegisterResponder(http.MethodGet, testCEPromURL+queryString+url.QueryEscape(testCEErrorCountWithRevisionName), httpmock.NewStringResponder(200, `{ "status": "success", "data": { @@ -474,7 +473,7 @@ func TestCEVersionValues(t *testing.T) { }`)) // error-rate - httpmock.RegisterResponder("GET", testCEPromURL+queryString+url.QueryEscape(testCEErrorRateWithRevisionName), + httpmock.RegisterResponder(http.MethodGet, testCEPromURL+queryString+url.QueryEscape(testCEErrorRateWithRevisionName), httpmock.NewStringResponder(200, `{ "status": "success", "data": { @@ -517,22 +516,23 @@ func TestCEUnauthorized(t *testing.T) { tplString := string(dat) _ = os.Chdir(t.TempDir()) - ct := getCustomMetricsTask(t, testCE, cePromProviderURL) + startHTTPMock(t) + ct := getCustomMetricsTask(testCE, cePromProviderURL) // mock provider URL - httpmock.RegisterResponder("GET", istioPromProviderURL, + httpmock.RegisterResponder(http.MethodGet, istioPromProviderURL, httpmock.NewStringResponder(200, tplString)) // request-count - httpmock.RegisterResponder("GET", testCEPromURL+queryString+url.QueryEscape(testCERequestCount), + httpmock.RegisterResponder(http.MethodGet, testCEPromURL+queryString+url.QueryEscape(testCERequestCount), httpmock.NewStringResponder(401, `Unauthorized`)) // error-count - httpmock.RegisterResponder("GET", testCEPromURL+queryString+url.QueryEscape(testCEErrorCount), + httpmock.RegisterResponder(http.MethodGet, 
testCEPromURL+queryString+url.QueryEscape(testCEErrorCount), httpmock.NewStringResponder(401, `Unauthorized`)) // error-rate - httpmock.RegisterResponder("GET", testCEPromURL+queryString+url.QueryEscape(testCEErrorRate), + httpmock.RegisterResponder(http.MethodGet, testCEPromURL+queryString+url.QueryEscape(testCEErrorRate), httpmock.NewStringResponder(401, `Unauthorized`)) exp := &Experiment{ @@ -559,14 +559,15 @@ func TestCESomeValues(t *testing.T) { tplString := string(dat) _ = os.Chdir(t.TempDir()) - ct := getCustomMetricsTask(t, testCE, cePromProviderURL) + startHTTPMock(t) + ct := getCustomMetricsTask(testCE, cePromProviderURL) // mock provider URL - httpmock.RegisterResponder("GET", istioPromProviderURL, + httpmock.RegisterResponder(http.MethodGet, istioPromProviderURL, httpmock.NewStringResponder(200, tplString)) // request-count - httpmock.RegisterResponder("GET", testCEPromURL+queryString+url.QueryEscape(testCERequestCount), httpmock.NewStringResponder(200, `{ + httpmock.RegisterResponder(http.MethodGet, testCEPromURL+queryString+url.QueryEscape(testCERequestCount), httpmock.NewStringResponder(200, `{ "status": "success", "data": { "resultType": "vector", @@ -575,7 +576,7 @@ func TestCESomeValues(t *testing.T) { }`)) // error-count - httpmock.RegisterResponder("GET", testCEPromURL+queryString+url.QueryEscape(testCEErrorCount), + httpmock.RegisterResponder(http.MethodGet, testCEPromURL+queryString+url.QueryEscape(testCEErrorCount), httpmock.NewStringResponder(200, `{ "status": "success", "data": { @@ -593,7 +594,7 @@ func TestCESomeValues(t *testing.T) { }`)) // error-rate - httpmock.RegisterResponder("GET", testCEPromURL+queryString+url.QueryEscape(testCEErrorRate), + httpmock.RegisterResponder(http.MethodGet, testCEPromURL+queryString+url.QueryEscape(testCEErrorRate), httpmock.NewStringResponder(200, `{ "status": "success", "data": { @@ -639,16 +640,17 @@ func TestCEMultipleVersions(t *testing.T) { tplString := string(dat) _ = os.Chdir(t.TempDir()) - ct := getCustomMetricsTask(t, testCE, cePromProviderURL) + startHTTPMock(t) + ct := getCustomMetricsTask(testCE, cePromProviderURL) ct.With.VersionValues = []map[string]interface{}{{}, {}} // mock provider URL - httpmock.RegisterResponder("GET", istioPromProviderURL, + httpmock.RegisterResponder(http.MethodGet, istioPromProviderURL, httpmock.NewStringResponder(200, tplString)) // request-count - httpmock.RegisterResponder("GET", testCEPromURL+queryString+url.QueryEscape(testCERequestCount), httpmock.NewStringResponder(200, `{ + httpmock.RegisterResponder(http.MethodGet, testCEPromURL+queryString+url.QueryEscape(testCERequestCount), httpmock.NewStringResponder(200, `{ "status": "success", "data": { "resultType": "vector", @@ -657,7 +659,7 @@ func TestCEMultipleVersions(t *testing.T) { }`)) // error-count - httpmock.RegisterResponder("GET", testCEPromURL+queryString+url.QueryEscape(testCEErrorCount), + httpmock.RegisterResponder(http.MethodGet, testCEPromURL+queryString+url.QueryEscape(testCEErrorCount), httpmock.NewStringResponder(200, `{ "status": "success", "data": { @@ -675,7 +677,7 @@ func TestCEMultipleVersions(t *testing.T) { }`)) // error-rate - httpmock.RegisterResponder("GET", testCEPromURL+queryString+url.QueryEscape(testCEErrorRate), + httpmock.RegisterResponder(http.MethodGet, testCEPromURL+queryString+url.QueryEscape(testCEErrorRate), httpmock.NewStringResponder(200, `{ "status": "success", "data": { @@ -723,16 +725,17 @@ func TestCEMultipleVersionsAndMetrics(t *testing.T) { tplString := string(dat) _ = 
os.Chdir(t.TempDir()) - ct := getCustomMetricsTask(t, testCE, cePromProviderURL) + startHTTPMock(t) + ct := getCustomMetricsTask(testCE, cePromProviderURL) ct.With.VersionValues = []map[string]interface{}{{}, {}} // mock provider URL - httpmock.RegisterResponder("GET", istioPromProviderURL, + httpmock.RegisterResponder(http.MethodGet, istioPromProviderURL, httpmock.NewStringResponder(200, tplString)) // request-count - httpmock.RegisterResponder("GET", testCEPromURL+queryString+url.QueryEscape(testCERequestCount), httpmock.NewStringResponder(200, `{ + httpmock.RegisterResponder(http.MethodGet, testCEPromURL+queryString+url.QueryEscape(testCERequestCount), httpmock.NewStringResponder(200, `{ "status": "success", "data": { "resultType": "vector", @@ -741,7 +744,7 @@ func TestCEMultipleVersionsAndMetrics(t *testing.T) { }`)) // error-count - httpmock.RegisterResponder("GET", testCEPromURL+queryString+url.QueryEscape(testCEErrorCount), + httpmock.RegisterResponder(http.MethodGet, testCEPromURL+queryString+url.QueryEscape(testCEErrorCount), httpmock.NewStringResponder(200, `{ "status": "success", "data": { @@ -759,7 +762,7 @@ func TestCEMultipleVersionsAndMetrics(t *testing.T) { }`)) // error-rate - httpmock.RegisterResponder("GET", testCEPromURL+queryString+url.QueryEscape(testCEErrorRate), + httpmock.RegisterResponder(http.MethodGet, testCEPromURL+queryString+url.QueryEscape(testCEErrorRate), httpmock.NewStringResponder(200, `{ "status": "success", "data": { @@ -806,14 +809,15 @@ func TestRequestBody(t *testing.T) { tplString := string(dat) _ = os.Chdir(t.TempDir()) - ct := getCustomMetricsTask(t, testRequestBody, testProviderURL) + startHTTPMock(t) + ct := getCustomMetricsTask(testRequestBody, testProviderURL) // mock provider URL - httpmock.RegisterResponder("GET", istioPromProviderURL, + httpmock.RegisterResponder(http.MethodGet, istioPromProviderURL, httpmock.NewStringResponder(200, tplString)) // request-count - httpmock.RegisterResponder("GET", testCEPromURL+queryString+url.QueryEscape(exampleQueryParameter), + httpmock.RegisterResponder(http.MethodGet, testCEPromURL+queryString+url.QueryEscape(exampleQueryParameter), func(req *http.Request) (*http.Response, error) { if req.Body != nil { b, err := io.ReadAll(req.Body) @@ -840,7 +844,7 @@ func TestRequestBody(t *testing.T) { } } - return nil, errors.New("") + return nil, nil }) exp := &Experiment{ From 6997204a9413673ec274bf09b40aef4335509ee1 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Wed, 26 Jul 2023 09:14:32 -0400 Subject: [PATCH 008/121] Fix http task Signed-off-by: Alan Cha --- base/collect_http.go | 230 ++++++++++++++++++++------------------ base/collect_http_test.go | 102 +++++++++++++++++ charts/iter8/values.yaml | 2 +- 3 files changed, 222 insertions(+), 112 deletions(-) diff --git a/base/collect_http.go b/base/collect_http.go index 5893cb1ad..d18c50ab1 100644 --- a/base/collect_http.go +++ b/base/collect_http.go @@ -65,7 +65,7 @@ type collectHTTPInputs struct { // Determines if Grafana dashboard should be created // dasboard vs report/assess tasks // TODO: remove - grafana bool + Grafana bool `json:"grafana" yaml:"grafana"` } // FortioResult is the raw data sent to the metrics server @@ -105,6 +105,7 @@ const ( // TODO: move elsewhere, abn/service seems to produce cyclical dependency, also needed by gRPC MetricsServerURL = "METRICS_SERVER_URL" + // PerformanceResultPath is the path to the PUT performanceResult/ endpoint PerformanceResultPath = "/performanceResult" ) @@ -245,7 +246,11 @@ func getFortioOptions(c endpoint) 
(*fhttp.HTTPRunnerOptions, error) { func putPerformanceResultToMetricsService(metricsServerURL, namespace, experiment string, data interface{}) error { // handle URL and URL parameters - u, _ := url.ParseRequestURI(metricsServerURL + PerformanceResultPath) + u, err := url.ParseRequestURI(metricsServerURL + PerformanceResultPath) + if err != nil { + return err + } + params := url.Values{} params.Add("namespace", namespace) params.Add("experiment", experiment) @@ -274,11 +279,15 @@ func putPerformanceResultToMetricsService(metricsServerURL, namespace, experimen // send request client := &http.Client{} - _, err = client.Do(req) + resp, err := client.Do(req) if err != nil { log.Logger.Error("could not send request to metrics server: ", err) return err } + defer func() { + err = resp.Body.Close() + log.Logger.Error("could not close response body: ", err) + }() log.Logger.Trace("sent request") @@ -324,7 +333,7 @@ func (t *collectHTTPTask) getFortioResults() (map[string]fhttp.HTTPRunnerResults // TODO: does ifr need to be a pointer? resultsKey := httpMetricPrefix + "-" + endpointID - if t.With.grafana { + if t.With.Grafana { resultsKey = endpoint.URL } results[resultsKey] = *ifr @@ -348,7 +357,7 @@ func (t *collectHTTPTask) getFortioResults() (map[string]fhttp.HTTPRunnerResults // TODO: does ifr need to be a pointer? resultsKey := httpMetricPrefix - if t.With.grafana { + if t.With.Grafana { resultsKey = t.With.endpoint.URL } results[resultsKey] = *ifr @@ -392,135 +401,134 @@ func (t *collectHTTPTask) run(exp *Experiment) error { } in := exp.Result.Insights - // TODO: delete - for provider, data := range data { - // request count - m := provider + "/" + builtInHTTPRequestCountID - mm := MetricMeta{ - Description: "number of requests sent", - Type: CounterMetricType, - } - if err = in.updateMetric(m, mm, 0, float64(data.DurationHistogram.Count)); err != nil { - return err + if t.With.Grafana { + // push data to metrics service + fortioResult := FortioResult{ + EndpointResults: data, + Summary: *exp.Result.Insights, } - // error count & rate - val := float64(0) - for code, count := range data.RetCodes { - if t.errorCode(code) { - val += float64(count) - } - } - // error count - m = provider + "/" + builtInHTTPErrorCountID - mm = MetricMeta{ - Description: "number of responses that were errors", - Type: CounterMetricType, + // get URL of metrics server from environment variable + metricsServerURL, ok := os.LookupEnv(MetricsServerURL) + if !ok { + errorMessage := "could not look up METRICS_SERVER_URL environment variable" + log.Logger.Error(errorMessage) + return fmt.Errorf(errorMessage) } - if err = in.updateMetric(m, mm, 0, val); err != nil { + + if err = putPerformanceResultToMetricsService(metricsServerURL, exp.Metadata.Namespace, exp.Metadata.Name, fortioResult); err != nil { return err } + } else { + for provider, data := range data { + // request count + m := provider + "/" + builtInHTTPRequestCountID + mm := MetricMeta{ + Description: "number of requests sent", + Type: CounterMetricType, + } + if err = in.updateMetric(m, mm, 0, float64(data.DurationHistogram.Count)); err != nil { + return err + } - // error-rate - m = provider + "/" + builtInHTTPErrorRateID - rc := float64(data.DurationHistogram.Count) - if rc != 0 { + // error count & rate + val := float64(0) + for code, count := range data.RetCodes { + if t.errorCode(code) { + val += float64(count) + } + } + // error count + m = provider + "/" + builtInHTTPErrorCountID mm = MetricMeta{ - Description: "fraction of responses that were errors", 
- Type: GaugeMetricType, + Description: "number of responses that were errors", + Type: CounterMetricType, } - if err = in.updateMetric(m, mm, 0, val/rc); err != nil { + if err = in.updateMetric(m, mm, 0, val); err != nil { return err } - } - // mean-latency - m = provider + "/" + builtInHTTPLatencyMeanID - mm = MetricMeta{ - Description: "mean of observed latency values", - Type: GaugeMetricType, - Units: StringPointer("msec"), - } - if err = in.updateMetric(m, mm, 0, 1000.0*data.DurationHistogram.Avg); err != nil { - return err - } - - // stddev-latency - m = provider + "/" + builtInHTTPLatencyStdDevID - mm = MetricMeta{ - Description: "standard deviation of observed latency values", - Type: GaugeMetricType, - Units: StringPointer("msec"), - } - if err = in.updateMetric(m, mm, 0, 1000.0*data.DurationHistogram.StdDev); err != nil { - return err - } - - // min-latency - m = provider + "/" + builtInHTTPLatencyMinID - mm = MetricMeta{ - Description: "minimum of observed latency values", - Type: GaugeMetricType, - Units: StringPointer("msec"), - } - if err = in.updateMetric(m, mm, 0, 1000.0*data.DurationHistogram.Min); err != nil { - return err - } + // error-rate + m = provider + "/" + builtInHTTPErrorRateID + rc := float64(data.DurationHistogram.Count) + if rc != 0 { + mm = MetricMeta{ + Description: "fraction of responses that were errors", + Type: GaugeMetricType, + } + if err = in.updateMetric(m, mm, 0, val/rc); err != nil { + return err + } + } - // max-latency - m = provider + "/" + builtInHTTPLatencyMaxID - mm = MetricMeta{ - Description: "maximum of observed latency values", - Type: GaugeMetricType, - Units: StringPointer("msec"), - } - if err = in.updateMetric(m, mm, 0, 1000.0*data.DurationHistogram.Max); err != nil { - return err - } + // mean-latency + m = provider + "/" + builtInHTTPLatencyMeanID + mm = MetricMeta{ + Description: "mean of observed latency values", + Type: GaugeMetricType, + Units: StringPointer("msec"), + } + if err = in.updateMetric(m, mm, 0, 1000.0*data.DurationHistogram.Avg); err != nil { + return err + } - // percentiles - for _, p := range data.DurationHistogram.Percentiles { - m = fmt.Sprintf("%v/%v%v", provider, builtInHTTPLatencyPercentilePrefix, p.Percentile) + // stddev-latency + m = provider + "/" + builtInHTTPLatencyStdDevID mm = MetricMeta{ - Description: fmt.Sprintf("%v-th percentile of observed latency values", p.Percentile), + Description: "standard deviation of observed latency values", Type: GaugeMetricType, Units: StringPointer("msec"), } - if err = in.updateMetric(m, mm, 0, 1000.0*p.Value); err != nil { + if err = in.updateMetric(m, mm, 0, 1000.0*data.DurationHistogram.StdDev); err != nil { return err } - } - // latency histogram - m = httpMetricPrefix + "/" + builtInHTTPLatencyHistID - mm = MetricMeta{ - Description: "Latency Histogram", - Type: HistogramMetricType, - Units: StringPointer("msec"), - } - lh := latencyHist(data.DurationHistogram) - if err = in.updateMetric(m, mm, 0, lh); err != nil { - return err - } - } + // min-latency + m = provider + "/" + builtInHTTPLatencyMinID + mm = MetricMeta{ + Description: "minimum of observed latency values", + Type: GaugeMetricType, + Units: StringPointer("msec"), + } + if err = in.updateMetric(m, mm, 0, 1000.0*data.DurationHistogram.Min); err != nil { + return err + } - if t.With.grafana { - // push data to metrics service - fortioResult := FortioResult{ - EndpointResults: data, - Summary: *exp.Result.Insights, - } + // max-latency + m = provider + "/" + builtInHTTPLatencyMaxID + mm = MetricMeta{ 
+ Description: "maximum of observed latency values", + Type: GaugeMetricType, + Units: StringPointer("msec"), + } + if err = in.updateMetric(m, mm, 0, 1000.0*data.DurationHistogram.Max); err != nil { + return err + } - // get URL of metrics server from environment variable - metricsServerURL, ok := os.LookupEnv(MetricsServerURL) - if !ok { - errorMessage := "could not look up METRICS_SERVER_URL environment variable" - log.Logger.Error(errorMessage) - return fmt.Errorf(errorMessage) - } + // percentiles + for _, p := range data.DurationHistogram.Percentiles { + m = fmt.Sprintf("%v/%v%v", provider, builtInHTTPLatencyPercentilePrefix, p.Percentile) + mm = MetricMeta{ + Description: fmt.Sprintf("%v-th percentile of observed latency values", p.Percentile), + Type: GaugeMetricType, + Units: StringPointer("msec"), + } + if err = in.updateMetric(m, mm, 0, 1000.0*p.Value); err != nil { + return err + } + } - if err = putPerformanceResultToMetricsService(metricsServerURL, exp.Metadata.Namespace, exp.Metadata.Name, fortioResult); err != nil { - return err + // latency histogram + m = httpMetricPrefix + "/" + builtInHTTPLatencyHistID + mm = MetricMeta{ + Description: "Latency Histogram", + Type: HistogramMetricType, + Units: StringPointer("msec"), + } + lh := latencyHist(data.DurationHistogram) + if err = in.updateMetric(m, mm, 0, lh); err != nil { + return err + } } } diff --git a/base/collect_http_test.go b/base/collect_http_test.go index 7a3775702..888483c1f 100644 --- a/base/collect_http_test.go +++ b/base/collect_http_test.go @@ -9,6 +9,7 @@ import ( "testing" "fortio.org/fortio/fhttp" + "github.com/jarcoal/httpmock" "github.com/stretchr/testify/assert" ) @@ -21,6 +22,12 @@ const ( from = "from" ) +func startHTTPMock(t *testing.T) { + httpmock.Activate() + t.Cleanup(httpmock.DeactivateAndReset) + httpmock.RegisterNoResponder(httpmock.InitialTransport.RoundTrip) +} + func TestRunCollectHTTP(t *testing.T) { mux, addr := fhttp.DynamicHTTPServer(false) @@ -342,3 +349,98 @@ func TestErrorCode(t *testing.T) { }) assert.True(t, task.errorCode(5)) } + +func TestPutPerformanceResultToMetricsService(t *testing.T) { + startHTTPMock(t) + + metricsServerURL := "http://my-server.com" + namespace := "my-namespace" + experiment := "my-experiment" + data := map[string]string{ + "hello": "world", + } + + called := false + httpmock.RegisterResponder(http.MethodPut, metricsServerURL+PerformanceResultPath, + func(req *http.Request) (*http.Response, error) { + called = true + + assert.Equal(t, namespace, req.URL.Query().Get("namespace")) + assert.Equal(t, experiment, req.URL.Query().Get("experiment")) + + body, err := io.ReadAll(req.Body) + assert.NoError(t, err) + assert.Equal(t, "{\"hello\":\"world\"}", string(body)) + + return httpmock.NewStringResponse(200, "success"), nil + }) + + err := putPerformanceResultToMetricsService( + metricsServerURL, + namespace, + experiment, + data, + ) + assert.NoError(t, err) + assert.True(t, called) +} + +// func TestRunCollectHTTPGrafana(t *testing.T) { +// startHTTPMock(t) + +// metricsServerURL := "http://iter8.default:8080" +// namespace := "default" +// experiment := "default" + +// err := os.Setenv("METRICS_SERVER_URL", metricsServerURL) +// assert.NoError(t, err) + +// metricsServerCalled := false +// httpmock.RegisterResponder(http.MethodPut, metricsServerURL+PerformanceResultPath, +// func(req *http.Request) (*http.Response, error) { +// metricsServerCalled = true + +// assert.Equal(t, namespace, req.URL.Query().Get("namespace")) +// assert.Equal(t, experiment, 
req.URL.Query().Get("experiment")) + +// // body, err := io.ReadAll(req.Body) +// // assert.NoError(t, err) +// // assert.Equal(t, "{\"hello\":\"world\"}", string(body)) + +// return httpmock.NewStringResponse(200, "success"), nil +// }) + +// endpointURL := "http://foo.com" +// endpointCalled := false +// httpmock.RegisterResponder(http.MethodGet, endpointURL, +// httpmock.NewStringResponder(200, "success")) + +// // valid collect HTTP task... should succeed +// ct := &collectHTTPTask{ +// TaskMeta: TaskMeta{ +// Task: StringPointer(CollectHTTPTaskName), +// }, +// With: collectHTTPInputs{ +// endpoint: endpoint{ +// URL: endpointURL, +// }, +// Grafana: true, +// }, +// } +// httpmock.RegisterResponder(http.MethodPut, metricsServerURL+PerformanceResultPath, +// func(req *http.Request) (*http.Response, error) { +// endpointCalled = true +// return httpmock.NewStringResponse(200, "success"), nil +// }) + +// exp := &Experiment{ +// Spec: []Task{ct}, +// Result: &ExperimentResult{}, +// } +// exp.initResults(1) +// err = ct.run(exp) +// // assert.NoError(t, err) +// assert.True(t, metricsServerCalled) // ensure that the /foo/ handler is called +// assert.True(t, endpointCalled) // ensure that the /foo/ handler is called +// assert.Equal(t, exp.Result.Insights.NumVersions, 1) +// } diff --git a/charts/iter8/values.yaml b/charts/iter8/values.yaml index 27990b3e4..237af21e7 100644 --- a/charts/iter8/values.yaml +++ b/charts/iter8/values.yaml @@ -2,7 +2,7 @@ iter8Image: iter8/iter8:0.15 ### majorMinor is the minor version of Iter8 -majorMinor: v0.15 +majorMinor: v0.16 ### runner for Kubernetes experiments may be job, cronjob, or none runner: none From e2f00819272f238b0d24d2cfa4caf8ff10d7e2ca Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Wed, 26 Jul 2023 14:55:18 -0400 Subject: [PATCH 009/121] Add test for HTTP Grafana dashboard Signed-off-by: Alan Cha --- base/collect_http.go | 4 +- base/collect_http_test.go | 120 +++++++++++++++++++------------------- charts/iter8/values.yaml | 2 +- 3 files changed, 65 insertions(+), 61 deletions(-) diff --git a/base/collect_http.go b/base/collect_http.go index d18c50ab1..d77423bc6 100644 --- a/base/collect_http.go +++ b/base/collect_http.go @@ -286,7 +286,9 @@ func putPerformanceResultToMetricsService(metricsServerURL, namespace, experimen } defer func() { err = resp.Body.Close() - log.Logger.Error("could not close response body: ", err) + if err != nil { + log.Logger.Error("could not close response body: ", err) + } }() log.Logger.Trace("sent request") diff --git a/base/collect_http_test.go b/base/collect_http_test.go index 888483c1f..f20baa08d 100644 --- a/base/collect_http_test.go +++ b/base/collect_http_test.go @@ -385,62 +385,64 @@ func TestPutPerformanceResultToMetricsService(t *testing.T) { assert.True(t, called) } -// func TestRunCollectHTTPGrafana(t *testing.T) { -// startHTTPMock(t) - -// metricsServerURL := "http://iter8.default:8080" -// namespace := "default" -// experiment := "default" - -// err := os.Setenv("METRICS_SERVER_URL", metricsServerURL) -// assert.NoError(t, err) - -// metricsServerCalled := false -// httpmock.RegisterResponder(http.MethodPut, metricsServerURL+PerformanceResultPath, -// func(req *http.Request) (*http.Response, error) { -// metricsServerCalled = true - -// assert.Equal(t, namespace, req.URL.Query().Get("namespace")) -// assert.Equal(t, experiment, req.URL.Query().Get("experiment")) - -// // body, err := io.ReadAll(req.Body) -// // assert.NoError(t, err) -// // assert.Equal(t, "{\"hello\":\"world\"}", string(body)) - 
-// return httpmock.NewStringResponse(200, "success"), nil -// }) - -// endpointURL := "http://foo.com" -// endpointCalled := false -// httpmock.RegisterResponder(http.MethodGet, endpointURL, -// httpmock.NewStringResponder(200, "success")) - -// // valid collect HTTP task... should succeed -// ct := &collectHTTPTask{ -// TaskMeta: TaskMeta{ -// Task: StringPointer(CollectHTTPTaskName), -// }, -// With: collectHTTPInputs{ -// endpoint: endpoint{ -// URL: endpointURL, -// }, -// Grafana: true, -// }, -// } -// httpmock.RegisterResponder(http.MethodPut, metricsServerURL+PerformanceResultPath, -// func(req *http.Request) (*http.Response, error) { -// endpointCalled = true -// return httpmock.NewStringResponse(200, "success"), nil -// }) - -// exp := &Experiment{ -// Spec: []Task{ct}, -// Result: &ExperimentResult{}, -// } -// exp.initResults(1) -// err = ct.run(exp) -// // assert.NoError(t, err) -// assert.True(t, metricsServerCalled) // ensure that the /foo/ handler is called -// assert.True(t, endpointCalled) // ensure that the /foo/ handler is called -// assert.Equal(t, exp.Result.Insights.NumVersions, 1) -// } +func TestRunCollectHTTPGrafana(t *testing.T) { + // METRICS_SERVER_URL must be provided + metricsServerURL := "http://iter8.default:8080" + err := os.Setenv("METRICS_SERVER_URL", metricsServerURL) + assert.NoError(t, err) + + // mock metrics server + metricsServerCalled := false + namespace := "default" + experiment := "default" + startHTTPMock(t) + httpmock.RegisterResponder(http.MethodPut, metricsServerURL+PerformanceResultPath, + func(req *http.Request) (*http.Response, error) { + metricsServerCalled = true + + assert.Equal(t, namespace, req.URL.Query().Get("namespace")) + assert.Equal(t, experiment, req.URL.Query().Get("experiment")) + + return httpmock.NewStringResponse(200, "success"), nil + }) + + mux, addr := fhttp.DynamicHTTPServer(false) + + // mock endpoint + endpointCalled := false + handler := func(w http.ResponseWriter, r *http.Request) { + endpointCalled = true + + w.WriteHeader(200) + } + mux.HandleFunc("/"+foo, handler) + + baseURL := fmt.Sprintf("http://localhost:%d/", addr.Port) + + // valid collect HTTP task... 
should succeed + ct := &collectHTTPTask{ + TaskMeta: TaskMeta{ + Task: StringPointer(CollectHTTPTaskName), + }, + With: collectHTTPInputs{ + endpoint: endpoint{ + URL: baseURL + foo, + }, + Grafana: true, + }, + } + + exp := &Experiment{ + Spec: []Task{ct}, + Result: &ExperimentResult{}, + Metadata: ExperimentMetadata{ + Namespace: "default", + Name: "default", + }, + } + exp.initResults(1) + err = ct.run(exp) + assert.NoError(t, err) + assert.True(t, metricsServerCalled) + assert.True(t, endpointCalled) +} diff --git a/charts/iter8/values.yaml b/charts/iter8/values.yaml index 237af21e7..27990b3e4 100644 --- a/charts/iter8/values.yaml +++ b/charts/iter8/values.yaml @@ -2,7 +2,7 @@ iter8Image: iter8/iter8:0.15 ### majorMinor is the minor version of Iter8 -majorMinor: v0.16 +majorMinor: v0.15 ### runner for Kubernetes experiments may be job, cronjob, or none runner: none From 5a426d861abe85beb146db97cea617733d0c0993 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Wed, 26 Jul 2023 17:37:30 -0400 Subject: [PATCH 010/121] Additional tests Signed-off-by: Alan Cha --- abn/service_impl.go | 6 ++ abn/service_test.go | 8 ++ metrics/server.go | 21 ++++- metrics/server_test.go | 209 ++++++++++++++++++++++++++++++++++++++++- 4 files changed, 239 insertions(+), 5 deletions(-) diff --git a/abn/service_impl.go b/abn/service_impl.go index 5691fe453..2359e24c3 100644 --- a/abn/service_impl.go +++ b/abn/service_impl.go @@ -47,6 +47,9 @@ func lookupInternal(application string, user string) (controllers.RoutemapInterf } // record user; ignore error if any; this is best effort + if MetricsClient == nil { + return nil, invalidVersion, fmt.Errorf("no metrics client") + } _ = MetricsClient.SetUser(application, versionNumber, *s.GetVersions()[versionNumber].GetSignature(), user) return s, versionNumber, nil @@ -131,6 +134,9 @@ func writeMetricInternal(application, user, metric, valueStr string) error { v := s.GetVersions()[versionNumber] transaction := uuid.NewString() + if MetricsClient == nil { + return fmt.Errorf("no metrics client") + } err = MetricsClient.SetMetric( s.GetNamespace()+"/"+s.GetName(), versionNumber, *v.GetSignature(), metric, user, transaction, diff --git a/abn/service_test.go b/abn/service_test.go index a21e12528..89905f704 100644 --- a/abn/service_test.go +++ b/abn/service_test.go @@ -212,16 +212,24 @@ func getMetricsCount(t *testing.T, namespace string, name string, version int, m if rm == nil || reflect.ValueOf(rm).IsNil() { return 0 } + assert.Less(t, version, len(rm.GetVersions())) v := rm.GetVersions()[version] signature := v.GetSignature() + if nil == signature { return 0 } + + // TODO: better error handling when there is no metrics client + if MetricsClient == nil { + return 0 + } versionmetrics, err := MetricsClient.GetMetrics(namespace+"/"+name, version, *signature) if err != nil { return 0 } + metrics, ok := (*versionmetrics)[metric] if !ok { return 0 diff --git a/metrics/server.go b/metrics/server.go index 39aff18de..b70f17902 100644 --- a/metrics/server.go +++ b/metrics/server.go @@ -170,6 +170,10 @@ func getMetrics(w http.ResponseWriter, r *http.Request) { continue } + if abn.MetricsClient == nil { + log.Logger.Error("no metrics client") + continue + } versionmetrics, err := abn.MetricsClient.GetMetrics(application, v, *signature) if err != nil { log.Logger.Debugf("no metrics found for application %s (version %d; signature %s)", application, v, *signature) @@ -469,11 +473,13 @@ func putResult(w http.ResponseWriter, r *http.Request) { namespace := r.URL.Query().Get("namespace") if 
namespace == "" { http.Error(w, "no namespace specified", http.StatusBadRequest) + return } experiment := r.URL.Query().Get("experiment") if experiment == "" { http.Error(w, "no experiment specified", http.StatusBadRequest) + return } log.Logger.Tracef("putResult called for namespace %s and experiment %s", namespace, experiment) @@ -495,14 +501,19 @@ func putResult(w http.ResponseWriter, r *http.Request) { return } - // TODO: 201 for new resource, 200 for update + if abn.MetricsClient == nil { + http.Error(w, "no metrics client", http.StatusInternalServerError) + return + } err = abn.MetricsClient.SetResult(namespace, experiment, body) if err != nil { errorMessage := fmt.Sprintf("cannot store result in storage client: %s: %e", string(body), err) log.Logger.Error(errorMessage) - http.Error(w, errorMessage, http.StatusBadRequest) + http.Error(w, errorMessage, http.StatusInternalServerError) return } + + // TODO: 201 for new resource, 200 for update } // getHTTPDashboard handles GET /getHTTPDashboard with query parameter application=namespace/name @@ -534,6 +545,10 @@ func getHTTPDashboard(w http.ResponseWriter, r *http.Request) { log.Logger.Tracef("getHTTPGrafana called for namespace %s and experiment %s", namespace, experiment) // get result from metrics client + if abn.MetricsClient == nil { + http.Error(w, "no metrics client", http.StatusInternalServerError) + return + } result, err := abn.MetricsClient.GetResult(namespace, experiment) if err != nil { errorMessage := fmt.Sprintf("cannot get result with namespace %s, experiment %s", namespace, experiment) @@ -549,7 +564,7 @@ func getHTTPDashboard(w http.ResponseWriter, r *http.Request) { if err != nil { errorMessage := fmt.Sprintf("cannot JSON unmarshal result into FortioResult: \"%s\"", string(result)) log.Logger.Error(errorMessage) - http.Error(w, errorMessage, http.StatusBadRequest) + http.Error(w, errorMessage, http.StatusInternalServerError) return } diff --git a/metrics/server_test.go b/metrics/server_test.go index c29ab3054..3cc219d99 100644 --- a/metrics/server_test.go +++ b/metrics/server_test.go @@ -1,11 +1,14 @@ package metrics import ( + "bytes" "context" "encoding/json" "fmt" + "io/ioutil" "net/http" "net/http/httptest" + "net/url" "os" "regexp" "sort" @@ -338,7 +341,7 @@ func getTestRM(namespace, name string) *testroutemap { } -func TestFortioCalculateHistogram(t *testing.T) { +func TestGetHTTPHistogram(t *testing.T) { data := []fstats.Bucket{ { Interval: fstats.Interval{ @@ -484,7 +487,7 @@ func TestFortioCalculateHistogram(t *testing.T) { fmt.Println(string(histogramJSON)) } -func TestFortioHistogramStats(t *testing.T) { +func TestGetHTTPDashboardHelper(t *testing.T) { result := "{\"EndpointResults\":{\"http://httpbin.default/get\":{\"RunType\":\"HTTP\",\"Labels\":\"\",\"StartTime\":\"2023-07-21T14:00:40.134434969Z\",\"RequestedQPS\":\"8\",\"RequestedDuration\":\"exactly 100 
calls\",\"ActualQPS\":7.975606391552989,\"ActualDuration\":12538231589,\"NumThreads\":4,\"Version\":\"1.57.3\",\"DurationHistogram\":{\"Count\":100,\"Min\":0.004223875,\"Max\":0.040490042,\"Sum\":1.5977100850000001,\"Avg\":0.015977100850000002,\"StdDev\":0.008340658047253256,\"Data\":[{\"Start\":0.004223875,\"End\":0.005,\"Percent\":5,\"Count\":5},{\"Start\":0.005,\"End\":0.006,\"Percent\":10,\"Count\":5},{\"Start\":0.006,\"End\":0.007,\"Percent\":14,\"Count\":4},{\"Start\":0.007,\"End\":0.008,\"Percent\":19,\"Count\":5},{\"Start\":0.008,\"End\":0.009000000000000001,\"Percent\":24,\"Count\":5},{\"Start\":0.009000000000000001,\"End\":0.01,\"Percent\":28,\"Count\":4},{\"Start\":0.01,\"End\":0.011,\"Percent\":33,\"Count\":5},{\"Start\":0.011,\"End\":0.012,\"Percent\":36,\"Count\":3},{\"Start\":0.012,\"End\":0.014,\"Percent\":48,\"Count\":12},{\"Start\":0.014,\"End\":0.016,\"Percent\":55,\"Count\":7},{\"Start\":0.016,\"End\":0.018000000000000002,\"Percent\":65,\"Count\":10},{\"Start\":0.018000000000000002,\"End\":0.02,\"Percent\":74,\"Count\":9},{\"Start\":0.02,\"End\":0.025,\"Percent\":85,\"Count\":11},{\"Start\":0.025,\"End\":0.03,\"Percent\":93,\"Count\":8},{\"Start\":0.03,\"End\":0.035,\"Percent\":98,\"Count\":5},{\"Start\":0.035,\"End\":0.04,\"Percent\":99,\"Count\":1},{\"Start\":0.04,\"End\":0.040490042,\"Percent\":100,\"Count\":1}],\"Percentiles\":[{\"Percentile\":50,\"Value\":0.014571428571428572},{\"Percentile\":75,\"Value\":0.020454545454545454},{\"Percentile\":90,\"Value\":0.028125},{\"Percentile\":95,\"Value\":0.032},{\"Percentile\":99,\"Value\":0.04},{\"Percentile\":99.9,\"Value\":0.0404410378}]},\"ErrorsDurationHistogram\":{\"Count\":0,\"Min\":0,\"Max\":0,\"Sum\":0,\"Avg\":0,\"StdDev\":0,\"Data\":null},\"Exactly\":100,\"Jitter\":false,\"Uniform\":false,\"NoCatchUp\":false,\"RunID\":0,\"AccessLoggerInfo\":\"\",\"ID\":\"2023-07-21-140040\",\"RetCodes\":{\"200\":100},\"IPCountMap\":{\"10.96.108.76:80\":4},\"Insecure\":false,\"MTLS\":false,\"CACert\":\"\",\"Cert\":\"\",\"Key\":\"\",\"UnixDomainSocket\":\"\",\"URL\":\"http://httpbin.default/get\",\"NumConnections\":1,\"Compression\":false,\"DisableFastClient\":false,\"HTTP10\":false,\"H2\":false,\"DisableKeepAlive\":false,\"AllowHalfClose\":false,\"FollowRedirects\":false,\"Resolve\":\"\",\"HTTPReqTimeOut\":3000000000,\"UserCredentials\":\"\",\"ContentType\":\"\",\"Payload\":null,\"MethodOverride\":\"\",\"LogErrors\":false,\"SequentialWarmup\":false,\"ConnReuseRange\":[0,0],\"NoResolveEachConn\":false,\"Offset\":0,\"Resolution\":0.001,\"Sizes\":{\"Count\":100,\"Min\":413,\"Max\":413,\"Sum\":41300,\"Avg\":413,\"StdDev\":0,\"Data\":[{\"Start\":413,\"End\":413,\"Percent\":100,\"Count\":100}]},\"HeaderSizes\":{\"Count\":100,\"Min\":230,\"Max\":230,\"Sum\":23000,\"Avg\":230,\"StdDev\":0,\"Data\":[{\"Start\":230,\"End\":230,\"Percent\":100,\"Count\":100}]},\"Sockets\":[1,1,1,1],\"SocketCount\":4,\"ConnectionStats\":{\"Count\":4,\"Min\":0.001385875,\"Max\":0.001724375,\"Sum\":0.006404583,\"Avg\":0.00160114575,\"StdDev\":0.00013101857565508474,\"Data\":[{\"Start\":0.001385875,\"End\":0.001724375,\"Percent\":100,\"Count\":4}],\"Percentiles\":[{\"Percentile\":50,\"Value\":0.0014987083333333332},{\"Percentile\":75,\"Value\":0.0016115416666666667},{\"Percentile\":90,\"Value\":0.0016792416666666667},{\"Percentile\":95,\"Value\":0.0017018083333333333},{\"Percentile\":99,\"Value\":0.0017198616666666668},{\"Percentile\":99.9,\"Value\":0.0017239236666666668}]},\"AbortOn\":0}},\"Summary\":{\"numVersions\":1,\"versionNames\":null,\"metricsInfo\":{\"http/
latency\":{\"description\":\"Latency Histogram\",\"units\":\"msec\",\"type\":\"Histogram\"},\"http://httpbin.default/get/error-count\":{\"description\":\"number of responses that were errors\",\"type\":\"Counter\"},\"http://httpbin.default/get/error-rate\":{\"description\":\"fraction of responses that were errors\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-max\":{\"description\":\"maximum of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-mean\":{\"description\":\"mean of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-min\":{\"description\":\"minimum of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p50\":{\"description\":\"50-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p75\":{\"description\":\"75-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p90\":{\"description\":\"90-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p95\":{\"description\":\"95-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p99\":{\"description\":\"99-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p99.9\":{\"description\":\"99.9-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-stddev\":{\"description\":\"standard deviation of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/request-count\":{\"description\":\"number of requests sent\",\"type\":\"Counter\"}},\"nonHistMetricValues\":[{\"http://httpbin.default/get/error-count\":[0],\"http://httpbin.default/get/error-rate\":[0],\"http://httpbin.default/get/latency-max\":[40.490041999999995],\"http://httpbin.default/get/latency-mean\":[15.977100850000001],\"http://httpbin.default/get/latency-min\":[4.2238750000000005],\"http://httpbin.default/get/latency-p50\":[14.571428571428571],\"http://httpbin.default/get/latency-p75\":[20.454545454545453],\"http://httpbin.default/get/latency-p90\":[28.125],\"http://httpbin.default/get/latency-p95\":[32],\"http://httpbin.default/get/latency-p99\":[40],\"http://httpbin.default/get/latency-p99.9\":[40.441037800000004],\"http://httpbin.default/get/latency-stddev\":[8.340658047253257],\"http://httpbin.default/get/request-count\":[100]}],\"histMetricValues\":[{\"http/latency\":[{\"lower\":4.2238750000000005,\"upper\":5,\"count\":5},{\"lower\":5,\"upper\":6,\"count\":5},{\"lower\":6,\"upper\":7,\"count\":4},{\"lower\":7,\"upper\":8,\"count\":5},{\"lower\":8,\"upper\":9.000000000000002,\"count\":5},{\"lower\":9.000000000000002,\"upper\":10,\"count\":4},{\"lower\":10,\"upper\":11,\"count\":5},{\"lower\":11,\"upper\":12,\"count\":3},{\"lower\":12,\"upper\":14,\"count\":12},{\"lower\":14,\"upper\":16,\"count\":7},{\"lower\":16,\"upper\":18.000000000000004,\"count\":10},{\"lower\":18.000000000000004,\"upper\":20,\"count\":9},{\"lower\":20,\"upper\":25,\"count\":11},{\"lower\":25,\"upper\":30,\"count\":8},{\"lower\":30,\"upper\":35,\"count\":5},{\"lower\":35,\"upper\":40,\"count\":1},{\"lower\":40,\"upper\":40.490041999999995,\"count\":1}]}],\"SummaryMetricValues\":[{}]}}" fortioResult := util.FortioResult{} 
@@ -502,3 +505,205 @@ func TestFortioHistogramStats(t *testing.T) { string(dashboardBytes), ) } + +func TestPutResultInvalidMethod(t *testing.T) { + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, util.PerformanceResultPath, nil) + putResult(w, req) + res := w.Result() + defer func() { + err := res.Body.Close() + assert.NoError(t, err) + }() + assert.Equal(t, http.StatusMethodNotAllowed, res.StatusCode) +} + +func TestPutResultMissingParameter(t *testing.T) { + tests := []struct { + queryParams url.Values + expectedStatusCode int + }{ + { + expectedStatusCode: http.StatusBadRequest, + }, + { + queryParams: url.Values{ + "namespace": {"default"}, + }, + expectedStatusCode: http.StatusBadRequest, + }, + { + queryParams: url.Values{ + "experiment": {"default"}, + }, + expectedStatusCode: http.StatusBadRequest, + }, + { + queryParams: url.Values{ + "namespace": {"default"}, + "experiment": {"default"}, + }, + expectedStatusCode: http.StatusInternalServerError, // queryParams exist but no metrics client + }, + } + + for _, test := range tests { + w := httptest.NewRecorder() + + u, err := url.ParseRequestURI(util.PerformanceResultPath) + assert.NoError(t, err) + u.RawQuery = test.queryParams.Encode() + urlStr := fmt.Sprintf("%v", u) + + req := httptest.NewRequest(http.MethodPut, urlStr, nil) + + putResult(w, req) + res := w.Result() + defer func() { + err := res.Body.Close() + assert.NoError(t, err) + }() + + assert.Equal(t, test.expectedStatusCode, res.StatusCode) + } +} + +func TestPutResult(t *testing.T) { + // instantiate metrics client + tempDirPath := t.TempDir() + client, err := badgerdb.GetClient(badger.DefaultOptions(tempDirPath), badgerdb.AdditionalOptions{}) + assert.NoError(t, err) + abn.MetricsClient = client + + w := httptest.NewRecorder() + + // construct inputs to putResult + u, err := url.ParseRequestURI(util.PerformanceResultPath) + assert.NoError(t, err) + params := url.Values{ + "namespace": {"default"}, + "experiment": {"default"}, + } + u.RawQuery = params.Encode() + urlStr := fmt.Sprintf("%v", u) + + payload := `{"hello":"world"}` + req := httptest.NewRequest(http.MethodPut, urlStr, bytes.NewBuffer([]byte(payload))) + + // put result into the metrics client + putResult(w, req) + res := w.Result() + defer func() { + err := res.Body.Close() + assert.NoError(t, err) + }() + + // check to see if the result is stored in the metrics client + result, err := abn.MetricsClient.GetResult("default", "default") + assert.NoError(t, err) + assert.Equal(t, payload, string(result)) +} + +func TestGetHTTPDashboardInvalidMethod(t *testing.T) { + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, "/httpDashboard", nil) + getHTTPDashboard(w, req) + res := w.Result() + defer func() { + err := res.Body.Close() + assert.NoError(t, err) + }() + assert.Equal(t, http.StatusMethodNotAllowed, res.StatusCode) +} + +func TestGetHTTPDashboardMissingParameter(t *testing.T) { + tests := []struct { + queryParams url.Values + expectedStatusCode int + }{ + { + expectedStatusCode: http.StatusBadRequest, + }, + { + queryParams: url.Values{ + "namespace": {"default"}, + }, + expectedStatusCode: http.StatusBadRequest, + }, + { + queryParams: url.Values{ + "experiment": {"default"}, + }, + expectedStatusCode: http.StatusBadRequest, + }, + { + queryParams: url.Values{ + "namespace": {"default"}, + "experiment": {"default"}, + }, + expectedStatusCode: http.StatusInternalServerError, // queryParams exist but no metrics client + }, + } + + for _, test := range tests { + 
w := httptest.NewRecorder() + + u, err := url.ParseRequestURI(util.PerformanceResultPath) + assert.NoError(t, err) + u.RawQuery = test.queryParams.Encode() + urlStr := fmt.Sprintf("%v", u) + req := httptest.NewRequest(http.MethodGet, urlStr, nil) + + getHTTPDashboard(w, req) + res := w.Result() + defer func() { + err := res.Body.Close() + assert.NoError(t, err) + }() + + assert.Equal(t, test.expectedStatusCode, res.StatusCode) + } +} + +func TestGetHTTPDashboard(t *testing.T) { + // instantiate metrics client + tempDirPath := t.TempDir() + client, err := badgerdb.GetClient(badger.DefaultOptions(tempDirPath), badgerdb.AdditionalOptions{}) + assert.NoError(t, err) + abn.MetricsClient = client + + // preload metric client with result + result := "{\"EndpointResults\":{\"http://httpbin.default/get\":{\"RunType\":\"HTTP\",\"Labels\":\"\",\"StartTime\":\"2023-07-21T14:00:40.134434969Z\",\"RequestedQPS\":\"8\",\"RequestedDuration\":\"exactly 100 calls\",\"ActualQPS\":7.975606391552989,\"ActualDuration\":12538231589,\"NumThreads\":4,\"Version\":\"1.57.3\",\"DurationHistogram\":{\"Count\":100,\"Min\":0.004223875,\"Max\":0.040490042,\"Sum\":1.5977100850000001,\"Avg\":0.015977100850000002,\"StdDev\":0.008340658047253256,\"Data\":[{\"Start\":0.004223875,\"End\":0.005,\"Percent\":5,\"Count\":5},{\"Start\":0.005,\"End\":0.006,\"Percent\":10,\"Count\":5},{\"Start\":0.006,\"End\":0.007,\"Percent\":14,\"Count\":4},{\"Start\":0.007,\"End\":0.008,\"Percent\":19,\"Count\":5},{\"Start\":0.008,\"End\":0.009000000000000001,\"Percent\":24,\"Count\":5},{\"Start\":0.009000000000000001,\"End\":0.01,\"Percent\":28,\"Count\":4},{\"Start\":0.01,\"End\":0.011,\"Percent\":33,\"Count\":5},{\"Start\":0.011,\"End\":0.012,\"Percent\":36,\"Count\":3},{\"Start\":0.012,\"End\":0.014,\"Percent\":48,\"Count\":12},{\"Start\":0.014,\"End\":0.016,\"Percent\":55,\"Count\":7},{\"Start\":0.016,\"End\":0.018000000000000002,\"Percent\":65,\"Count\":10},{\"Start\":0.018000000000000002,\"End\":0.02,\"Percent\":74,\"Count\":9},{\"Start\":0.02,\"End\":0.025,\"Percent\":85,\"Count\":11},{\"Start\":0.025,\"End\":0.03,\"Percent\":93,\"Count\":8},{\"Start\":0.03,\"End\":0.035,\"Percent\":98,\"Count\":5},{\"Start\":0.035,\"End\":0.04,\"Percent\":99,\"Count\":1},{\"Start\":0.04,\"End\":0.040490042,\"Percent\":100,\"Count\":1}],\"Percentiles\":[{\"Percentile\":50,\"Value\":0.014571428571428572},{\"Percentile\":75,\"Value\":0.020454545454545454},{\"Percentile\":90,\"Value\":0.028125},{\"Percentile\":95,\"Value\":0.032},{\"Percentile\":99,\"Value\":0.04},{\"Percentile\":99.9,\"Value\":0.0404410378}]},\"ErrorsDurationHistogram\":{\"Count\":0,\"Min\":0,\"Max\":0,\"Sum\":0,\"Avg\":0,\"StdDev\":0,\"Data\":null},\"Exactly\":100,\"Jitter\":false,\"Uniform\":false,\"NoCatchUp\":false,\"RunID\":0,\"AccessLoggerInfo\":\"\",\"ID\":\"2023-07-21-140040\",\"RetCodes\":{\"200\":100},\"IPCountMap\":{\"10.96.108.76:80\":4},\"Insecure\":false,\"MTLS\":false,\"CACert\":\"\",\"Cert\":\"\",\"Key\":\"\",\"UnixDomainSocket\":\"\",\"URL\":\"http://httpbin.default/get\",\"NumConnections\":1,\"Compression\":false,\"DisableFastClient\":false,\"HTTP10\":false,\"H2\":false,\"DisableKeepAlive\":false,\"AllowHalfClose\":false,\"FollowRedirects\":false,\"Resolve\":\"\",\"HTTPReqTimeOut\":3000000000,\"UserCredentials\":\"\",\"ContentType\":\"\",\"Payload\":null,\"MethodOverride\":\"\",\"LogErrors\":false,\"SequentialWarmup\":false,\"ConnReuseRange\":[0,0],\"NoResolveEachConn\":false,\"Offset\":0,\"Resolution\":0.001,\"Sizes\":{\"Count\":100,\"Min\":413,\"Max\":413,\"Sum\":41300,\"Av
g\":413,\"StdDev\":0,\"Data\":[{\"Start\":413,\"End\":413,\"Percent\":100,\"Count\":100}]},\"HeaderSizes\":{\"Count\":100,\"Min\":230,\"Max\":230,\"Sum\":23000,\"Avg\":230,\"StdDev\":0,\"Data\":[{\"Start\":230,\"End\":230,\"Percent\":100,\"Count\":100}]},\"Sockets\":[1,1,1,1],\"SocketCount\":4,\"ConnectionStats\":{\"Count\":4,\"Min\":0.001385875,\"Max\":0.001724375,\"Sum\":0.006404583,\"Avg\":0.00160114575,\"StdDev\":0.00013101857565508474,\"Data\":[{\"Start\":0.001385875,\"End\":0.001724375,\"Percent\":100,\"Count\":4}],\"Percentiles\":[{\"Percentile\":50,\"Value\":0.0014987083333333332},{\"Percentile\":75,\"Value\":0.0016115416666666667},{\"Percentile\":90,\"Value\":0.0016792416666666667},{\"Percentile\":95,\"Value\":0.0017018083333333333},{\"Percentile\":99,\"Value\":0.0017198616666666668},{\"Percentile\":99.9,\"Value\":0.0017239236666666668}]},\"AbortOn\":0}},\"Summary\":{\"numVersions\":1,\"versionNames\":null,\"metricsInfo\":{\"http/latency\":{\"description\":\"Latency Histogram\",\"units\":\"msec\",\"type\":\"Histogram\"},\"http://httpbin.default/get/error-count\":{\"description\":\"number of responses that were errors\",\"type\":\"Counter\"},\"http://httpbin.default/get/error-rate\":{\"description\":\"fraction of responses that were errors\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-max\":{\"description\":\"maximum of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-mean\":{\"description\":\"mean of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-min\":{\"description\":\"minimum of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p50\":{\"description\":\"50-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p75\":{\"description\":\"75-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p90\":{\"description\":\"90-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p95\":{\"description\":\"95-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p99\":{\"description\":\"99-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p99.9\":{\"description\":\"99.9-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-stddev\":{\"description\":\"standard deviation of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/request-count\":{\"description\":\"number of requests 
sent\",\"type\":\"Counter\"}},\"nonHistMetricValues\":[{\"http://httpbin.default/get/error-count\":[0],\"http://httpbin.default/get/error-rate\":[0],\"http://httpbin.default/get/latency-max\":[40.490041999999995],\"http://httpbin.default/get/latency-mean\":[15.977100850000001],\"http://httpbin.default/get/latency-min\":[4.2238750000000005],\"http://httpbin.default/get/latency-p50\":[14.571428571428571],\"http://httpbin.default/get/latency-p75\":[20.454545454545453],\"http://httpbin.default/get/latency-p90\":[28.125],\"http://httpbin.default/get/latency-p95\":[32],\"http://httpbin.default/get/latency-p99\":[40],\"http://httpbin.default/get/latency-p99.9\":[40.441037800000004],\"http://httpbin.default/get/latency-stddev\":[8.340658047253257],\"http://httpbin.default/get/request-count\":[100]}],\"histMetricValues\":[{\"http/latency\":[{\"lower\":4.2238750000000005,\"upper\":5,\"count\":5},{\"lower\":5,\"upper\":6,\"count\":5},{\"lower\":6,\"upper\":7,\"count\":4},{\"lower\":7,\"upper\":8,\"count\":5},{\"lower\":8,\"upper\":9.000000000000002,\"count\":5},{\"lower\":9.000000000000002,\"upper\":10,\"count\":4},{\"lower\":10,\"upper\":11,\"count\":5},{\"lower\":11,\"upper\":12,\"count\":3},{\"lower\":12,\"upper\":14,\"count\":12},{\"lower\":14,\"upper\":16,\"count\":7},{\"lower\":16,\"upper\":18.000000000000004,\"count\":10},{\"lower\":18.000000000000004,\"upper\":20,\"count\":9},{\"lower\":20,\"upper\":25,\"count\":11},{\"lower\":25,\"upper\":30,\"count\":8},{\"lower\":30,\"upper\":35,\"count\":5},{\"lower\":35,\"upper\":40,\"count\":1},{\"lower\":40,\"upper\":40.490041999999995,\"count\":1}]}],\"SummaryMetricValues\":[{}]}}" + abn.MetricsClient.SetResult("default", "default", []byte(result)) + + w := httptest.NewRecorder() + + // construct inputs to getHTTPDashboard + u, err := url.ParseRequestURI(util.PerformanceResultPath) + assert.NoError(t, err) + params := url.Values{ + "namespace": {"default"}, + "experiment": {"default"}, + } + u.RawQuery = params.Encode() + urlStr := fmt.Sprintf("%v", u) + + req := httptest.NewRequest(http.MethodGet, urlStr, nil) + + // get HTTP dashboard based on result in metrics client + getHTTPDashboard(w, req) + res := w.Result() + defer func() { + err := res.Body.Close() + assert.NoError(t, err) + }() + + // check the HTTP dashboard + body, err := ioutil.ReadAll(res.Body) + assert.NoError(t, err) + assert.Equal( + t, + `{"Endpoints":{"http://httpbin.default/get":{"Durations":[{"Version":"0","Bucket":"4.2 - 5","Value":5},{"Version":"0","Bucket":"5 - 6","Value":5},{"Version":"0","Bucket":"6 - 7","Value":4},{"Version":"0","Bucket":"7 - 8","Value":5},{"Version":"0","Bucket":"8 - 9","Value":5},{"Version":"0","Bucket":"9 - 10","Value":4},{"Version":"0","Bucket":"10 - 11","Value":5},{"Version":"0","Bucket":"11 - 12","Value":3},{"Version":"0","Bucket":"12 - 14","Value":12},{"Version":"0","Bucket":"14 - 16","Value":7},{"Version":"0","Bucket":"16 - 18","Value":10},{"Version":"0","Bucket":"18 - 20","Value":9},{"Version":"0","Bucket":"20 - 25","Value":11},{"Version":"0","Bucket":"25 - 30","Value":8},{"Version":"0","Bucket":"30 - 35","Value":5},{"Version":"0","Bucket":"35 - 40","Value":1},{"Version":"0","Bucket":"40 - 40.4","Value":1}],"Statistics":{"Count":100,"Mean":15.977100850000001,"StdDev":8.340658047253257,"Min":4.2238750000000005,"Max":40.490041999999995},"Error durations":[],"Error statistics":{"Count":0,"Mean":0,"StdDev":0,"Min":0,"Max":0},"Return codes":{"200":100}}},"Summary":{"numVersions":1,"versionNames":null,"metricsInfo":{"http/latency":{"description":"Latency 
Histogram","units":"msec","type":"Histogram"},"http://httpbin.default/get/error-count":{"description":"number of responses that were errors","type":"Counter"},"http://httpbin.default/get/error-rate":{"description":"fraction of responses that were errors","type":"Gauge"},"http://httpbin.default/get/latency-max":{"description":"maximum of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/latency-mean":{"description":"mean of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/latency-min":{"description":"minimum of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/latency-p50":{"description":"50-th percentile of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/latency-p75":{"description":"75-th percentile of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/latency-p90":{"description":"90-th percentile of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/latency-p95":{"description":"95-th percentile of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/latency-p99":{"description":"99-th percentile of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/latency-p99.9":{"description":"99.9-th percentile of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/latency-stddev":{"description":"standard deviation of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/request-count":{"description":"number of requests sent","type":"Counter"}},"nonHistMetricValues":[{"http://httpbin.default/get/error-count":[0],"http://httpbin.default/get/error-rate":[0],"http://httpbin.default/get/latency-max":[40.490041999999995],"http://httpbin.default/get/latency-mean":[15.977100850000001],"http://httpbin.default/get/latency-min":[4.2238750000000005],"http://httpbin.default/get/latency-p50":[14.571428571428571],"http://httpbin.default/get/latency-p75":[20.454545454545453],"http://httpbin.default/get/latency-p90":[28.125],"http://httpbin.default/get/latency-p95":[32],"http://httpbin.default/get/latency-p99":[40],"http://httpbin.default/get/latency-p99.9":[40.441037800000004],"http://httpbin.default/get/latency-stddev":[8.340658047253257],"http://httpbin.default/get/request-count":[100]}],"histMetricValues":[{"http/latency":[{"lower":4.2238750000000005,"upper":5,"count":5},{"lower":5,"upper":6,"count":5},{"lower":6,"upper":7,"count":4},{"lower":7,"upper":8,"count":5},{"lower":8,"upper":9.000000000000002,"count":5},{"lower":9.000000000000002,"upper":10,"count":4},{"lower":10,"upper":11,"count":5},{"lower":11,"upper":12,"count":3},{"lower":12,"upper":14,"count":12},{"lower":14,"upper":16,"count":7},{"lower":16,"upper":18.000000000000004,"count":10},{"lower":18.000000000000004,"upper":20,"count":9},{"lower":20,"upper":25,"count":11},{"lower":25,"upper":30,"count":8},{"lower":30,"upper":35,"count":5},{"lower":35,"upper":40,"count":1},{"lower":40,"upper":40.490041999999995,"count":1}]}],"SummaryMetricValues":[{}]}}`, + string(body), + ) +} From e6cc63d2eb435c708fcb7fc9ce95289cf374d8fa Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Wed, 26 Jul 2023 17:44:47 -0400 Subject: [PATCH 011/121] Fix test Signed-off-by: Alan Cha --- metrics/server_test.go | 21 ++++----------------- 1 file changed, 4 insertions(+), 17 deletions(-) diff --git a/metrics/server_test.go b/metrics/server_test.go index 
3cc219d99..b784b8751 100644 --- a/metrics/server_test.go +++ b/metrics/server_test.go @@ -5,7 +5,7 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "net/url" @@ -538,13 +538,6 @@ func TestPutResultMissingParameter(t *testing.T) { }, expectedStatusCode: http.StatusBadRequest, }, - { - queryParams: url.Values{ - "namespace": {"default"}, - "experiment": {"default"}, - }, - expectedStatusCode: http.StatusInternalServerError, // queryParams exist but no metrics client - }, } for _, test := range tests { @@ -636,13 +629,6 @@ func TestGetHTTPDashboardMissingParameter(t *testing.T) { }, expectedStatusCode: http.StatusBadRequest, }, - { - queryParams: url.Values{ - "namespace": {"default"}, - "experiment": {"default"}, - }, - expectedStatusCode: http.StatusInternalServerError, // queryParams exist but no metrics client - }, } for _, test := range tests { @@ -674,7 +660,8 @@ func TestGetHTTPDashboard(t *testing.T) { // preload metric client with result result := "{\"EndpointResults\":{\"http://httpbin.default/get\":{\"RunType\":\"HTTP\",\"Labels\":\"\",\"StartTime\":\"2023-07-21T14:00:40.134434969Z\",\"RequestedQPS\":\"8\",\"RequestedDuration\":\"exactly 100 calls\",\"ActualQPS\":7.975606391552989,\"ActualDuration\":12538231589,\"NumThreads\":4,\"Version\":\"1.57.3\",\"DurationHistogram\":{\"Count\":100,\"Min\":0.004223875,\"Max\":0.040490042,\"Sum\":1.5977100850000001,\"Avg\":0.015977100850000002,\"StdDev\":0.008340658047253256,\"Data\":[{\"Start\":0.004223875,\"End\":0.005,\"Percent\":5,\"Count\":5},{\"Start\":0.005,\"End\":0.006,\"Percent\":10,\"Count\":5},{\"Start\":0.006,\"End\":0.007,\"Percent\":14,\"Count\":4},{\"Start\":0.007,\"End\":0.008,\"Percent\":19,\"Count\":5},{\"Start\":0.008,\"End\":0.009000000000000001,\"Percent\":24,\"Count\":5},{\"Start\":0.009000000000000001,\"End\":0.01,\"Percent\":28,\"Count\":4},{\"Start\":0.01,\"End\":0.011,\"Percent\":33,\"Count\":5},{\"Start\":0.011,\"End\":0.012,\"Percent\":36,\"Count\":3},{\"Start\":0.012,\"End\":0.014,\"Percent\":48,\"Count\":12},{\"Start\":0.014,\"End\":0.016,\"Percent\":55,\"Count\":7},{\"Start\":0.016,\"End\":0.018000000000000002,\"Percent\":65,\"Count\":10},{\"Start\":0.018000000000000002,\"End\":0.02,\"Percent\":74,\"Count\":9},{\"Start\":0.02,\"End\":0.025,\"Percent\":85,\"Count\":11},{\"Start\":0.025,\"End\":0.03,\"Percent\":93,\"Count\":8},{\"Start\":0.03,\"End\":0.035,\"Percent\":98,\"Count\":5},{\"Start\":0.035,\"End\":0.04,\"Percent\":99,\"Count\":1},{\"Start\":0.04,\"End\":0.040490042,\"Percent\":100,\"Count\":1}],\"Percentiles\":[{\"Percentile\":50,\"Value\":0.014571428571428572},{\"Percentile\":75,\"Value\":0.020454545454545454},{\"Percentile\":90,\"Value\":0.028125},{\"Percentile\":95,\"Value\":0.032},{\"Percentile\":99,\"Value\":0.04},{\"Percentile\":99.9,\"Value\":0.0404410378}]},\"ErrorsDurationHistogram\":{\"Count\":0,\"Min\":0,\"Max\":0,\"Sum\":0,\"Avg\":0,\"StdDev\":0,\"Data\":null},\"Exactly\":100,\"Jitter\":false,\"Uniform\":false,\"NoCatchUp\":false,\"RunID\":0,\"AccessLoggerInfo\":\"\",\"ID\":\"2023-07-21-140040\",\"RetCodes\":{\"200\":100},\"IPCountMap\":{\"10.96.108.76:80\":4},\"Insecure\":false,\"MTLS\":false,\"CACert\":\"\",\"Cert\":\"\",\"Key\":\"\",\"UnixDomainSocket\":\"\",\"URL\":\"http://httpbin.default/get\",\"NumConnections\":1,\"Compression\":false,\"DisableFastClient\":false,\"HTTP10\":false,\"H2\":false,\"DisableKeepAlive\":false,\"AllowHalfClose\":false,\"FollowRedirects\":false,\"Resolve\":\"\",\"HTTPReqTimeOut\":3000000000,\"UserCredentials\":\"\",\
"ContentType\":\"\",\"Payload\":null,\"MethodOverride\":\"\",\"LogErrors\":false,\"SequentialWarmup\":false,\"ConnReuseRange\":[0,0],\"NoResolveEachConn\":false,\"Offset\":0,\"Resolution\":0.001,\"Sizes\":{\"Count\":100,\"Min\":413,\"Max\":413,\"Sum\":41300,\"Avg\":413,\"StdDev\":0,\"Data\":[{\"Start\":413,\"End\":413,\"Percent\":100,\"Count\":100}]},\"HeaderSizes\":{\"Count\":100,\"Min\":230,\"Max\":230,\"Sum\":23000,\"Avg\":230,\"StdDev\":0,\"Data\":[{\"Start\":230,\"End\":230,\"Percent\":100,\"Count\":100}]},\"Sockets\":[1,1,1,1],\"SocketCount\":4,\"ConnectionStats\":{\"Count\":4,\"Min\":0.001385875,\"Max\":0.001724375,\"Sum\":0.006404583,\"Avg\":0.00160114575,\"StdDev\":0.00013101857565508474,\"Data\":[{\"Start\":0.001385875,\"End\":0.001724375,\"Percent\":100,\"Count\":4}],\"Percentiles\":[{\"Percentile\":50,\"Value\":0.0014987083333333332},{\"Percentile\":75,\"Value\":0.0016115416666666667},{\"Percentile\":90,\"Value\":0.0016792416666666667},{\"Percentile\":95,\"Value\":0.0017018083333333333},{\"Percentile\":99,\"Value\":0.0017198616666666668},{\"Percentile\":99.9,\"Value\":0.0017239236666666668}]},\"AbortOn\":0}},\"Summary\":{\"numVersions\":1,\"versionNames\":null,\"metricsInfo\":{\"http/latency\":{\"description\":\"Latency Histogram\",\"units\":\"msec\",\"type\":\"Histogram\"},\"http://httpbin.default/get/error-count\":{\"description\":\"number of responses that were errors\",\"type\":\"Counter\"},\"http://httpbin.default/get/error-rate\":{\"description\":\"fraction of responses that were errors\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-max\":{\"description\":\"maximum of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-mean\":{\"description\":\"mean of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-min\":{\"description\":\"minimum of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p50\":{\"description\":\"50-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p75\":{\"description\":\"75-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p90\":{\"description\":\"90-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p95\":{\"description\":\"95-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p99\":{\"description\":\"99-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p99.9\":{\"description\":\"99.9-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-stddev\":{\"description\":\"standard deviation of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/request-count\":{\"description\":\"number of requests 
sent\",\"type\":\"Counter\"}},\"nonHistMetricValues\":[{\"http://httpbin.default/get/error-count\":[0],\"http://httpbin.default/get/error-rate\":[0],\"http://httpbin.default/get/latency-max\":[40.490041999999995],\"http://httpbin.default/get/latency-mean\":[15.977100850000001],\"http://httpbin.default/get/latency-min\":[4.2238750000000005],\"http://httpbin.default/get/latency-p50\":[14.571428571428571],\"http://httpbin.default/get/latency-p75\":[20.454545454545453],\"http://httpbin.default/get/latency-p90\":[28.125],\"http://httpbin.default/get/latency-p95\":[32],\"http://httpbin.default/get/latency-p99\":[40],\"http://httpbin.default/get/latency-p99.9\":[40.441037800000004],\"http://httpbin.default/get/latency-stddev\":[8.340658047253257],\"http://httpbin.default/get/request-count\":[100]}],\"histMetricValues\":[{\"http/latency\":[{\"lower\":4.2238750000000005,\"upper\":5,\"count\":5},{\"lower\":5,\"upper\":6,\"count\":5},{\"lower\":6,\"upper\":7,\"count\":4},{\"lower\":7,\"upper\":8,\"count\":5},{\"lower\":8,\"upper\":9.000000000000002,\"count\":5},{\"lower\":9.000000000000002,\"upper\":10,\"count\":4},{\"lower\":10,\"upper\":11,\"count\":5},{\"lower\":11,\"upper\":12,\"count\":3},{\"lower\":12,\"upper\":14,\"count\":12},{\"lower\":14,\"upper\":16,\"count\":7},{\"lower\":16,\"upper\":18.000000000000004,\"count\":10},{\"lower\":18.000000000000004,\"upper\":20,\"count\":9},{\"lower\":20,\"upper\":25,\"count\":11},{\"lower\":25,\"upper\":30,\"count\":8},{\"lower\":30,\"upper\":35,\"count\":5},{\"lower\":35,\"upper\":40,\"count\":1},{\"lower\":40,\"upper\":40.490041999999995,\"count\":1}]}],\"SummaryMetricValues\":[{}]}}" - abn.MetricsClient.SetResult("default", "default", []byte(result)) + err = abn.MetricsClient.SetResult("default", "default", []byte(result)) + assert.NoError(t, err) w := httptest.NewRecorder() @@ -699,7 +686,7 @@ func TestGetHTTPDashboard(t *testing.T) { }() // check the HTTP dashboard - body, err := ioutil.ReadAll(res.Body) + body, err := io.ReadAll(res.Body) assert.NoError(t, err) assert.Equal( t, From 5a37c499dc3eb1adb29c2a9b26452528f10c8c72 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Fri, 28 Jul 2023 08:38:57 -0400 Subject: [PATCH 012/121] Address Michael's comments Signed-off-by: Alan Cha --- base/collect_http.go | 16 +++++----------- base/experiment.go | 2 +- charts/iter8/values.yaml | 3 --- driver/kubedriver_test.go | 4 +--- metrics/server.go | 14 +++++++------- 5 files changed, 14 insertions(+), 25 deletions(-) diff --git a/base/collect_http.go b/base/collect_http.go index d77423bc6..28c16fc81 100644 --- a/base/collect_http.go +++ b/base/collect_http.go @@ -72,7 +72,7 @@ type collectHTTPInputs struct { // This data will be transformed into httpDashboard when getHTTPGrafana is called type FortioResult struct { // key is the endpoint - EndpointResults map[string]fhttp.HTTPRunnerResults + EndpointResults map[string]*fhttp.HTTPRunnerResults Summary Insights } @@ -300,11 +300,11 @@ func putPerformanceResultToMetricsService(metricsServerURL, namespace, experimen // func (t *collectHTTPTask) getFortioResults() (*fhttp.HTTPRunnerResults, error) { // key is the metric prefix // key is the endpoint -func (t *collectHTTPTask) getFortioResults() (map[string]fhttp.HTTPRunnerResults, error) { +func (t *collectHTTPTask) getFortioResults() (map[string]*fhttp.HTTPRunnerResults, error) { // the main idea is to run Fortio with proper options var err error - results := map[string]fhttp.HTTPRunnerResults{} + results := map[string]*fhttp.HTTPRunnerResults{} if len(t.With.Endpoints) > 
0 { log.Logger.Trace("multiple endpoints") for endpointID, endpoint := range t.With.Endpoints { @@ -338,7 +338,7 @@ func (t *collectHTTPTask) getFortioResults() (map[string]fhttp.HTTPRunnerResults if t.With.Grafana { resultsKey = endpoint.URL } - results[resultsKey] = *ifr + results[resultsKey] = ifr } } else { fo, err := getFortioOptions(t.With.endpoint) @@ -362,7 +362,7 @@ func (t *collectHTTPTask) getFortioResults() (map[string]fhttp.HTTPRunnerResults if t.With.Grafana { resultsKey = t.With.endpoint.URL } - results[resultsKey] = *ifr + results[resultsKey] = ifr } return results, err @@ -383,12 +383,6 @@ func (t *collectHTTPTask) run(exp *Experiment) error { return err } - // TODO: warmup option - // // ignore results if warmup - // if t.With.Warmup != nil && *t.With.Warmup { - // log.Logger.Debug("warmup: ignoring results") - // return nil - // } // ignore results if warmup if t.With.Warmup != nil && *t.With.Warmup { log.Logger.Debug("warmup: ignoring results") diff --git a/base/experiment.go b/base/experiment.go index b05453bcd..c36b9fda7 100644 --- a/base/experiment.go +++ b/base/experiment.go @@ -45,7 +45,7 @@ type ExperimentMetadata struct { // Experiment struct containing spec and result type Experiment struct { - Metadata ExperimentMetadata + Metadata ExperimentMetadata `json:"metadata" yaml:"metadata"` // Spec is the sequence of tasks that constitute this experiment Spec ExperimentSpec `json:"spec" yaml:"spec"` diff --git a/charts/iter8/values.yaml b/charts/iter8/values.yaml index 27990b3e4..208fdf1ce 100644 --- a/charts/iter8/values.yaml +++ b/charts/iter8/values.yaml @@ -9,9 +9,6 @@ runner: none logLevel: info -abnmetrics: - endpoint: iter8-abn:50051 - ### resources are the resource limits for the pods resources: requests: diff --git a/driver/kubedriver_test.go b/driver/kubedriver_test.go index 9c32555bd..773290f05 100644 --- a/driver/kubedriver_test.go +++ b/driver/kubedriver_test.go @@ -67,8 +67,6 @@ func TestKOps(t *testing.T) { func TestKubeRun(t *testing.T) { _ = os.Chdir(t.TempDir()) - err := os.Setenv(base.MetricsServerURL, "http://iter8.default:8080") - assert.NoError(t, err) // create and configure HTTP endpoint for testing mux, addr := fhttp.DynamicHTTPServer(false) @@ -102,7 +100,7 @@ func TestKubeRun(t *testing.T) { }, }, metav1.CreateOptions{}) - err = base.RunExperiment(false, kd) + err := base.RunExperiment(false, kd) assert.NoError(t, err) // sanity check -- handler was called assert.True(t, verifyHandlerCalled) diff --git a/metrics/server.go b/metrics/server.go index b70f17902..e577ac957 100644 --- a/metrics/server.go +++ b/metrics/server.go @@ -64,8 +64,8 @@ type metricSummary struct { SummaryOverUsers []*versionSummarizedMetric } -// httpEndpointPanel is the data needed to produce a single panel for -type httpEndpointPanel struct { +// httpEndpointRow is the data needed to produce a single row in the Iter8 Grafana dashboard +type httpEndpointRow struct { Durations grafanaHistogram Statistics storage.SummarizedMetric @@ -77,7 +77,7 @@ type httpEndpointPanel struct { type httpDashboard struct { // key is the endpoint - Endpoints map[string]httpEndpointPanel + Endpoints map[string]httpEndpointRow Summary util.Insights } @@ -422,8 +422,8 @@ func getHTTPStatistics(fortioHistogram *fstats.HistogramData, decimalPlace float } } -func getHTTPEndpointPanel(httpRunnerResults *fhttp.HTTPRunnerResults) httpEndpointPanel { - result := httpEndpointPanel{} +func getHTTPEndpointRow(httpRunnerResults *fhttp.HTTPRunnerResults) httpEndpointRow { + result := httpEndpointRow{} if 
httpRunnerResults.DurationHistogram != nil { result.Durations = getHTTPHistogram(httpRunnerResults.DurationHistogram.Data, 1) result.Statistics = getHTTPStatistics(httpRunnerResults.DurationHistogram, 1) @@ -442,12 +442,12 @@ func getHTTPEndpointPanel(httpRunnerResults *fhttp.HTTPRunnerResults) httpEndpoi func getHTTPDashboardHelper(fortioResult util.FortioResult) httpDashboard { // add endpoint results dashboard := httpDashboard{ - Endpoints: map[string]httpEndpointPanel{}, + Endpoints: map[string]httpEndpointRow{}, } for endpoint, endpointResult := range fortioResult.EndpointResults { endpointResult := endpointResult - dashboard.Endpoints[endpoint] = getHTTPEndpointPanel(&endpointResult) + dashboard.Endpoints[endpoint] = getHTTPEndpointRow(endpointResult) } // add summary From e27a4f9c92f4c2e6ab7caa4d0c2d012085f97853 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Thu, 27 Jul 2023 11:17:25 -0400 Subject: [PATCH 013/121] gRPC Grafana dashboard Signed-off-by: Alan Cha --- base/collect_grpc.go | 135 ++++-- metrics/server.go | 155 ++++++- metrics/server_test.go | 966 ++++++++++++++++++++++++++++++++++------- 3 files changed, 1046 insertions(+), 210 deletions(-) diff --git a/base/collect_grpc.go b/base/collect_grpc.go index 1bb89c2fc..9b1ddd6bc 100644 --- a/base/collect_grpc.go +++ b/base/collect_grpc.go @@ -2,6 +2,7 @@ package base import ( "fmt" + "os" "time" "github.com/bojand/ghz/runner" @@ -38,6 +39,11 @@ type collectGRPCInputs struct { // Endpoints is used to define multiple endpoints to test Endpoints map[string]runner.Config `json:"endpoints" yaml:"endpoints"` + + // Determines if Grafana dashboard should be created + // dashboard vs report/assess tasks + // TODO: remove + Grafana bool `json:"grafana" yaml:"grafana"` } // collectGRPCTask enables load testing of gRPC services.
@@ -49,6 +55,15 @@ type collectGRPCTask struct { With collectGRPCInputs `json:"with" yaml:"with"` } +// GHZResult is the raw data sent to the metrics server +// This data will be transformed into ghzDashboard when getGHZDashboard is called +type GHZResult struct { + // key is the endpoint + EndpointResults map[string]runner.Report + + Summary Insights +} + // initializeDefaults sets default values for the collect task func (t *collectGRPCTask) initializeDefaults() { // set defaults @@ -71,11 +86,11 @@ func (t *collectGRPCTask) validateInputs() error { } // resultForVersion collects gRPC test result for a given version -func (t *collectGRPCTask) resultForVersion() (map[string]*runner.Report, error) { +func (t *collectGRPCTask) resultForVersion() (map[string]runner.Report, error) { // the main idea is to run ghz with proper options var err error - results := map[string]*runner.Report{} + results := map[string]runner.Report{} if len(t.With.Endpoints) > 0 { log.Logger.Trace("multiple endpoints") @@ -108,7 +123,11 @@ func (t *collectGRPCTask) resultForVersion() (map[string]*runner.Report, error) continue } - results[gRPCMetricPrefix+"-"+endpointID] = igr + resultsKey := gRPCMetricPrefix + "-" + endpointID + if t.With.Grafana { + resultsKey = endpoint.Call + } + results[resultsKey] = *igr } } else { // TODO: supply all the allowed options @@ -121,7 +140,11 @@ func (t *collectGRPCTask) resultForVersion() (map[string]*runner.Report, error) return results, err } - results[gRPCMetricPrefix] = igr + resultsKey := gRPCMetricPrefix + if t.With.Grafana { + resultsKey = t.With.Call + } + results[resultsKey] = *igr } return results, err @@ -170,60 +193,80 @@ func (t *collectGRPCTask) run(exp *Experiment) error { } in := exp.Result.Insights - // 4. Populate all metrics collected by this task - for provider, data := range data { - // populate grpc request count - // todo: this logic breaks for looped experiments. Fix when we get to loops. - m := provider + "/" + gRPCRequestCountMetricName - mm := MetricMeta{ - Description: "number of gRPC requests sent", - Type: CounterMetricType, - } - if err = in.updateMetric(m, mm, 0, float64(data.Count)); err != nil { - return err + if t.With.Grafana { + // push data to metrics service + ghzResult := GHZResult{ + EndpointResults: data, + Summary: *exp.Result.Insights, } - // populate error count & rate - ec := float64(0) - for _, count := range data.ErrorDist { - ec += float64(count) + // get URL of metrics server from environment variable + metricsServerURL, ok := os.LookupEnv(MetricsServerURL) + if !ok { + errorMessage := "could not look up METRICS_SERVER_URL environment variable" + log.Logger.Error(errorMessage) + return fmt.Errorf(errorMessage) } - // populate count - // todo: This logic breaks for looped experiments. Fix when we get to loops. - m = provider + "/" + gRPCErrorCountMetricName - mm = MetricMeta{ - Description: "number of responses that were errors", - Type: CounterMetricType, - } - if err = in.updateMetric(m, mm, 0, ec); err != nil { + if err = putPerformanceResultToMetricsService(metricsServerURL, exp.Metadata.Namespace, exp.Metadata.Name, ghzResult); err != nil { return err } + } else { + // 4. Populate all metrics collected by this task + for provider, data := range data { + // populate grpc request count + // todo: this logic breaks for looped experiments. Fix when we get to loops.
+ m := provider + "/" + gRPCRequestCountMetricName + mm := MetricMeta{ + Description: "number of gRPC requests sent", + Type: CounterMetricType, + } + if err = in.updateMetric(m, mm, 0, float64(data.Count)); err != nil { + return err + } + + // populate error count & rate + ec := float64(0) + for _, count := range data.ErrorDist { + ec += float64(count) + } - // populate rate - // todo: This logic breaks for looped experiments. Fix when we get to loops. - m = provider + "/" + gRPCErrorRateMetricName - rc := float64(data.Count) - if rc != 0 { + // populate count + // todo: This logic breaks for looped experiments. Fix when we get to loops. + m = provider + "/" + gRPCErrorCountMetricName mm = MetricMeta{ - Description: "fraction of responses that were errors", - Type: GaugeMetricType, + Description: "number of responses that were errors", + Type: CounterMetricType, } - if err = in.updateMetric(m, mm, 0, ec/rc); err != nil { + if err = in.updateMetric(m, mm, 0, ec); err != nil { return err } - } - // populate latency sample - m = provider + "/" + gRPCLatencySampleMetricName - mm = MetricMeta{ - Description: "gRPC Latency Sample", - Type: SampleMetricType, - Units: StringPointer("msec"), - } - lh := latencySample(data.Details) - if err = in.updateMetric(m, mm, 0, lh); err != nil { - return err + // populate rate + // todo: This logic breaks for looped experiments. Fix when we get to loops. + m = provider + "/" + gRPCErrorRateMetricName + rc := float64(data.Count) + if rc != 0 { + mm = MetricMeta{ + Description: "fraction of responses that were errors", + Type: GaugeMetricType, + } + if err = in.updateMetric(m, mm, 0, ec/rc); err != nil { + return err + } + } + + // populate latency sample + m = provider + "/" + gRPCLatencySampleMetricName + mm = MetricMeta{ + Description: "gRPC Latency Sample", + Type: SampleMetricType, + Units: StringPointer("msec"), + } + lh := latencySample(data.Details) + if err = in.updateMetric(m, mm, 0, lh); err != nil { + return err + } } } diff --git a/metrics/server.go b/metrics/server.go index e577ac957..acd53eb83 100644 --- a/metrics/server.go +++ b/metrics/server.go @@ -11,6 +11,7 @@ import ( "strconv" "time" + "github.com/bojand/ghz/runner" "github.com/iter8-tools/iter8/abn" util "github.com/iter8-tools/iter8/base" "github.com/iter8-tools/iter8/base/log" @@ -82,6 +83,24 @@ type httpDashboard struct { Summary util.Insights } +type ghzStatistics struct { + Count uint64 + ErrorCount float64 +} + +type ghzEndpointPanel struct { + Durations grafanaHistogram + Statistics ghzStatistics + StatusCodeDistribution map[string]int `json:"Status codes"` +} + +type ghzDashboard struct { + // key is the endpoint + Endpoints map[string]ghzEndpointPanel + + Summary util.Insights +} + var allRoutemaps controllers.AllRouteMapsInterface = &controllers.DefaultRoutemaps{} // Start starts the HTTP server @@ -102,6 +121,7 @@ func Start(stopCh <-chan struct{}) error { http.HandleFunc("/metrics", getMetrics) http.HandleFunc(util.PerformanceResultPath, putResult) http.HandleFunc("/httpDashboard", getHTTPDashboard) + http.HandleFunc("/ghzDashboard", getGHZDashboard) // configure HTTP server server := &http.Server{ @@ -425,22 +445,21 @@ func getHTTPStatistics(fortioHistogram *fstats.HistogramData, decimalPlace float func getHTTPEndpointRow(httpRunnerResults *fhttp.HTTPRunnerResults) httpEndpointRow { result := httpEndpointRow{} if httpRunnerResults.DurationHistogram != nil { - result.Durations = getHTTPHistogram(httpRunnerResults.DurationHistogram.Data, 1) - result.Statistics = 
getHTTPStatistics(httpRunnerResults.DurationHistogram, 1) + panel.Durations = getHTTPHistogram(httpRunnerResults.DurationHistogram.Data, 1) + panel.Statistics = getHTTPStatistics(httpRunnerResults.DurationHistogram, 1) } if httpRunnerResults.ErrorsDurationHistogram != nil { - result.ErrorDurations = getHTTPHistogram(httpRunnerResults.ErrorsDurationHistogram.Data, 1) - result.ErrorStatistics = getHTTPStatistics(httpRunnerResults.ErrorsDurationHistogram, 1) + panel.ErrorDurations = getHTTPHistogram(httpRunnerResults.ErrorsDurationHistogram.Data, 1) + panel.ErrorStatistics = getHTTPStatistics(httpRunnerResults.ErrorsDurationHistogram, 1) } - result.ReturnCodes = httpRunnerResults.RetCodes + panel.ReturnCodes = httpRunnerResults.RetCodes - return result + return panel } func getHTTPDashboardHelper(fortioResult util.FortioResult) httpDashboard { - // add endpoint results dashboard := httpDashboard{ Endpoints: map[string]httpEndpointRow{}, } @@ -528,8 +547,8 @@ func getHTTPDashboard(w http.ResponseWriter, r *http.Request) { } // verify request (query parameter) + // required namespace and experiment name // Key: kt-result::my-namespace::my-experiment-name::my-endpoint - // Should namespace and experiment name come from application? namespace := r.URL.Query().Get("namespace") if namespace == "" { http.Error(w, "no namespace specified", http.StatusBadRequest) @@ -557,8 +576,6 @@ func getHTTPDashboard(w http.ResponseWriter, r *http.Request) { return } - // TODO: should these functions belong in collect_http.go? Or be somewhere closeby? - // These functions are only for the purpose of processing the results of collect_http.go fortioResult := util.FortioResult{} err = json.Unmarshal(result, &fortioResult) if err != nil { @@ -581,3 +598,121 @@ func getHTTPDashboard(w http.ResponseWriter, r *http.Request) { w.Header().Add("Content-Type", "application/json") _, _ = w.Write(dashboardBytes) } + +func getGHZHistogram(ghzHistogram []runner.Bucket, decimalPlace float64) grafanaHistogram { + grafanaHistogram := grafanaHistogram{} + + for _, bucket := range ghzHistogram { + grafanaHistogram = append(grafanaHistogram, grafanaHistogramBucket{ + Version: "0", + Bucket: fmt.Sprint(roundDecimal(bucket.Mark*1000, 3)), + Value: float64(bucket.Count), + }) + } + + return grafanaHistogram +} + +func getGHZStatistics(ghzRunnerReport runner.Report) ghzStatistics { + // populate error count & rate + ec := float64(0) + for _, count := range ghzRunnerReport.ErrorDist { + ec += float64(count) + } + + return ghzStatistics{ + Count: ghzRunnerReport.Count, + ErrorCount: ec, + } +} + +func getGHZEndpointPanel(ghzRunnerReport runner.Report) ghzEndpointPanel { + panel := ghzEndpointPanel{} + + if ghzRunnerReport.Histogram != nil { + panel.Durations = getGHZHistogram(ghzRunnerReport.Histogram, 3) + panel.Statistics = getGHZStatistics(ghzRunnerReport) + } + + panel.StatusCodeDistribution = ghzRunnerReport.StatusCodeDist + + return panel +} + +func getGHZDashboardHelper(ghzResult util.GHZResult) ghzDashboard { + dashboard := ghzDashboard{ + Endpoints: map[string]ghzEndpointPanel{}, + } + + for endpoint, endpointResult := range ghzResult.EndpointResults { + endpointResult := endpointResult + dashboard.Endpoints[endpoint] = getGHZEndpointPanel(endpointResult) + } + + dashboard.Summary = ghzResult.Summary + + return dashboard +} + +func getGHZDashboard(w http.ResponseWriter, r *http.Request) { + log.Logger.Trace("getGHZDashboard called") + defer log.Logger.Trace("getGHZDashboard completed") + + // verify method + if r.Method != 
http.MethodGet { + http.Error(w, "expected GET", http.StatusMethodNotAllowed) + return + } + + // verify request (query parameter) + // required namespace and experiment name + // Key: kt-result::my-namespace::my-experiment-name::my-endpoint + namespace := r.URL.Query().Get("namespace") + if namespace == "" { + http.Error(w, "no namespace specified", http.StatusBadRequest) + return + } + + experiment := r.URL.Query().Get("experiment") + if experiment == "" { + http.Error(w, "no experiment specified", http.StatusBadRequest) + return + } + + log.Logger.Tracef("getGHZDashboard called for namespace %s and experiment %s", namespace, experiment) + + // get result from metrics client + if abn.MetricsClient == nil { + http.Error(w, "no metrics client", http.StatusInternalServerError) + return + } + result, err := abn.MetricsClient.GetResult(namespace, experiment) + if err != nil { + errorMessage := fmt.Sprintf("cannot get result with namespace %s, experiment %s", namespace, experiment) + log.Logger.Error(errorMessage) + http.Error(w, errorMessage, http.StatusBadRequest) + return + } + + ghzResult := util.GHZResult{} + err = json.Unmarshal(result, &ghzResult) + if err != nil { + errorMessage := fmt.Sprintf("cannot JSON unmarshal result into GHZResult: \"%s\"", string(result)) + log.Logger.Error(errorMessage) + http.Error(w, errorMessage, http.StatusInternalServerError) + return + } + + // JSON marshal the dashboard + dashboardBytes, err := json.Marshal(getGHZDashboardHelper(ghzResult)) + if err != nil { + errorMessage := "cannot JSON marshal ghz dashboard" + log.Logger.Error(errorMessage) + http.Error(w, errorMessage, http.StatusInternalServerError) + return + } + + // finally, send response + w.Header().Add("Content-Type", "application/json") + _, _ = w.Write(dashboardBytes) +} diff --git a/metrics/server_test.go b/metrics/server_test.go index b784b8751..d2c3385a2 100644 --- a/metrics/server_test.go +++ b/metrics/server_test.go @@ -16,7 +16,6 @@ import ( "testing" "time" - fstats "fortio.org/fortio/stats" "github.com/dgraph-io/badger/v4" "github.com/iter8-tools/iter8/abn" util "github.com/iter8-tools/iter8/base" @@ -341,157 +340,9 @@ func getTestRM(namespace, name string) *testroutemap { } -func TestGetHTTPHistogram(t *testing.T) { - data := []fstats.Bucket{ - { - Interval: fstats.Interval{ - Start: 0.005229875, - End: 0.006, - }, - Percent: 2, - Count: 2, - }, - { - Interval: fstats.Interval{ - Start: 0.006, - End: 0.007, - }, - Percent: 5, - Count: 3, - }, - { - Interval: fstats.Interval{ - Start: 0.007, - End: 0.008, - }, - Percent: 6, - Count: 1, - }, - { - Interval: fstats.Interval{ - Start: 0.009000000000000001, - End: 0.01, - }, - Percent: 7, - Count: 1, - }, - { - Interval: fstats.Interval{ - Start: 0.01, - End: 0.011, - }, - Percent: 12, - Count: 5, - }, - { - Interval: fstats.Interval{ - Start: 0.011, - End: 0.012, - }, - Percent: 15, - Count: 3, - }, - { - Interval: fstats.Interval{ - Start: 0.012, - End: 0.014, - }, - Percent: 22, - Count: 7, - }, - { - Interval: fstats.Interval{ - Start: 0.014, - End: 0.016, - }, - Percent: 26, - Count: 4, - }, - { - Interval: fstats.Interval{ - Start: 0.016, - End: 0.018000000000000002, - }, - Percent: 37, - Count: 11, - }, - { - Interval: fstats.Interval{ - Start: 0.018000000000000002, - End: 0.02, - }, - Percent: 42, - Count: 5, - }, - { - Interval: fstats.Interval{ - Start: 0.02, - End: 0.025, - }, - Percent: 57, - Count: 15, - }, - { - Interval: fstats.Interval{ - Start: 0.025, - End: 0.03, - }, - Percent: 70, - Count: 13, - }, - { - Interval: 
fstats.Interval{ - Start: 0.03, - End: 0.035, - }, - Percent: 79, - Count: 9, - }, - { - Interval: fstats.Interval{ - Start: 0.035, - End: 0.04, - }, - Percent: 86, - Count: 7, - }, - { - Interval: fstats.Interval{ - Start: 0.04, - End: 0.045, - }, - Percent: 95, - Count: 9, - }, - { - Interval: fstats.Interval{ - Start: 0.045, - End: 0.05, - }, - Percent: 97, - Count: 2, - }, - { - Interval: fstats.Interval{ - Start: 0.05, - End: 0.051404375, - }, - Percent: 100, - Count: 3, - }, - } - - histogram := getHTTPHistogram(data, 1) - - histogramJSON, _ := json.Marshal(histogram) - fmt.Println(string(histogramJSON)) -} - func TestGetHTTPDashboardHelper(t *testing.T) { - result := "{\"EndpointResults\":{\"http://httpbin.default/get\":{\"RunType\":\"HTTP\",\"Labels\":\"\",\"StartTime\":\"2023-07-21T14:00:40.134434969Z\",\"RequestedQPS\":\"8\",\"RequestedDuration\":\"exactly 100 calls\",\"ActualQPS\":7.975606391552989,\"ActualDuration\":12538231589,\"NumThreads\":4,\"Version\":\"1.57.3\",\"DurationHistogram\":{\"Count\":100,\"Min\":0.004223875,\"Max\":0.040490042,\"Sum\":1.5977100850000001,\"Avg\":0.015977100850000002,\"StdDev\":0.008340658047253256,\"Data\":[{\"Start\":0.004223875,\"End\":0.005,\"Percent\":5,\"Count\":5},{\"Start\":0.005,\"End\":0.006,\"Percent\":10,\"Count\":5},{\"Start\":0.006,\"End\":0.007,\"Percent\":14,\"Count\":4},{\"Start\":0.007,\"End\":0.008,\"Percent\":19,\"Count\":5},{\"Start\":0.008,\"End\":0.009000000000000001,\"Percent\":24,\"Count\":5},{\"Start\":0.009000000000000001,\"End\":0.01,\"Percent\":28,\"Count\":4},{\"Start\":0.01,\"End\":0.011,\"Percent\":33,\"Count\":5},{\"Start\":0.011,\"End\":0.012,\"Percent\":36,\"Count\":3},{\"Start\":0.012,\"End\":0.014,\"Percent\":48,\"Count\":12},{\"Start\":0.014,\"End\":0.016,\"Percent\":55,\"Count\":7},{\"Start\":0.016,\"End\":0.018000000000000002,\"Percent\":65,\"Count\":10},{\"Start\":0.018000000000000002,\"End\":0.02,\"Percent\":74,\"Count\":9},{\"Start\":0.02,\"End\":0.025,\"Percent\":85,\"Count\":11},{\"Start\":0.025,\"End\":0.03,\"Percent\":93,\"Count\":8},{\"Start\":0.03,\"End\":0.035,\"Percent\":98,\"Count\":5},{\"Start\":0.035,\"End\":0.04,\"Percent\":99,\"Count\":1},{\"Start\":0.04,\"End\":0.040490042,\"Percent\":100,\"Count\":1}],\"Percentiles\":[{\"Percentile\":50,\"Value\":0.014571428571428572},{\"Percentile\":75,\"Value\":0.020454545454545454},{\"Percentile\":90,\"Value\":0.028125},{\"Percentile\":95,\"Value\":0.032},{\"Percentile\":99,\"Value\":0.04},{\"Percentile\":99.9,\"Value\":0.0404410378}]},\"ErrorsDurationHistogram\":{\"Count\":0,\"Min\":0,\"Max\":0,\"Sum\":0,\"Avg\":0,\"StdDev\":0,\"Data\":null},\"Exactly\":100,\"Jitter\":false,\"Uniform\":false,\"NoCatchUp\":false,\"RunID\":0,\"AccessLoggerInfo\":\"\",\"ID\":\"2023-07-21-140040\",\"RetCodes\":{\"200\":100},\"IPCountMap\":{\"10.96.108.76:80\":4},\"Insecure\":false,\"MTLS\":false,\"CACert\":\"\",\"Cert\":\"\",\"Key\":\"\",\"UnixDomainSocket\":\"\",\"URL\":\"http://httpbin.default/get\",\"NumConnections\":1,\"Compression\":false,\"DisableFastClient\":false,\"HTTP10\":false,\"H2\":false,\"DisableKeepAlive\":false,\"AllowHalfClose\":false,\"FollowRedirects\":false,\"Resolve\":\"\",\"HTTPReqTimeOut\":3000000000,\"UserCredentials\":\"\",\"ContentType\":\"\",\"Payload\":null,\"MethodOverride\":\"\",\"LogErrors\":false,\"SequentialWarmup\":false,\"ConnReuseRange\":[0,0],\"NoResolveEachConn\":false,\"Offset\":0,\"Resolution\":0.001,\"Sizes\":{\"Count\":100,\"Min\":413,\"Max\":413,\"Sum\":41300,\"Avg\":413,\"StdDev\":0,\"Data\":[{\"Start\":413,\"End\":413,\"Percent\":
100,\"Count\":100}]},\"HeaderSizes\":{\"Count\":100,\"Min\":230,\"Max\":230,\"Sum\":23000,\"Avg\":230,\"StdDev\":0,\"Data\":[{\"Start\":230,\"End\":230,\"Percent\":100,\"Count\":100}]},\"Sockets\":[1,1,1,1],\"SocketCount\":4,\"ConnectionStats\":{\"Count\":4,\"Min\":0.001385875,\"Max\":0.001724375,\"Sum\":0.006404583,\"Avg\":0.00160114575,\"StdDev\":0.00013101857565508474,\"Data\":[{\"Start\":0.001385875,\"End\":0.001724375,\"Percent\":100,\"Count\":4}],\"Percentiles\":[{\"Percentile\":50,\"Value\":0.0014987083333333332},{\"Percentile\":75,\"Value\":0.0016115416666666667},{\"Percentile\":90,\"Value\":0.0016792416666666667},{\"Percentile\":95,\"Value\":0.0017018083333333333},{\"Percentile\":99,\"Value\":0.0017198616666666668},{\"Percentile\":99.9,\"Value\":0.0017239236666666668}]},\"AbortOn\":0}},\"Summary\":{\"numVersions\":1,\"versionNames\":null,\"metricsInfo\":{\"http/latency\":{\"description\":\"Latency Histogram\",\"units\":\"msec\",\"type\":\"Histogram\"},\"http://httpbin.default/get/error-count\":{\"description\":\"number of responses that were errors\",\"type\":\"Counter\"},\"http://httpbin.default/get/error-rate\":{\"description\":\"fraction of responses that were errors\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-max\":{\"description\":\"maximum of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-mean\":{\"description\":\"mean of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-min\":{\"description\":\"minimum of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p50\":{\"description\":\"50-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p75\":{\"description\":\"75-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p90\":{\"description\":\"90-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p95\":{\"description\":\"95-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p99\":{\"description\":\"99-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p99.9\":{\"description\":\"99.9-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-stddev\":{\"description\":\"standard deviation of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/request-count\":{\"description\":\"number of requests 
sent\",\"type\":\"Counter\"}},\"nonHistMetricValues\":[{\"http://httpbin.default/get/error-count\":[0],\"http://httpbin.default/get/error-rate\":[0],\"http://httpbin.default/get/latency-max\":[40.490041999999995],\"http://httpbin.default/get/latency-mean\":[15.977100850000001],\"http://httpbin.default/get/latency-min\":[4.2238750000000005],\"http://httpbin.default/get/latency-p50\":[14.571428571428571],\"http://httpbin.default/get/latency-p75\":[20.454545454545453],\"http://httpbin.default/get/latency-p90\":[28.125],\"http://httpbin.default/get/latency-p95\":[32],\"http://httpbin.default/get/latency-p99\":[40],\"http://httpbin.default/get/latency-p99.9\":[40.441037800000004],\"http://httpbin.default/get/latency-stddev\":[8.340658047253257],\"http://httpbin.default/get/request-count\":[100]}],\"histMetricValues\":[{\"http/latency\":[{\"lower\":4.2238750000000005,\"upper\":5,\"count\":5},{\"lower\":5,\"upper\":6,\"count\":5},{\"lower\":6,\"upper\":7,\"count\":4},{\"lower\":7,\"upper\":8,\"count\":5},{\"lower\":8,\"upper\":9.000000000000002,\"count\":5},{\"lower\":9.000000000000002,\"upper\":10,\"count\":4},{\"lower\":10,\"upper\":11,\"count\":5},{\"lower\":11,\"upper\":12,\"count\":3},{\"lower\":12,\"upper\":14,\"count\":12},{\"lower\":14,\"upper\":16,\"count\":7},{\"lower\":16,\"upper\":18.000000000000004,\"count\":10},{\"lower\":18.000000000000004,\"upper\":20,\"count\":9},{\"lower\":20,\"upper\":25,\"count\":11},{\"lower\":25,\"upper\":30,\"count\":8},{\"lower\":30,\"upper\":35,\"count\":5},{\"lower\":35,\"upper\":40,\"count\":1},{\"lower\":40,\"upper\":40.490041999999995,\"count\":1}]}],\"SummaryMetricValues\":[{}]}}" - fortioResult := util.FortioResult{} - err := json.Unmarshal([]byte(result), &fortioResult) + err := json.Unmarshal([]byte(fortioResultJSON), &fortioResult) assert.NoError(t, err) dashboard := getHTTPDashboardHelper(fortioResult) @@ -501,7 +352,24 @@ func TestGetHTTPDashboardHelper(t *testing.T) { assert.Equal( t, - "{\"Endpoints\":{\"http://httpbin.default/get\":{\"Durations\":[{\"Version\":\"0\",\"Bucket\":\"4.2 - 5\",\"Value\":5},{\"Version\":\"0\",\"Bucket\":\"5 - 6\",\"Value\":5},{\"Version\":\"0\",\"Bucket\":\"6 - 7\",\"Value\":4},{\"Version\":\"0\",\"Bucket\":\"7 - 8\",\"Value\":5},{\"Version\":\"0\",\"Bucket\":\"8 - 9\",\"Value\":5},{\"Version\":\"0\",\"Bucket\":\"9 - 10\",\"Value\":4},{\"Version\":\"0\",\"Bucket\":\"10 - 11\",\"Value\":5},{\"Version\":\"0\",\"Bucket\":\"11 - 12\",\"Value\":3},{\"Version\":\"0\",\"Bucket\":\"12 - 14\",\"Value\":12},{\"Version\":\"0\",\"Bucket\":\"14 - 16\",\"Value\":7},{\"Version\":\"0\",\"Bucket\":\"16 - 18\",\"Value\":10},{\"Version\":\"0\",\"Bucket\":\"18 - 20\",\"Value\":9},{\"Version\":\"0\",\"Bucket\":\"20 - 25\",\"Value\":11},{\"Version\":\"0\",\"Bucket\":\"25 - 30\",\"Value\":8},{\"Version\":\"0\",\"Bucket\":\"30 - 35\",\"Value\":5},{\"Version\":\"0\",\"Bucket\":\"35 - 40\",\"Value\":1},{\"Version\":\"0\",\"Bucket\":\"40 - 40.4\",\"Value\":1}],\"Statistics\":{\"Count\":100,\"Mean\":15.977100850000001,\"StdDev\":8.340658047253257,\"Min\":4.2238750000000005,\"Max\":40.490041999999995},\"Error durations\":[],\"Error statistics\":{\"Count\":0,\"Mean\":0,\"StdDev\":0,\"Min\":0,\"Max\":0},\"Return codes\":{\"200\":100}}},\"Summary\":{\"numVersions\":1,\"versionNames\":null,\"metricsInfo\":{\"http/latency\":{\"description\":\"Latency Histogram\",\"units\":\"msec\",\"type\":\"Histogram\"},\"http://httpbin.default/get/error-count\":{\"description\":\"number of responses that were 
errors\",\"type\":\"Counter\"},\"http://httpbin.default/get/error-rate\":{\"description\":\"fraction of responses that were errors\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-max\":{\"description\":\"maximum of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-mean\":{\"description\":\"mean of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-min\":{\"description\":\"minimum of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p50\":{\"description\":\"50-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p75\":{\"description\":\"75-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p90\":{\"description\":\"90-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p95\":{\"description\":\"95-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p99\":{\"description\":\"99-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p99.9\":{\"description\":\"99.9-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-stddev\":{\"description\":\"standard deviation of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/request-count\":{\"description\":\"number of requests sent\",\"type\":\"Counter\"}},\"nonHistMetricValues\":[{\"http://httpbin.default/get/error-count\":[0],\"http://httpbin.default/get/error-rate\":[0],\"http://httpbin.default/get/latency-max\":[40.490041999999995],\"http://httpbin.default/get/latency-mean\":[15.977100850000001],\"http://httpbin.default/get/latency-min\":[4.2238750000000005],\"http://httpbin.default/get/latency-p50\":[14.571428571428571],\"http://httpbin.default/get/latency-p75\":[20.454545454545453],\"http://httpbin.default/get/latency-p90\":[28.125],\"http://httpbin.default/get/latency-p95\":[32],\"http://httpbin.default/get/latency-p99\":[40],\"http://httpbin.default/get/latency-p99.9\":[40.441037800000004],\"http://httpbin.default/get/latency-stddev\":[8.340658047253257],\"http://httpbin.default/get/request-count\":[100]}],\"histMetricValues\":[{\"http/latency\":[{\"lower\":4.2238750000000005,\"upper\":5,\"count\":5},{\"lower\":5,\"upper\":6,\"count\":5},{\"lower\":6,\"upper\":7,\"count\":4},{\"lower\":7,\"upper\":8,\"count\":5},{\"lower\":8,\"upper\":9.000000000000002,\"count\":5},{\"lower\":9.000000000000002,\"upper\":10,\"count\":4},{\"lower\":10,\"upper\":11,\"count\":5},{\"lower\":11,\"upper\":12,\"count\":3},{\"lower\":12,\"upper\":14,\"count\":12},{\"lower\":14,\"upper\":16,\"count\":7},{\"lower\":16,\"upper\":18.000000000000004,\"count\":10},{\"lower\":18.000000000000004,\"upper\":20,\"count\":9},{\"lower\":20,\"upper\":25,\"count\":11},{\"lower\":25,\"upper\":30,\"count\":8},{\"lower\":30,\"upper\":35,\"count\":5},{\"lower\":35,\"upper\":40,\"count\":1},{\"lower\":40,\"upper\":40.490041999999995,\"count\":1}]}],\"SummaryMetricValues\":[{}]}}", + fortioDashboardJSON, + string(dashboardBytes), + ) +} + +func TestGetGHZDashboardHelper(t *testing.T) { + ghzResult := util.GHZResult{} + err := json.Unmarshal([]byte(ghzResultJSON), &ghzResult) + assert.NoError(t, 
err) + + dashboard := getGHZDashboardHelper(ghzResult) + + assert.NotNil(t, dashboard) + dashboardBytes, err := json.Marshal(dashboard) + assert.NoError(t, err) + assert.Equal( + t, + ghzDashboardJSON, string(dashboardBytes), ) } @@ -651,6 +519,506 @@ func TestGetHTTPDashboardMissingParameter(t *testing.T) { } } +const fortioResultJSON = `{ + "EndpointResults": { + "http://httpbin.default/get": { + "RunType": "HTTP", + "Labels": "", + "StartTime": "2023-07-21T14:00:40.134434969Z", + "RequestedQPS": "8", + "RequestedDuration": "exactly 100 calls", + "ActualQPS": 7.975606391552989, + "ActualDuration": 12538231589, + "NumThreads": 4, + "Version": "1.57.3", + "DurationHistogram": { + "Count": 100, + "Min": 0.004223875, + "Max": 0.040490042, + "Sum": 1.5977100850000001, + "Avg": 0.015977100850000002, + "StdDev": 0.008340658047253256, + "Data": [ + { + "Start": 0.004223875, + "End": 0.005, + "Percent": 5, + "Count": 5 + }, + { + "Start": 0.005, + "End": 0.006, + "Percent": 10, + "Count": 5 + }, + { + "Start": 0.006, + "End": 0.007, + "Percent": 14, + "Count": 4 + }, + { + "Start": 0.007, + "End": 0.008, + "Percent": 19, + "Count": 5 + }, + { + "Start": 0.008, + "End": 0.009000000000000001, + "Percent": 24, + "Count": 5 + }, + { + "Start": 0.009000000000000001, + "End": 0.01, + "Percent": 28, + "Count": 4 + }, + { + "Start": 0.01, + "End": 0.011, + "Percent": 33, + "Count": 5 + }, + { + "Start": 0.011, + "End": 0.012, + "Percent": 36, + "Count": 3 + }, + { + "Start": 0.012, + "End": 0.014, + "Percent": 48, + "Count": 12 + }, + { + "Start": 0.014, + "End": 0.016, + "Percent": 55, + "Count": 7 + }, + { + "Start": 0.016, + "End": 0.018000000000000002, + "Percent": 65, + "Count": 10 + }, + { + "Start": 0.018000000000000002, + "End": 0.02, + "Percent": 74, + "Count": 9 + }, + { + "Start": 0.02, + "End": 0.025, + "Percent": 85, + "Count": 11 + }, + { + "Start": 0.025, + "End": 0.03, + "Percent": 93, + "Count": 8 + }, + { + "Start": 0.03, + "End": 0.035, + "Percent": 98, + "Count": 5 + }, + { + "Start": 0.035, + "End": 0.04, + "Percent": 99, + "Count": 1 + }, + { + "Start": 0.04, + "End": 0.040490042, + "Percent": 100, + "Count": 1 + } + ], + "Percentiles": [ + { + "Percentile": 50, + "Value": 0.014571428571428572 + }, + { + "Percentile": 75, + "Value": 0.020454545454545454 + }, + { + "Percentile": 90, + "Value": 0.028125 + }, + { + "Percentile": 95, + "Value": 0.032 + }, + { + "Percentile": 99, + "Value": 0.04 + }, + { + "Percentile": 99.9, + "Value": 0.0404410378 + } + ] + }, + "ErrorsDurationHistogram": { + "Count": 0, + "Min": 0, + "Max": 0, + "Sum": 0, + "Avg": 0, + "StdDev": 0, + "Data": null + }, + "Exactly": 100, + "Jitter": false, + "Uniform": false, + "NoCatchUp": false, + "RunID": 0, + "AccessLoggerInfo": "", + "ID": "2023-07-21-140040", + "RetCodes": { + "200": 100 + }, + "IPCountMap": { + "10.96.108.76:80": 4 + }, + "Insecure": false, + "MTLS": false, + "CACert": "", + "Cert": "", + "Key": "", + "UnixDomainSocket": "", + "URL": "http://httpbin.default/get", + "NumConnections": 1, + "Compression": false, + "DisableFastClient": false, + "HTTP10": false, + "H2": false, + "DisableKeepAlive": false, + "AllowHalfClose": false, + "FollowRedirects": false, + "Resolve": "", + "HTTPReqTimeOut": 3000000000, + "UserCredentials": "", + "ContentType": "", + "Payload": null, + "MethodOverride": "", + "LogErrors": false, + "SequentialWarmup": false, + "ConnReuseRange": [ + 0, + 0 + ], + "NoResolveEachConn": false, + "Offset": 0, + "Resolution": 0.001, + "Sizes": { + "Count": 100, + "Min": 413, + "Max": 
413, + "Sum": 41300, + "Avg": 413, + "StdDev": 0, + "Data": [ + { + "Start": 413, + "End": 413, + "Percent": 100, + "Count": 100 + } + ] + }, + "HeaderSizes": { + "Count": 100, + "Min": 230, + "Max": 230, + "Sum": 23000, + "Avg": 230, + "StdDev": 0, + "Data": [ + { + "Start": 230, + "End": 230, + "Percent": 100, + "Count": 100 + } + ] + }, + "Sockets": [ + 1, + 1, + 1, + 1 + ], + "SocketCount": 4, + "ConnectionStats": { + "Count": 4, + "Min": 0.001385875, + "Max": 0.001724375, + "Sum": 0.006404583, + "Avg": 0.00160114575, + "StdDev": 0.00013101857565508474, + "Data": [ + { + "Start": 0.001385875, + "End": 0.001724375, + "Percent": 100, + "Count": 4 + } + ], + "Percentiles": [ + { + "Percentile": 50, + "Value": 0.0014987083333333332 + }, + { + "Percentile": 75, + "Value": 0.0016115416666666667 + }, + { + "Percentile": 90, + "Value": 0.0016792416666666667 + }, + { + "Percentile": 95, + "Value": 0.0017018083333333333 + }, + { + "Percentile": 99, + "Value": 0.0017198616666666668 + }, + { + "Percentile": 99.9, + "Value": 0.0017239236666666668 + } + ] + }, + "AbortOn": 0 + } + }, + "Summary": { + "numVersions": 1, + "versionNames": null, + "metricsInfo": { + "http/latency": { + "description": "Latency Histogram", + "units": "msec", + "type": "Histogram" + }, + "http://httpbin.default/get/error-count": { + "description": "number of responses that were errors", + "type": "Counter" + }, + "http://httpbin.default/get/error-rate": { + "description": "fraction of responses that were errors", + "type": "Gauge" + }, + "http://httpbin.default/get/latency-max": { + "description": "maximum of observed latency values", + "units": "msec", + "type": "Gauge" + }, + "http://httpbin.default/get/latency-mean": { + "description": "mean of observed latency values", + "units": "msec", + "type": "Gauge" + }, + "http://httpbin.default/get/latency-min": { + "description": "minimum of observed latency values", + "units": "msec", + "type": "Gauge" + }, + "http://httpbin.default/get/latency-p50": { + "description": "50-th percentile of observed latency values", + "units": "msec", + "type": "Gauge" + }, + "http://httpbin.default/get/latency-p75": { + "description": "75-th percentile of observed latency values", + "units": "msec", + "type": "Gauge" + }, + "http://httpbin.default/get/latency-p90": { + "description": "90-th percentile of observed latency values", + "units": "msec", + "type": "Gauge" + }, + "http://httpbin.default/get/latency-p95": { + "description": "95-th percentile of observed latency values", + "units": "msec", + "type": "Gauge" + }, + "http://httpbin.default/get/latency-p99": { + "description": "99-th percentile of observed latency values", + "units": "msec", + "type": "Gauge" + }, + "http://httpbin.default/get/latency-p99.9": { + "description": "99.9-th percentile of observed latency values", + "units": "msec", + "type": "Gauge" + }, + "http://httpbin.default/get/latency-stddev": { + "description": "standard deviation of observed latency values", + "units": "msec", + "type": "Gauge" + }, + "http://httpbin.default/get/request-count": { + "description": "number of requests sent", + "type": "Counter" + } + }, + "nonHistMetricValues": [ + { + "http://httpbin.default/get/error-count": [ + 0 + ], + "http://httpbin.default/get/error-rate": [ + 0 + ], + "http://httpbin.default/get/latency-max": [ + 40.490041999999995 + ], + "http://httpbin.default/get/latency-mean": [ + 15.977100850000001 + ], + "http://httpbin.default/get/latency-min": [ + 4.2238750000000005 + ], + "http://httpbin.default/get/latency-p50": [ + 
14.571428571428571 + ], + "http://httpbin.default/get/latency-p75": [ + 20.454545454545453 + ], + "http://httpbin.default/get/latency-p90": [ + 28.125 + ], + "http://httpbin.default/get/latency-p95": [ + 32 + ], + "http://httpbin.default/get/latency-p99": [ + 40 + ], + "http://httpbin.default/get/latency-p99.9": [ + 40.441037800000004 + ], + "http://httpbin.default/get/latency-stddev": [ + 8.340658047253257 + ], + "http://httpbin.default/get/request-count": [ + 100 + ] + } + ], + "histMetricValues": [ + { + "http/latency": [ + { + "lower": 4.2238750000000005, + "upper": 5, + "count": 5 + }, + { + "lower": 5, + "upper": 6, + "count": 5 + }, + { + "lower": 6, + "upper": 7, + "count": 4 + }, + { + "lower": 7, + "upper": 8, + "count": 5 + }, + { + "lower": 8, + "upper": 9.000000000000002, + "count": 5 + }, + { + "lower": 9.000000000000002, + "upper": 10, + "count": 4 + }, + { + "lower": 10, + "upper": 11, + "count": 5 + }, + { + "lower": 11, + "upper": 12, + "count": 3 + }, + { + "lower": 12, + "upper": 14, + "count": 12 + }, + { + "lower": 14, + "upper": 16, + "count": 7 + }, + { + "lower": 16, + "upper": 18.000000000000004, + "count": 10 + }, + { + "lower": 18.000000000000004, + "upper": 20, + "count": 9 + }, + { + "lower": 20, + "upper": 25, + "count": 11 + }, + { + "lower": 25, + "upper": 30, + "count": 8 + }, + { + "lower": 30, + "upper": 35, + "count": 5 + }, + { + "lower": 35, + "upper": 40, + "count": 1 + }, + { + "lower": 40, + "upper": 40.490041999999995, + "count": 1 + } + ] + } + ], + "SummaryMetricValues": [ + {} + ] + } +}` + +const fortioDashboardJSON = `{"Endpoints":{"http://httpbin.default/get":{"Durations":[{"Version":"0","Bucket":"4.2 - 5","Value":5},{"Version":"0","Bucket":"5 - 6","Value":5},{"Version":"0","Bucket":"6 - 7","Value":4},{"Version":"0","Bucket":"7 - 8","Value":5},{"Version":"0","Bucket":"8 - 9","Value":5},{"Version":"0","Bucket":"9 - 10","Value":4},{"Version":"0","Bucket":"10 - 11","Value":5},{"Version":"0","Bucket":"11 - 12","Value":3},{"Version":"0","Bucket":"12 - 14","Value":12},{"Version":"0","Bucket":"14 - 16","Value":7},{"Version":"0","Bucket":"16 - 18","Value":10},{"Version":"0","Bucket":"18 - 20","Value":9},{"Version":"0","Bucket":"20 - 25","Value":11},{"Version":"0","Bucket":"25 - 30","Value":8},{"Version":"0","Bucket":"30 - 35","Value":5},{"Version":"0","Bucket":"35 - 40","Value":1},{"Version":"0","Bucket":"40 - 40.4","Value":1}],"Statistics":{"Count":100,"Mean":15.977100850000001,"StdDev":8.340658047253257,"Min":4.2238750000000005,"Max":40.490041999999995},"Error durations":[],"Error statistics":{"Count":0,"Mean":0,"StdDev":0,"Min":0,"Max":0},"Return codes":{"200":100}}},"Summary":{"numVersions":1,"versionNames":null,"metricsInfo":{"http/latency":{"description":"Latency Histogram","units":"msec","type":"Histogram"},"http://httpbin.default/get/error-count":{"description":"number of responses that were errors","type":"Counter"},"http://httpbin.default/get/error-rate":{"description":"fraction of responses that were errors","type":"Gauge"},"http://httpbin.default/get/latency-max":{"description":"maximum of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/latency-mean":{"description":"mean of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/latency-min":{"description":"minimum of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/latency-p50":{"description":"50-th percentile of observed latency 
values","units":"msec","type":"Gauge"},"http://httpbin.default/get/latency-p75":{"description":"75-th percentile of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/latency-p90":{"description":"90-th percentile of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/latency-p95":{"description":"95-th percentile of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/latency-p99":{"description":"99-th percentile of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/latency-p99.9":{"description":"99.9-th percentile of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/latency-stddev":{"description":"standard deviation of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/request-count":{"description":"number of requests sent","type":"Counter"}},"nonHistMetricValues":[{"http://httpbin.default/get/error-count":[0],"http://httpbin.default/get/error-rate":[0],"http://httpbin.default/get/latency-max":[40.490041999999995],"http://httpbin.default/get/latency-mean":[15.977100850000001],"http://httpbin.default/get/latency-min":[4.2238750000000005],"http://httpbin.default/get/latency-p50":[14.571428571428571],"http://httpbin.default/get/latency-p75":[20.454545454545453],"http://httpbin.default/get/latency-p90":[28.125],"http://httpbin.default/get/latency-p95":[32],"http://httpbin.default/get/latency-p99":[40],"http://httpbin.default/get/latency-p99.9":[40.441037800000004],"http://httpbin.default/get/latency-stddev":[8.340658047253257],"http://httpbin.default/get/request-count":[100]}],"histMetricValues":[{"http/latency":[{"lower":4.2238750000000005,"upper":5,"count":5},{"lower":5,"upper":6,"count":5},{"lower":6,"upper":7,"count":4},{"lower":7,"upper":8,"count":5},{"lower":8,"upper":9.000000000000002,"count":5},{"lower":9.000000000000002,"upper":10,"count":4},{"lower":10,"upper":11,"count":5},{"lower":11,"upper":12,"count":3},{"lower":12,"upper":14,"count":12},{"lower":14,"upper":16,"count":7},{"lower":16,"upper":18.000000000000004,"count":10},{"lower":18.000000000000004,"upper":20,"count":9},{"lower":20,"upper":25,"count":11},{"lower":25,"upper":30,"count":8},{"lower":30,"upper":35,"count":5},{"lower":35,"upper":40,"count":1},{"lower":40,"upper":40.490041999999995,"count":1}]}],"SummaryMetricValues":[{}]}}` + func TestGetHTTPDashboard(t *testing.T) { // instantiate metrics client tempDirPath := t.TempDir() @@ -659,8 +1027,7 @@ func TestGetHTTPDashboard(t *testing.T) { abn.MetricsClient = client // preload metric client with result - result := "{\"EndpointResults\":{\"http://httpbin.default/get\":{\"RunType\":\"HTTP\",\"Labels\":\"\",\"StartTime\":\"2023-07-21T14:00:40.134434969Z\",\"RequestedQPS\":\"8\",\"RequestedDuration\":\"exactly 100 
calls\",\"ActualQPS\":7.975606391552989,\"ActualDuration\":12538231589,\"NumThreads\":4,\"Version\":\"1.57.3\",\"DurationHistogram\":{\"Count\":100,\"Min\":0.004223875,\"Max\":0.040490042,\"Sum\":1.5977100850000001,\"Avg\":0.015977100850000002,\"StdDev\":0.008340658047253256,\"Data\":[{\"Start\":0.004223875,\"End\":0.005,\"Percent\":5,\"Count\":5},{\"Start\":0.005,\"End\":0.006,\"Percent\":10,\"Count\":5},{\"Start\":0.006,\"End\":0.007,\"Percent\":14,\"Count\":4},{\"Start\":0.007,\"End\":0.008,\"Percent\":19,\"Count\":5},{\"Start\":0.008,\"End\":0.009000000000000001,\"Percent\":24,\"Count\":5},{\"Start\":0.009000000000000001,\"End\":0.01,\"Percent\":28,\"Count\":4},{\"Start\":0.01,\"End\":0.011,\"Percent\":33,\"Count\":5},{\"Start\":0.011,\"End\":0.012,\"Percent\":36,\"Count\":3},{\"Start\":0.012,\"End\":0.014,\"Percent\":48,\"Count\":12},{\"Start\":0.014,\"End\":0.016,\"Percent\":55,\"Count\":7},{\"Start\":0.016,\"End\":0.018000000000000002,\"Percent\":65,\"Count\":10},{\"Start\":0.018000000000000002,\"End\":0.02,\"Percent\":74,\"Count\":9},{\"Start\":0.02,\"End\":0.025,\"Percent\":85,\"Count\":11},{\"Start\":0.025,\"End\":0.03,\"Percent\":93,\"Count\":8},{\"Start\":0.03,\"End\":0.035,\"Percent\":98,\"Count\":5},{\"Start\":0.035,\"End\":0.04,\"Percent\":99,\"Count\":1},{\"Start\":0.04,\"End\":0.040490042,\"Percent\":100,\"Count\":1}],\"Percentiles\":[{\"Percentile\":50,\"Value\":0.014571428571428572},{\"Percentile\":75,\"Value\":0.020454545454545454},{\"Percentile\":90,\"Value\":0.028125},{\"Percentile\":95,\"Value\":0.032},{\"Percentile\":99,\"Value\":0.04},{\"Percentile\":99.9,\"Value\":0.0404410378}]},\"ErrorsDurationHistogram\":{\"Count\":0,\"Min\":0,\"Max\":0,\"Sum\":0,\"Avg\":0,\"StdDev\":0,\"Data\":null},\"Exactly\":100,\"Jitter\":false,\"Uniform\":false,\"NoCatchUp\":false,\"RunID\":0,\"AccessLoggerInfo\":\"\",\"ID\":\"2023-07-21-140040\",\"RetCodes\":{\"200\":100},\"IPCountMap\":{\"10.96.108.76:80\":4},\"Insecure\":false,\"MTLS\":false,\"CACert\":\"\",\"Cert\":\"\",\"Key\":\"\",\"UnixDomainSocket\":\"\",\"URL\":\"http://httpbin.default/get\",\"NumConnections\":1,\"Compression\":false,\"DisableFastClient\":false,\"HTTP10\":false,\"H2\":false,\"DisableKeepAlive\":false,\"AllowHalfClose\":false,\"FollowRedirects\":false,\"Resolve\":\"\",\"HTTPReqTimeOut\":3000000000,\"UserCredentials\":\"\",\"ContentType\":\"\",\"Payload\":null,\"MethodOverride\":\"\",\"LogErrors\":false,\"SequentialWarmup\":false,\"ConnReuseRange\":[0,0],\"NoResolveEachConn\":false,\"Offset\":0,\"Resolution\":0.001,\"Sizes\":{\"Count\":100,\"Min\":413,\"Max\":413,\"Sum\":41300,\"Avg\":413,\"StdDev\":0,\"Data\":[{\"Start\":413,\"End\":413,\"Percent\":100,\"Count\":100}]},\"HeaderSizes\":{\"Count\":100,\"Min\":230,\"Max\":230,\"Sum\":23000,\"Avg\":230,\"StdDev\":0,\"Data\":[{\"Start\":230,\"End\":230,\"Percent\":100,\"Count\":100}]},\"Sockets\":[1,1,1,1],\"SocketCount\":4,\"ConnectionStats\":{\"Count\":4,\"Min\":0.001385875,\"Max\":0.001724375,\"Sum\":0.006404583,\"Avg\":0.00160114575,\"StdDev\":0.00013101857565508474,\"Data\":[{\"Start\":0.001385875,\"End\":0.001724375,\"Percent\":100,\"Count\":4}],\"Percentiles\":[{\"Percentile\":50,\"Value\":0.0014987083333333332},{\"Percentile\":75,\"Value\":0.0016115416666666667},{\"Percentile\":90,\"Value\":0.0016792416666666667},{\"Percentile\":95,\"Value\":0.0017018083333333333},{\"Percentile\":99,\"Value\":0.0017198616666666668},{\"Percentile\":99.9,\"Value\":0.0017239236666666668}]},\"AbortOn\":0}},\"Summary\":{\"numVersions\":1,\"versionNames\":null,\"metricsInfo\":{\"http/
latency\":{\"description\":\"Latency Histogram\",\"units\":\"msec\",\"type\":\"Histogram\"},\"http://httpbin.default/get/error-count\":{\"description\":\"number of responses that were errors\",\"type\":\"Counter\"},\"http://httpbin.default/get/error-rate\":{\"description\":\"fraction of responses that were errors\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-max\":{\"description\":\"maximum of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-mean\":{\"description\":\"mean of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-min\":{\"description\":\"minimum of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p50\":{\"description\":\"50-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p75\":{\"description\":\"75-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p90\":{\"description\":\"90-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p95\":{\"description\":\"95-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p99\":{\"description\":\"99-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-p99.9\":{\"description\":\"99.9-th percentile of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/latency-stddev\":{\"description\":\"standard deviation of observed latency values\",\"units\":\"msec\",\"type\":\"Gauge\"},\"http://httpbin.default/get/request-count\":{\"description\":\"number of requests sent\",\"type\":\"Counter\"}},\"nonHistMetricValues\":[{\"http://httpbin.default/get/error-count\":[0],\"http://httpbin.default/get/error-rate\":[0],\"http://httpbin.default/get/latency-max\":[40.490041999999995],\"http://httpbin.default/get/latency-mean\":[15.977100850000001],\"http://httpbin.default/get/latency-min\":[4.2238750000000005],\"http://httpbin.default/get/latency-p50\":[14.571428571428571],\"http://httpbin.default/get/latency-p75\":[20.454545454545453],\"http://httpbin.default/get/latency-p90\":[28.125],\"http://httpbin.default/get/latency-p95\":[32],\"http://httpbin.default/get/latency-p99\":[40],\"http://httpbin.default/get/latency-p99.9\":[40.441037800000004],\"http://httpbin.default/get/latency-stddev\":[8.340658047253257],\"http://httpbin.default/get/request-count\":[100]}],\"histMetricValues\":[{\"http/latency\":[{\"lower\":4.2238750000000005,\"upper\":5,\"count\":5},{\"lower\":5,\"upper\":6,\"count\":5},{\"lower\":6,\"upper\":7,\"count\":4},{\"lower\":7,\"upper\":8,\"count\":5},{\"lower\":8,\"upper\":9.000000000000002,\"count\":5},{\"lower\":9.000000000000002,\"upper\":10,\"count\":4},{\"lower\":10,\"upper\":11,\"count\":5},{\"lower\":11,\"upper\":12,\"count\":3},{\"lower\":12,\"upper\":14,\"count\":12},{\"lower\":14,\"upper\":16,\"count\":7},{\"lower\":16,\"upper\":18.000000000000004,\"count\":10},{\"lower\":18.000000000000004,\"upper\":20,\"count\":9},{\"lower\":20,\"upper\":25,\"count\":11},{\"lower\":25,\"upper\":30,\"count\":8},{\"lower\":30,\"upper\":35,\"count\":5},{\"lower\":35,\"upper\":40,\"count\":1},{\"lower\":40,\"upper\":40.490041999999995,\"count\":1}]}],\"SummaryMetricValues\":[{}]}}" - err = 
abn.MetricsClient.SetResult("default", "default", []byte(result)) + err = abn.MetricsClient.SetResult("default", "default", []byte(fortioResultJSON)) assert.NoError(t, err) w := httptest.NewRecorder() @@ -690,7 +1057,298 @@ func TestGetHTTPDashboard(t *testing.T) { assert.NoError(t, err) assert.Equal( t, - `{"Endpoints":{"http://httpbin.default/get":{"Durations":[{"Version":"0","Bucket":"4.2 - 5","Value":5},{"Version":"0","Bucket":"5 - 6","Value":5},{"Version":"0","Bucket":"6 - 7","Value":4},{"Version":"0","Bucket":"7 - 8","Value":5},{"Version":"0","Bucket":"8 - 9","Value":5},{"Version":"0","Bucket":"9 - 10","Value":4},{"Version":"0","Bucket":"10 - 11","Value":5},{"Version":"0","Bucket":"11 - 12","Value":3},{"Version":"0","Bucket":"12 - 14","Value":12},{"Version":"0","Bucket":"14 - 16","Value":7},{"Version":"0","Bucket":"16 - 18","Value":10},{"Version":"0","Bucket":"18 - 20","Value":9},{"Version":"0","Bucket":"20 - 25","Value":11},{"Version":"0","Bucket":"25 - 30","Value":8},{"Version":"0","Bucket":"30 - 35","Value":5},{"Version":"0","Bucket":"35 - 40","Value":1},{"Version":"0","Bucket":"40 - 40.4","Value":1}],"Statistics":{"Count":100,"Mean":15.977100850000001,"StdDev":8.340658047253257,"Min":4.2238750000000005,"Max":40.490041999999995},"Error durations":[],"Error statistics":{"Count":0,"Mean":0,"StdDev":0,"Min":0,"Max":0},"Return codes":{"200":100}}},"Summary":{"numVersions":1,"versionNames":null,"metricsInfo":{"http/latency":{"description":"Latency Histogram","units":"msec","type":"Histogram"},"http://httpbin.default/get/error-count":{"description":"number of responses that were errors","type":"Counter"},"http://httpbin.default/get/error-rate":{"description":"fraction of responses that were errors","type":"Gauge"},"http://httpbin.default/get/latency-max":{"description":"maximum of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/latency-mean":{"description":"mean of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/latency-min":{"description":"minimum of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/latency-p50":{"description":"50-th percentile of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/latency-p75":{"description":"75-th percentile of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/latency-p90":{"description":"90-th percentile of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/latency-p95":{"description":"95-th percentile of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/latency-p99":{"description":"99-th percentile of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/latency-p99.9":{"description":"99.9-th percentile of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/latency-stddev":{"description":"standard deviation of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/request-count":{"description":"number of requests 
sent","type":"Counter"}},"nonHistMetricValues":[{"http://httpbin.default/get/error-count":[0],"http://httpbin.default/get/error-rate":[0],"http://httpbin.default/get/latency-max":[40.490041999999995],"http://httpbin.default/get/latency-mean":[15.977100850000001],"http://httpbin.default/get/latency-min":[4.2238750000000005],"http://httpbin.default/get/latency-p50":[14.571428571428571],"http://httpbin.default/get/latency-p75":[20.454545454545453],"http://httpbin.default/get/latency-p90":[28.125],"http://httpbin.default/get/latency-p95":[32],"http://httpbin.default/get/latency-p99":[40],"http://httpbin.default/get/latency-p99.9":[40.441037800000004],"http://httpbin.default/get/latency-stddev":[8.340658047253257],"http://httpbin.default/get/request-count":[100]}],"histMetricValues":[{"http/latency":[{"lower":4.2238750000000005,"upper":5,"count":5},{"lower":5,"upper":6,"count":5},{"lower":6,"upper":7,"count":4},{"lower":7,"upper":8,"count":5},{"lower":8,"upper":9.000000000000002,"count":5},{"lower":9.000000000000002,"upper":10,"count":4},{"lower":10,"upper":11,"count":5},{"lower":11,"upper":12,"count":3},{"lower":12,"upper":14,"count":12},{"lower":14,"upper":16,"count":7},{"lower":16,"upper":18.000000000000004,"count":10},{"lower":18.000000000000004,"upper":20,"count":9},{"lower":20,"upper":25,"count":11},{"lower":25,"upper":30,"count":8},{"lower":30,"upper":35,"count":5},{"lower":35,"upper":40,"count":1},{"lower":40,"upper":40.490041999999995,"count":1}]}],"SummaryMetricValues":[{}]}}`, + fortioDashboardJSON, + string(body), + ) +} + +func TestGetGHZDashboardInvalidMethod(t *testing.T) { + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, util.PerformanceResultPath, nil) + putResult(w, req) + res := w.Result() + defer func() { + err := res.Body.Close() + assert.NoError(t, err) + }() + assert.Equal(t, http.StatusMethodNotAllowed, res.StatusCode) +} + +func TestGetGHZDashboardMissingParameter(t *testing.T) { + tests := []struct { + queryParams url.Values + expectedStatusCode int + }{ + { + expectedStatusCode: http.StatusBadRequest, + }, + { + queryParams: url.Values{ + "namespace": {"default"}, + }, + expectedStatusCode: http.StatusBadRequest, + }, + { + queryParams: url.Values{ + "experiment": {"default"}, + }, + expectedStatusCode: http.StatusBadRequest, + }, + } + + for _, test := range tests { + w := httptest.NewRecorder() + + u, err := url.ParseRequestURI(util.PerformanceResultPath) + assert.NoError(t, err) + u.RawQuery = test.queryParams.Encode() + urlStr := fmt.Sprintf("%v", u) + + req := httptest.NewRequest(http.MethodPut, urlStr, nil) + + putResult(w, req) + res := w.Result() + defer func() { + err := res.Body.Close() + assert.NoError(t, err) + }() + + assert.Equal(t, test.expectedStatusCode, res.StatusCode) + } +} + +const ghzResultJSON = `{ + "EndpointResults": { + "routeguide.RouteGuide.GetFeature": { + "date": "2023-07-17T12:23:56Z", + "endReason": "normal", + "options": { + "call": "routeguide.RouteGuide.GetFeature", + "host": "routeguide.default:50051", + "proto": "/tmp/ghz.proto", + "import-paths": [ + "/tmp", + "." 
+ ], + "insecure": true, + "load-schedule": "const", + "load-start": 0, + "load-end": 0, + "load-step": 0, + "load-step-duration": 0, + "load-max-duration": 0, + "concurrency": 50, + "concurrency-schedule": "const", + "concurrency-start": 1, + "concurrency-end": 0, + "concurrency-step": 0, + "concurrency-step-duration": 0, + "concurrency-max-duration": 0, + "total": 200, + "connections": 1, + "dial-timeout": 10000000000, + "data": { + "latitude": 407838351, + "longitude": -746143763 + }, + "binary": false, + "CPUs": 5, + "count-errors": true + }, + "count": 200, + "total": 592907667, + "average": 25208185, + "fastest": 32375, + "slowest": 195740917, + "rps": 337.3206506368217, + "errorDistribution": { + "rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.96.20.53:50051: connect: connection refused\"": 200 + }, + "statusCodeDistribution": { + "Unavailable": 200 + }, + "latencyDistribution": [ + { + "percentage": 10, + "latency": 35584 + }, + { + "percentage": 25, + "latency": 39958 + }, + { + "percentage": 50, + "latency": 86208 + }, + { + "percentage": 75, + "latency": 12777625 + }, + { + "percentage": 90, + "latency": 106714334 + }, + { + "percentage": 95, + "latency": 189847000 + }, + { + "percentage": 99, + "latency": 195400792 + } + ], + "histogram": [ + { + "mark": 0.000032375, + "count": 1, + "frequency": 0.005 + }, + { + "mark": 0.0196032292, + "count": 167, + "frequency": 0.835 + }, + { + "mark": 0.0391740834, + "count": 0, + "frequency": 0 + }, + { + "mark": 0.05874493759999999, + "count": 0, + "frequency": 0 + }, + { + "mark": 0.07831579179999999, + "count": 0, + "frequency": 0 + }, + { + "mark": 0.097886646, + "count": 3, + "frequency": 0.015 + }, + { + "mark": 0.11745750019999998, + "count": 13, + "frequency": 0.065 + }, + { + "mark": 0.1370283544, + "count": 0, + "frequency": 0 + }, + { + "mark": 0.15659920859999998, + "count": 0, + "frequency": 0 + }, + { + "mark": 0.17617006279999997, + "count": 0, + "frequency": 0 + }, + { + "mark": 0.195740917, + "count": 16, + "frequency": 0.08 + } + ], + "details": [ + { + "timestamp": "2023-07-17T12:23:56.089998719Z", + "latency": 14490041, + "error": "rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.96.20.53:50051: connect: connection refused\"", + "status": "Unavailable" + }, + { + "timestamp": "2023-07-17T12:23:56.090471886Z", + "latency": 13759125, + "error": "rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.96.20.53:50051: connect: connection refused\"", + "status": "Unavailable" + }, + { + "timestamp": "2023-07-17T12:23:56.090528678Z", + "latency": 194468542, + "error": "rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.96.20.53:50051: connect: connection refused\"", + "status": "Unavailable" + }, + { + "timestamp": "2023-07-17T12:23:56.090079886Z", + "latency": 105031291, + "error": "rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.96.20.53:50051: connect: connection refused\"", + "status": "Unavailable" + }, + { + "timestamp": "2023-07-17T12:23:56.090224928Z", + "latency": 100337083, + "error": "rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.96.20.53:50051: connect: connection refused\"", + "status": "Unavailable" + }, + { + "timestamp": "2023-07-17T12:23:56.091097053Z", + 
"latency": 12463750, + "error": "rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.96.20.53:50051: connect: connection refused\"", + "status": "Unavailable" + }, + { + "timestamp": "2023-07-17T12:23:56.091135844Z", + "latency": 12603875, + "error": "rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.96.20.53:50051: connect: connection refused\"", + "status": "Unavailable" + }, + { + "timestamp": "2023-07-17T12:23:56.478469636Z", + "latency": 86208, + "error": "rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.96.20.53:50051: connect: connection refused\"", + "status": "Unavailable" + } + ] + } + } +}` + +const ghzDashboardJSON = `{"Endpoints":{"routeguide.RouteGuide.GetFeature":{"Durations":[{"Version":"0","Bucket":"0.032","Value":1},{"Version":"0","Bucket":"19.603","Value":167},{"Version":"0","Bucket":"39.174","Value":0},{"Version":"0","Bucket":"58.744","Value":0},{"Version":"0","Bucket":"78.315","Value":0},{"Version":"0","Bucket":"97.886","Value":3},{"Version":"0","Bucket":"117.457","Value":13},{"Version":"0","Bucket":"137.028","Value":0},{"Version":"0","Bucket":"156.599","Value":0},{"Version":"0","Bucket":"176.17","Value":0},{"Version":"0","Bucket":"195.74","Value":16}],"Statistics":{"Count":200,"ErrorCount":200},"Status codes":{"Unavailable":200}}},"Summary":{"numVersions":0,"versionNames":null,"SummaryMetricValues":null}}` + +func TestGetGHZDashboard(t *testing.T) { + // instantiate metrics client + tempDirPath := t.TempDir() + client, err := badgerdb.GetClient(badger.DefaultOptions(tempDirPath), badgerdb.AdditionalOptions{}) + assert.NoError(t, err) + abn.MetricsClient = client + + // preload metric client with result + err = abn.MetricsClient.SetResult("default", "default", []byte(ghzResultJSON)) + assert.NoError(t, err) + + w := httptest.NewRecorder() + + // construct inputs to getGHZDashboard + u, err := url.ParseRequestURI(util.PerformanceResultPath) + assert.NoError(t, err) + params := url.Values{ + "namespace": {"default"}, + "experiment": {"default"}, + } + u.RawQuery = params.Encode() + urlStr := fmt.Sprintf("%v", u) + + req := httptest.NewRequest(http.MethodGet, urlStr, nil) + + // get ghz dashboard based on result in metrics client + getGHZDashboard(w, req) + res := w.Result() + defer func() { + err := res.Body.Close() + assert.NoError(t, err) + }() + + // check the ghz dashboard + body, err := io.ReadAll(res.Body) + assert.NoError(t, err) + assert.Equal( + t, + ghzDashboardJSON, string(body), ) } From 2e955f5cc8244fc7e5911634d964204bcc26d54e Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Fri, 28 Jul 2023 08:46:52 -0400 Subject: [PATCH 014/121] Rename functions Signed-off-by: Alan Cha --- metrics/server.go | 37 +++++++++++++++++++------------------ 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/metrics/server.go b/metrics/server.go index acd53eb83..810805f07 100644 --- a/metrics/server.go +++ b/metrics/server.go @@ -65,7 +65,7 @@ type metricSummary struct { SummaryOverUsers []*versionSummarizedMetric } -// httpEndpointRow is the data needed to produce a single row in the Iter8 Grafana dashboard +// httpEndpointRow is the data needed to produce a single row for an HTTP experiment in the Iter8 Grafana dashboard type httpEndpointRow struct { Durations grafanaHistogram Statistics storage.SummarizedMetric @@ -88,7 +88,8 @@ type ghzStatistics struct { ErrorCount float64 } -type ghzEndpointPanel 
struct { +// ghzEndpointRow is the data needed to produce a single row for a gRPC experiment in the Iter8 Grafana dashboard +type ghzEndpointRow struct { Durations grafanaHistogram Statistics ghzStatistics StatusCodeDistribution map[string]int `json:"Status codes"` @@ -96,7 +97,7 @@ type ghzEndpointPanel struct { type ghzDashboard struct { // key is the endpoint - Endpoints map[string]ghzEndpointPanel + Endpoints map[string]ghzEndpointRow Summary util.Insights } @@ -443,20 +444,20 @@ func getHTTPStatistics(fortioHistogram *fstats.HistogramData, decimalPlace float } func getHTTPEndpointRow(httpRunnerResults *fhttp.HTTPRunnerResults) httpEndpointRow { - result := httpEndpointRow{} + row := httpEndpointRow{} if httpRunnerResults.DurationHistogram != nil { - panel.Durations = getHTTPHistogram(httpRunnerResults.DurationHistogram.Data, 1) - panel.Statistics = getHTTPStatistics(httpRunnerResults.DurationHistogram, 1) + row.Durations = getHTTPHistogram(httpRunnerResults.DurationHistogram.Data, 1) + row.Statistics = getHTTPStatistics(httpRunnerResults.DurationHistogram, 1) } if httpRunnerResults.ErrorsDurationHistogram != nil { - panel.ErrorDurations = getHTTPHistogram(httpRunnerResults.ErrorsDurationHistogram.Data, 1) - panel.ErrorStatistics = getHTTPStatistics(httpRunnerResults.ErrorsDurationHistogram, 1) + row.ErrorDurations = getHTTPHistogram(httpRunnerResults.ErrorsDurationHistogram.Data, 1) + row.ErrorStatistics = getHTTPStatistics(httpRunnerResults.ErrorsDurationHistogram, 1) } - panel.ReturnCodes = httpRunnerResults.RetCodes + row.ReturnCodes = httpRunnerResults.RetCodes - return panel + return row } func getHTTPDashboardHelper(fortioResult util.FortioResult) httpDashboard { @@ -626,27 +627,27 @@ func getGHZStatistics(ghzRunnerReport runner.Report) ghzStatistics { } } -func getGHZEndpointPanel(ghzRunnerReport runner.Report) ghzEndpointPanel { - panel := ghzEndpointPanel{} +func getGHZEndpointRow(ghzRunnerReport runner.Report) ghzEndpointRow { + row := ghzEndpointRow{} if ghzRunnerReport.Histogram != nil { - panel.Durations = getGHZHistogram(ghzRunnerReport.Histogram, 3) - panel.Statistics = getGHZStatistics(ghzRunnerReport) + row.Durations = getGHZHistogram(ghzRunnerReport.Histogram, 3) + row.Statistics = getGHZStatistics(ghzRunnerReport) } - panel.StatusCodeDistribution = ghzRunnerReport.StatusCodeDist + row.StatusCodeDistribution = ghzRunnerReport.StatusCodeDist - return panel + return row } func getGHZDashboardHelper(ghzResult util.GHZResult) ghzDashboard { dashboard := ghzDashboard{ - Endpoints: map[string]ghzEndpointPanel{}, + Endpoints: map[string]ghzEndpointRow{}, } for endpoint, endpointResult := range ghzResult.EndpointResults { endpointResult := endpointResult - dashboard.Endpoints[endpoint] = getGHZEndpointPanel(endpointResult) + dashboard.Endpoints[endpoint] = getGHZEndpointRow(endpointResult) } dashboard.Summary = ghzResult.Summary From c22d8bacf5c74762a4987a5a34eae457bb5ded6d Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Fri, 28 Jul 2023 08:54:25 -0400 Subject: [PATCH 015/121] Address Michael's comments Signed-off-by: Alan Cha --- base/collect_grpc.go | 10 +++++----- metrics/server.go | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/base/collect_grpc.go b/base/collect_grpc.go index 9b1ddd6bc..c8612f6d1 100644 --- a/base/collect_grpc.go +++ b/base/collect_grpc.go @@ -59,7 +59,7 @@ type collectGRPCTask struct { // This data will be transformed into httpDashboard when getGHZGrafana is called type GHZResult struct { // key is the endpoint - EndpointResults 
map[string]runner.Report + EndpointResults map[string]*runner.Report Summary Insights } @@ -86,11 +86,11 @@ func (t *collectGRPCTask) validateInputs() error { } // resultForVersion collects gRPC test result for a given version -func (t *collectGRPCTask) resultForVersion() (map[string]runner.Report, error) { +func (t *collectGRPCTask) resultForVersion() (map[string]*runner.Report, error) { // the main idea is to run ghz with proper options var err error - results := map[string]runner.Report{} + results := map[string]*runner.Report{} if len(t.With.Endpoints) > 0 { log.Logger.Trace("multiple endpoints") @@ -127,7 +127,7 @@ func (t *collectGRPCTask) resultForVersion() (map[string]runner.Report, error) { if t.With.Grafana { resultsKey = endpoint.Call } - results[resultsKey] = *igr + results[resultsKey] = igr } } else { // TODO: supply all the allowed options @@ -144,7 +144,7 @@ func (t *collectGRPCTask) resultForVersion() (map[string]runner.Report, error) { if t.With.Grafana { resultsKey = t.With.Call } - results[resultsKey] = *igr + results[resultsKey] = igr } return results, err diff --git a/metrics/server.go b/metrics/server.go index 810805f07..9efbcf807 100644 --- a/metrics/server.go +++ b/metrics/server.go @@ -614,7 +614,7 @@ func getGHZHistogram(ghzHistogram []runner.Bucket, decimalPlace float64) grafana return grafanaHistogram } -func getGHZStatistics(ghzRunnerReport runner.Report) ghzStatistics { +func getGHZStatistics(ghzRunnerReport *runner.Report) ghzStatistics { // populate error count & rate ec := float64(0) for _, count := range ghzRunnerReport.ErrorDist { @@ -627,7 +627,7 @@ func getGHZStatistics(ghzRunnerReport runner.Report) ghzStatistics { } } -func getGHZEndpointRow(ghzRunnerReport runner.Report) ghzEndpointRow { +func getGHZEndpointRow(ghzRunnerReport *runner.Report) ghzEndpointRow { row := ghzEndpointRow{} if ghzRunnerReport.Histogram != nil { From 121940a586d6fceb5e2fd27700f3363a4d31cf25 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Fri, 28 Jul 2023 11:47:56 -0400 Subject: [PATCH 016/121] Rename to getGRPCDashboard Signed-off-by: Alan Cha --- metrics/server.go | 20 ++++++++++---------- metrics/server_test.go | 4 ++-- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/metrics/server.go b/metrics/server.go index 9efbcf807..e1ad96a30 100644 --- a/metrics/server.go +++ b/metrics/server.go @@ -122,7 +122,7 @@ func Start(stopCh <-chan struct{}) error { http.HandleFunc("/metrics", getMetrics) http.HandleFunc(util.PerformanceResultPath, putResult) http.HandleFunc("/httpDashboard", getHTTPDashboard) - http.HandleFunc("/ghzDashboard", getGHZDashboard) + http.HandleFunc("/grpcDashboard", getGRPCDashboard) // configure HTTP server server := &http.Server{ @@ -600,7 +600,7 @@ func getHTTPDashboard(w http.ResponseWriter, r *http.Request) { _, _ = w.Write(dashboardBytes) } -func getGHZHistogram(ghzHistogram []runner.Bucket, decimalPlace float64) grafanaHistogram { +func getGRPCHistogram(ghzHistogram []runner.Bucket, decimalPlace float64) grafanaHistogram { grafanaHistogram := grafanaHistogram{} for _, bucket := range ghzHistogram { @@ -614,7 +614,7 @@ func getGHZHistogram(ghzHistogram []runner.Bucket, decimalPlace float64) grafana return grafanaHistogram } -func getGHZStatistics(ghzRunnerReport *runner.Report) ghzStatistics { +func getGRPCStatistics(ghzRunnerReport *runner.Report) ghzStatistics { // populate error count & rate ec := float64(0) for _, count := range ghzRunnerReport.ErrorDist { @@ -627,12 +627,12 @@ func getGHZStatistics(ghzRunnerReport *runner.Report) 
ghzStatistics { } } -func getGHZEndpointRow(ghzRunnerReport *runner.Report) ghzEndpointRow { +func getGRPCEndpointRow(ghzRunnerReport *runner.Report) ghzEndpointRow { row := ghzEndpointRow{} if ghzRunnerReport.Histogram != nil { - row.Durations = getGHZHistogram(ghzRunnerReport.Histogram, 3) - row.Statistics = getGHZStatistics(ghzRunnerReport) + row.Durations = getGRPCHistogram(ghzRunnerReport.Histogram, 3) + row.Statistics = getGRPCStatistics(ghzRunnerReport) } row.StatusCodeDistribution = ghzRunnerReport.StatusCodeDist @@ -640,14 +640,14 @@ func getGHZEndpointRow(ghzRunnerReport *runner.Report) ghzEndpointRow { return row } -func getGHZDashboardHelper(ghzResult util.GHZResult) ghzDashboard { +func getGRPCDashboardHelper(ghzResult util.GHZResult) ghzDashboard { dashboard := ghzDashboard{ Endpoints: map[string]ghzEndpointRow{}, } for endpoint, endpointResult := range ghzResult.EndpointResults { endpointResult := endpointResult - dashboard.Endpoints[endpoint] = getGHZEndpointRow(endpointResult) + dashboard.Endpoints[endpoint] = getGRPCEndpointRow(endpointResult) } dashboard.Summary = ghzResult.Summary @@ -655,7 +655,7 @@ func getGHZDashboardHelper(ghzResult util.GHZResult) ghzDashboard { return dashboard } -func getGHZDashboard(w http.ResponseWriter, r *http.Request) { +func getGRPCDashboard(w http.ResponseWriter, r *http.Request) { log.Logger.Trace("getGHZDashboard called") defer log.Logger.Trace("getGHZDashboard completed") @@ -705,7 +705,7 @@ func getGHZDashboard(w http.ResponseWriter, r *http.Request) { } // JSON marshal the dashboard - dashboardBytes, err := json.Marshal(getGHZDashboardHelper(ghzResult)) + dashboardBytes, err := json.Marshal(getGRPCDashboardHelper(ghzResult)) if err != nil { errorMessage := "cannot JSON marshal ghz dashboard" log.Logger.Error(errorMessage) diff --git a/metrics/server_test.go b/metrics/server_test.go index d2c3385a2..1ac657ddf 100644 --- a/metrics/server_test.go +++ b/metrics/server_test.go @@ -362,7 +362,7 @@ func TestGetGHZDashboardHelper(t *testing.T) { err := json.Unmarshal([]byte(ghzResultJSON), &ghzResult) assert.NoError(t, err) - dashboard := getGHZDashboardHelper(ghzResult) + dashboard := getGRPCDashboardHelper(ghzResult) assert.NotNil(t, dashboard) dashboardBytes, err := json.Marshal(dashboard) @@ -1336,7 +1336,7 @@ func TestGetGHZDashboard(t *testing.T) { req := httptest.NewRequest(http.MethodGet, urlStr, nil) // get ghz dashboard based on result in metrics client - getGHZDashboard(w, req) + getGRPCDashboard(w, req) res := w.Result() defer func() { err := res.Body.Close() From 7a81e0d91558294875e0ba4138d6a2a9d38ba650 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Fri, 28 Jul 2023 16:29:10 -0400 Subject: [PATCH 017/121] Delete assess, report, SLOs, rewards Signed-off-by: Alan Cha --- .github/workflows/assets.yaml | 4 +- .github/workflows/lintcharts2.yaml | 2 +- .github/workflows/testcharts.yaml | 4 + action/assert.go | 122 -------- action/assert_test.go | 34 --- action/doc.go | 5 +- action/report.go | 78 ----- action/report/common.go | 137 --------- action/report/doc.go | 3 - action/report/htmlreport.tpl | 202 ------------- action/report/htmlreporter.go | 174 ----------- action/report/report_test.go | 65 ----- action/report/test_helpers.go | 32 -- action/report/textreport.tpl | 30 -- action/report/textreporter.go | 195 ------------- action/report_test.go | 70 ----- action/run_test.go | 1 - autox/application.tpl | 39 --- autox/config.go | 74 ----- autox/config_test.go | 38 --- autox/doc.go | 2 - autox/informer.go | 451 
----------------------------- autox/informer_test.go | 366 ----------------------- autox/k8sclient.go | 71 ----- autox/k8sclient_test.go | 53 ---- autox/watcher.go | 91 ------ autox/watcher_test.go | 156 ---------- base/assess.go | 161 ---------- base/assess_test.go | 44 --- base/collect_grpc_test.go | 51 +--- base/experiment.go | 139 --------- base/experiment_test.go | 29 +- base/mock_qs_test.go | 49 +--- cmd/autox.go | 41 --- cmd/k.go | 6 - cmd/kassert.go | 71 ----- cmd/kassert_test.go | 98 ------- cmd/kreport.go | 48 --- cmd/kreport_test.go | 40 --- cmd/root.go | 3 - driver/filedriver_test.go | 2 +- driver/kubedriver_test.go | 2 +- 42 files changed, 19 insertions(+), 3264 deletions(-) delete mode 100644 action/assert.go delete mode 100644 action/assert_test.go delete mode 100644 action/report.go delete mode 100644 action/report/common.go delete mode 100644 action/report/doc.go delete mode 100644 action/report/htmlreport.tpl delete mode 100644 action/report/htmlreporter.go delete mode 100644 action/report/report_test.go delete mode 100644 action/report/test_helpers.go delete mode 100644 action/report/textreport.tpl delete mode 100644 action/report/textreporter.go delete mode 100644 action/report_test.go delete mode 100644 autox/application.tpl delete mode 100644 autox/config.go delete mode 100644 autox/config_test.go delete mode 100644 autox/doc.go delete mode 100644 autox/informer.go delete mode 100644 autox/informer_test.go delete mode 100644 autox/k8sclient.go delete mode 100644 autox/k8sclient_test.go delete mode 100644 autox/watcher.go delete mode 100644 autox/watcher_test.go delete mode 100644 base/assess.go delete mode 100644 base/assess_test.go delete mode 100644 cmd/autox.go delete mode 100644 cmd/kassert.go delete mode 100644 cmd/kassert_test.go delete mode 100644 cmd/kreport.go delete mode 100644 cmd/kreport_test.go diff --git a/.github/workflows/assets.yaml b/.github/workflows/assets.yaml index 34c6bfa68..fe027a183 100644 --- a/.github/workflows/assets.yaml +++ b/.github/workflows/assets.yaml @@ -205,7 +205,7 @@ jobs: - name: k launch with readiness checks run: | iter8 k launch \ - --set "tasks={ready,http,assess}" \ + --set "tasks={ready,http}" \ --set ready.deploy="httpbin" \ --set ready.service="httpbin" \ --set ready.timeout=60s \ @@ -235,7 +235,7 @@ jobs: - name: k launch with readiness checks run: | iter8 k launch -n experiments \ - --set "tasks={ready,http,assess}" \ + --set "tasks={ready,http}" \ --set ready.deploy="httpbin" \ --set ready.service="httpbin" \ --set ready.timeout=60s \ diff --git a/.github/workflows/lintcharts2.yaml b/.github/workflows/lintcharts2.yaml index 3580379ad..940e8327d 100644 --- a/.github/workflows/lintcharts2.yaml +++ b/.github/workflows/lintcharts2.yaml @@ -33,7 +33,7 @@ jobs: if: steps.modified-files.outputs.any_modified == 'true' run: | helm template charts/iter8 \ - --set "tasks={ready,http,assess}" \ + --set "tasks={ready,http}" \ --set ready.deploy=httpbin \ --set ready.service=httpbin \ --set ready.timeout=60s \ diff --git a/.github/workflows/testcharts.yaml b/.github/workflows/testcharts.yaml index 1d5c19dff..67c090057 100644 --- a/.github/workflows/testcharts.yaml +++ b/.github/workflows/testcharts.yaml @@ -582,6 +582,10 @@ jobs: --set 'groups.httpbin.trigger.version=v1' \ --set 'groups.httpbin.trigger.resource=deployments' \ --set 'groups.httpbin.specs.iter8.name=iter8' \ + --set 'groups.httpbin.specs.iter8.values.tasks={ready,http}' \ + --set 'groups.httpbin.specs.iter8.values.ready.deploy=httpbin' \ + --set 
'groups.httpbin.specs.iter8.values.ready.service=httpbin' \ + --set 'groups.httpbin.specs.iter8.values.ready.timeout=60s' \ --set 'groups.httpbin.specs.iter8.values.http.url=http://httpbin.default/get' \ --set 'groups.httpbin.specs.iter8.version=0.15.0' \ --set 'groups.httpbin.specs.iter8.values.runner=job' diff --git a/action/assert.go b/action/assert.go deleted file mode 100644 index 66f69a1e2..000000000 --- a/action/assert.go +++ /dev/null @@ -1,122 +0,0 @@ -package action - -import ( - "fmt" - "strings" - "time" - - "github.com/iter8-tools/iter8/base" - "github.com/iter8-tools/iter8/base/log" - "github.com/iter8-tools/iter8/driver" -) - -const ( - // Completed states that the experiment is complete - Completed = "completed" - // NoFailure states that none of the tasks in the experiment have failed - NoFailure = "nofailure" - // SLOs states that all app versions participating in the experiment satisfy SLOs - SLOs = "slos" -) - -// AssertOpts are the options used for asserting experiment results -type AssertOpts struct { - // Timeout is the duration to wait for conditions to be satisfied - Timeout time.Duration - // Conditions are checked by assert - Conditions []string - // RunOpts provides options relating to experiment resources - RunOpts -} - -// NewAssertOpts initializes and returns assert opts -func NewAssertOpts(kd *driver.KubeDriver) *AssertOpts { - return &AssertOpts{ - RunOpts: *NewRunOpts(kd), - } -} - -// KubeRun asserts conditions for a Kubernetes experiment -func (aOpts *AssertOpts) KubeRun() (bool, error) { - if err := aOpts.KubeDriver.Init(); err != nil { - return false, err - } - - return aOpts.Run(aOpts.KubeDriver) -} - -// Run builds the experiment and verifies assert conditions -func (aOpts *AssertOpts) Run(eio base.Driver) (bool, error) { - allGood, err := aOpts.verify(eio) - if err != nil { - return false, err - } - if !allGood { - log.Logger.Error("assert conditions failed") - return false, nil - } - return true, nil -} - -// verify implements the core logic of assert -func (aOpts *AssertOpts) verify(eio base.Driver) (bool, error) { - // timeSpent tracks how much time has been spent so far in assert attempts - var timeSpent, _ = time.ParseDuration("0s") - - // sleepTime specifies how long to sleep in between retries of asserts - var sleepTime, _ = time.ParseDuration("3s") - - // check assert conditions - for { - exp, err := base.BuildExperiment(eio) - if err != nil { - return false, err - } - - allGood := true - - for _, cond := range aOpts.Conditions { - if strings.ToLower(cond) == Completed { - c := exp.Completed() - allGood = allGood && c - if c { - log.Logger.Info("experiment completed") - } else { - log.Logger.Info("experiment did not complete") - } - } else if strings.ToLower(cond) == NoFailure { - nf := exp.NoFailure() - allGood = allGood && nf - if nf { - log.Logger.Info("experiment has no failure") - } else { - log.Logger.Info("experiment failed") - } - } else if strings.ToLower(cond) == SLOs { - slos := exp.SLOs() - allGood = allGood && slos - if slos { - log.Logger.Info("SLOs are satisfied") - } else { - log.Logger.Info("SLOs are not satisfied") - } - } else { - log.Logger.Error("unsupported assert condition detected; ", cond) - return false, fmt.Errorf("unsupported assert condition detected; %v", cond) - } - } - - if allGood { - log.Logger.Info("all conditions were satisfied") - return true, nil - } - if timeSpent >= aOpts.Timeout { - log.Logger.Info("not all conditions were satisfied") - return false, nil - } - log.Logger.Infof("sleeping %v 
................................", sleepTime) - time.Sleep(sleepTime) - timeSpent += sleepTime - } - -} diff --git a/action/assert_test.go b/action/assert_test.go deleted file mode 100644 index 54d7cdc66..000000000 --- a/action/assert_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package action - -import ( - "context" - "os" - "testing" - - "github.com/iter8-tools/iter8/base" - "github.com/iter8-tools/iter8/driver" - "github.com/stretchr/testify/assert" - "helm.sh/helm/v3/pkg/cli" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestKubeAssert(t *testing.T) { - _ = os.Chdir(t.TempDir()) - // fix aOpts - aOpts := NewAssertOpts(driver.NewFakeKubeDriver(cli.New())) - aOpts.Conditions = []string{Completed, NoFailure, SLOs} - - byteArray, _ := os.ReadFile(base.CompletePath("../testdata/assertinputs", driver.ExperimentPath)) - _, _ = aOpts.Clientset.CoreV1().Secrets("default").Create(context.TODO(), &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "default", - Namespace: "default", - }, - StringData: map[string]string{driver.ExperimentPath: string(byteArray)}, - }, metav1.CreateOptions{}) - - ok, err := aOpts.KubeRun() - assert.True(t, ok) - assert.NoError(t, err) -} diff --git a/action/doc.go b/action/doc.go index 6dbc69a6e..7ba4946fc 100644 --- a/action/doc.go +++ b/action/doc.go @@ -1,6 +1,5 @@ // Package action contains the logic for each action that Iter8 can perform. // -// This is a library for calling top-level Iter8 actions like 'launch', -// 'assert', or 'report'. Actions approximately match the command line -// invocations that the Iter8 CLI uses. +// This is a library for calling top-level Iter8 actions like 'launch'. +// Actions approximately match the command line invocations that the Iter8 CLI uses. 
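The action/assert.go removed above boiled down to a poll-until-timeout loop over the completed/nofailure/slos conditions. A minimal stdlib sketch of that pattern (hypothetical helper, simplified; not the deleted implementation):

package main

import (
	"fmt"
	"time"
)

// pollConditions re-evaluates every condition until all pass or the timeout elapses.
func pollConditions(conds []func() bool, timeout, interval time.Duration) bool {
	deadline := time.Now().Add(timeout)
	for {
		allGood := true
		for _, c := range conds {
			allGood = allGood && c()
		}
		if allGood {
			return true // all conditions satisfied
		}
		if time.Now().After(deadline) {
			return false // gave up: timeout reached
		}
		time.Sleep(interval)
	}
}

func main() {
	start := time.Now()
	completed := func() bool { return time.Since(start) > 2*time.Second } // stand-in condition
	fmt.Println("satisfied:", pollConditions([]func() bool{completed}, 5*time.Second, time.Second))
}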
package action diff --git a/action/report.go b/action/report.go deleted file mode 100644 index dc2c0061e..000000000 --- a/action/report.go +++ /dev/null @@ -1,78 +0,0 @@ -package action - -import ( - "fmt" - "io" - "strings" - - "github.com/iter8-tools/iter8/action/report" - "github.com/iter8-tools/iter8/base" - "github.com/iter8-tools/iter8/base/log" - "github.com/iter8-tools/iter8/driver" -) - -const ( - // TextOutputFormatKey is the output format used to create text output - TextOutputFormatKey = "text" - - // HTMLOutputFormatKey is the output format used to create html output - HTMLOutputFormatKey = "html" -) - -// ReportOpts are the options used for generating reports from experiment result -type ReportOpts struct { - // OutputFormat specifies the output format to be used by report - OutputFormat string - // RunOpts enables fetching local experiment spec and result - RunOpts - // KubeDriver enables fetching Kubernetes experiment spec and result - *driver.KubeDriver -} - -// NewReportOpts initializes and returns report opts -func NewReportOpts(kd *driver.KubeDriver) *ReportOpts { - return &ReportOpts{ - RunOpts: RunOpts{ - RunDir: ".", - }, - OutputFormat: TextOutputFormatKey, - KubeDriver: kd, - } -} - -// KubeRun generates report for a Kubernetes experiment -func (rOpts *ReportOpts) KubeRun(out io.Writer) error { - if err := rOpts.KubeDriver.Init(); err != nil { - return err - } - return rOpts.Run(rOpts, out) -} - -// Run generates the text or HTML report -func (rOpts *ReportOpts) Run(eio base.Driver, out io.Writer) error { - var e *base.Experiment - var err error - if e, err = base.BuildExperiment(eio); err != nil { - return err - } - switch strings.ToLower(rOpts.OutputFormat) { - case TextOutputFormatKey: - reporter := report.TextReporter{ - Reporter: &report.Reporter{ - Experiment: e, - }, - } - return reporter.Gen(out) - case HTMLOutputFormatKey: - reporter := report.HTMLReporter{ - Reporter: &report.Reporter{ - Experiment: e, - }, - } - return reporter.Gen(out) - default: - e := fmt.Errorf("unsupported report format %v", rOpts.OutputFormat) - log.Logger.Error(e) - return e - } -} diff --git a/action/report/common.go b/action/report/common.go deleted file mode 100644 index d0ca8552d..000000000 --- a/action/report/common.go +++ /dev/null @@ -1,137 +0,0 @@ -package report - -import ( - "fmt" - "sort" - - "github.com/iter8-tools/iter8/base" - "github.com/iter8-tools/iter8/base/log" -) - -// Reporter implements methods that are common to text and HTML reporting. -type Reporter struct { - // Experiment enables access to all base.Experiment data and methods - *base.Experiment -} - -// SortedScalarAndSLOMetrics extracts and sorts metric names from experiment. -// It looks for scalar metrics referenced in the MetricsInfo section, -// and also for scalar metrics referenced in SLOs. 
-func (r *Reporter) SortedScalarAndSLOMetrics() []string { - keys := []string{} - // add scalar and summary metrics referenced in MetricsInfo - for k, mm := range r.Result.Insights.MetricsInfo { - if mm.Type == base.CounterMetricType || mm.Type == base.GaugeMetricType { - keys = append(keys, k) - } - if mm.Type == base.SummaryMetricType { - for _, agg := range []base.AggregationType{ - base.CountAggregator, - base.MeanAggregator, - base.StdDevAggregator, - base.MinAggregator, - base.MaxAggregator} { - keys = append(keys, k+"/"+string(agg)) - } - } - } - // also add metrics referenced in SLOs - // only scalar metrics can feature in SLOs (for now) - if r.Result.Insights.SLOs != nil { - for _, v := range r.Result.Insights.SLOs.Upper { - nm, err := base.NormalizeMetricName(v.Metric) - if err == nil { - keys = append(keys, nm) - } - } - for _, v := range r.Result.Insights.SLOs.Lower { - nm, err := base.NormalizeMetricName(v.Metric) - if err == nil { - keys = append(keys, nm) - } - } - } - // remove duplicates - tmp := base.Uniq(keys) - uniqKeys := []string{} - for _, val := range tmp { - uniqKeys = append(uniqKeys, val.(string)) - } - // return sorted metrics - sort.Strings(uniqKeys) - return uniqKeys -} - -// ScalarMetricValueStr extracts value of a scalar metric (mn) for the given app version (j) -// Value is converted to string so that it can be printed in text and HTML reports. -func (r *Reporter) ScalarMetricValueStr(j int, mn string) string { - val := r.Result.Insights.ScalarMetricValue(j, mn) - if val != nil { - return fmt.Sprintf("%0.2f", *val) - } - return "unavailable" -} - -// MetricWithUnits provides the string representation of a metric name with units -func (r *Reporter) MetricWithUnits(metricName string) (string, error) { - in := r.Result.Insights - nm, err := base.NormalizeMetricName(metricName) - if err != nil { - return "", err - } - - m, err := in.GetMetricsInfo(nm) - if err != nil { - e := fmt.Errorf("unable to get metrics info for %v", nm) - log.Logger.Error(e) - return "", e - } - str := nm - if m.Units != nil { - str = fmt.Sprintf("%v (%v)", str, *m.Units) - } - return str, nil -} - -// GetBestVersions returns list of best versions for each metric -func (r *Reporter) GetBestVersions(metrics []string, in *base.Insights) []string { - results := make([]string, len(metrics)) - if in.Rewards == nil { - return results - } - - rewards := *in.Rewards - winners := *in.RewardsWinners - - for i, mn := range metrics { - j := indexString(rewards.Max, mn) - if j >= 0 { - if winners.Max[j] == -1 { - results[i] = "insufficient data" - } else { - results[i] = in.TrackVersionStr(winners.Max[j]) - } - } else { - j = indexString(rewards.Min, mn) - if j >= 0 { - if winners.Min[j] == -1 { - results[i] = "insufficient data" - } else { - results[i] = in.TrackVersionStr(winners.Min[j]) - } - } else { - results[i] = "n/a" - } - } - } - return results -} - -func indexString(keys []string, item string) int { - for i, key := range keys { - if key == item { - return i - } - } - return -1 -} diff --git a/action/report/doc.go b/action/report/doc.go deleted file mode 100644 index 124efe484..000000000 --- a/action/report/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package report contains primitives for reporting the results of an experiment. -// It supports text and HTML report formats. 
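SortedScalarAndSLOMetrics, deleted above, ultimately reduces to de-duplicating and sorting metric names before they are printed. A stdlib-only sketch of that step (hypothetical helper name):

package main

import (
	"fmt"
	"sort"
)

// uniqSorted returns the distinct names in ascending order.
func uniqSorted(names []string) []string {
	seen := map[string]bool{}
	out := []string{}
	for _, n := range names {
		if !seen[n] {
			seen[n] = true
			out = append(out, n)
		}
	}
	sort.Strings(out)
	return out
}

func main() {
	// duplicates arise when a metric is both reported directly and referenced in an SLO
	fmt.Println(uniqSorted([]string{"http/latency-mean", "http/error-rate", "http/latency-mean"}))
	// [http/error-rate http/latency-mean]
}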
-package report diff --git a/action/report/htmlreport.tpl deleted file mode 100644 index aecedee23..000000000 --- a/action/report/htmlreport.tpl +++ /dev/null @@ -1,202 +0,0 @@ [The 202 deleted lines of htmlreport.tpl cannot be reproduced here: the HTML markup was stripped during extraction. The recoverable structure of the "Iter8 Experiment Report" page: a "Service level objectives (SLOs)" section ("Whether or not SLOs are satisfied") rendering an "SLO Conditions" table with one column per version; a "Metric Histograms" section iterating over .SortedVectorMetrics; a "Latest observed values for metrics" table (with a "Best" column when rewards are defined); and a fallback "Metrics-based Insights" message: "Insights not found in experiment results. You may need to retry this report at a later time."]
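The htmlreporter.go deletion that follows fed data into the template summarized above; the underlying mechanism is an html/template parsed with a custom FuncMap. A toy sketch of that mechanism (illustrative template and data, not the removed report):

package main

import (
	"html/template"
	"os"
)

func main() {
	funcs := template.FuncMap{
		// map an SLO-satisfied flag to a CSS class, as the deleted renderSLOSatisfiedCellClass did
		"cellClass": func(ok bool) string {
			if ok {
				return "text-success"
			}
			return "text-danger"
		},
	}
	tpl := template.Must(template.New("report").Funcs(funcs).Parse(
		`{{range .}}<td class="{{cellClass .}}">{{.}}</td>{{end}}`))
	_ = tpl.Execute(os.Stdout, []bool{true, false})
	// <td class="text-success">true</td><td class="text-danger">false</td>
}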
- - diff --git a/action/report/htmlreporter.go b/action/report/htmlreporter.go deleted file mode 100644 index 241e81a50..000000000 --- a/action/report/htmlreporter.go +++ /dev/null @@ -1,174 +0,0 @@ -package report - -import ( - "bytes" - "errors" - "fmt" - "io" - "math/rand" - "sort" - - htmlT "html/template" - - _ "embed" - - "github.com/Masterminds/sprig" - "github.com/iter8-tools/iter8/base" - "github.com/iter8-tools/iter8/base/log" -) - -// HTMLReporter supports generation of HTML reports from experiments. -type HTMLReporter struct { - // Reporter enables access to all reporter data and methods - *Reporter -} - -// reportHTML is the HTML report template -// -//go:embed htmlreport.tpl -var reportHTML string - -// Gen creates an HTML report for a given experiment -func (ht *HTMLReporter) Gen(out io.Writer) error { - - // create HTML template - htpl, err := htmlT.New("report").Option("missingkey=error").Funcs(sprig.FuncMap()).Funcs(htmlT.FuncMap{ - "renderSLOSatisfiedHTML": renderSLOSatisfiedHTML, - "renderSLOSatisfiedCellClass": renderSLOSatisfiedCellClass, - }).Parse(reportHTML) - if err != nil { - e := errors.New("unable to parse HTML template") - log.Logger.WithStackTrace(err.Error()).Error(e) - return e - } - - var b bytes.Buffer - if err = htpl.Execute(&b, ht); err != nil { - e := errors.New("unable to execute template") - log.Logger.WithStackTrace(err.Error()).Error(e) - return e - } - - // print output - fmt.Fprintln(out, b.String()) - return nil -} - -// RenderStr is a helper method for rendering strings -// Used in HTML template -func (ht *HTMLReporter) RenderStr(what string) (string, error) { - var val string - var err error - switch what { - case "showClassStatus": - val = "show" - if ht.NoFailure() { - val = "" - } - case "textColorStatus": - val = "text-danger" - if ht.NoFailure() { - val = "text-success" - } - case "thumbsStatus": - val = "down" - if ht.NoFailure() { - val = "up" - } - case "msgStatus": - completionStatus := "Experiment completed." - if !ht.Completed() { - completionStatus = "Experiment has not completed." - } - failureStatus := "Experiment has failures." - if ht.NoFailure() { - failureStatus = "Experiment has no failures." 
- } - taskStatus := fmt.Sprintf("%v out of %v tasks are complete.", ht.Result.NumCompletedTasks, len(ht.Spec)) - loopStatus := fmt.Sprintf("%d loops have completed.", ht.Result.NumLoops) - val = fmt.Sprint(completionStatus) - val += " " - val += fmt.Sprint(failureStatus) - val += " " - val += fmt.Sprint(taskStatus) - val += " " - val += fmt.Sprint(loopStatus) - default: - err = fmt.Errorf("do not know how to render %v", what) - } - return val, err -} - -// MetricDescriptionHTML is used to described metrics in the metrics and SLO section of the HTML report -func (ht *HTMLReporter) MetricDescriptionHTML(metricName string) (string, error) { - in := ht.Result.Insights - nm, err := base.NormalizeMetricName(metricName) - if err != nil { - return "", err - } - - m, err := in.GetMetricsInfo(nm) - if err != nil { - e := fmt.Errorf("unable to get metrics info for %v", nm) - log.Logger.Error(e) - return "", e - } - return m.Description, nil -} - -// renderSLOSatisfiedHTML provides the HTML icon indicating if the SLO is satisfied -func renderSLOSatisfiedHTML(s bool) string { - if s { - return "fa-check-circle" - } - return "fa-times-circle" -} - -// renderSLOSatisfiedCellClass dictates the cell color indicating if the SLO is satisfied -func renderSLOSatisfiedCellClass(s bool) string { - if s { - return "text-success" - } - return "text-danger" -} - -// SortedVectorMetrics extracts vector metric names from experiment in sorted order -func (ht *HTMLReporter) SortedVectorMetrics() []string { - keys := []string{} - for k, mm := range ht.Result.Insights.MetricsInfo { - if mm.Type == base.HistogramMetricType || mm.Type == base.SampleMetricType { - keys = append(keys, k) - } - } - sort.Strings(keys) - return keys -} - -// sampleHist samples values from a histogram -func sampleHist(h []base.HistBucket) []float64 { - vals := []float64{} - for _, b := range h { - for i := 0; i < int(b.Count); i++ { - /* #nosec */ - vals = append(vals, b.Lower+(b.Upper-b.Lower)*rand.Float64()) - } - } - return vals -} - -// VectorMetricValue gets the value of the given vector metric for the given version -// If it is a histogram metric, then its values are sampled from the histogram -// Recall: VectorMetric can be a histogram metric or a sample metric. 
-func (ht *HTMLReporter) VectorMetricValue(i int, m string) []float64 { - in := ht.Result.Insights - mm, ok := in.MetricsInfo[m] - if !ok { - log.Logger.Error("could not find vector metric: ", m) - return nil - } - if mm.Type == base.SampleMetricType { - return in.NonHistMetricValues[i][m] - } - // this is a hist metric - return sampleHist(in.HistMetricValues[i][m]) -} diff --git a/action/report/report_test.go b/action/report/report_test.go deleted file mode 100644 index 90bd6e9d1..000000000 --- a/action/report/report_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package report - -import ( - "os" - "testing" - - "github.com/iter8-tools/iter8/base" - "github.com/iter8-tools/iter8/driver" - "github.com/stretchr/testify/assert" -) - -func TestReportText(t *testing.T) { - _ = os.Chdir(t.TempDir()) - _ = copyFileToPwd(t, base.CompletePath("../../", "testdata/assertinputs/experiment.yaml")) - - fd := driver.FileDriver{ - RunDir: ".", - } - exp, err := base.BuildExperiment(&fd) - assert.NoError(t, err) - reporter := TextReporter{ - Reporter: &Reporter{ - Experiment: exp, - }, - } - err = reporter.Gen(os.Stdout) - assert.NoError(t, err) -} - -func TestReportTextWithLowerSLOs(t *testing.T) { - _ = os.Chdir(t.TempDir()) - _ = copyFileToPwd(t, base.CompletePath("../../", "testdata/assertinputs/experimentWithLowerSLOs.yaml")) - _ = os.Rename("experimentWithLowerSLOs.yaml", "experiment.yaml") - - fd := driver.FileDriver{ - RunDir: ".", - } - exp, err := base.BuildExperiment(&fd) - assert.NoError(t, err) - reporter := TextReporter{ - Reporter: &Reporter{ - Experiment: exp, - }, - } - err = reporter.Gen(os.Stdout) - assert.NoError(t, err) -} - -func TestReportHTMLWithLowerSLOs(t *testing.T) { - _ = os.Chdir(t.TempDir()) - _ = copyFileToPwd(t, base.CompletePath("../../", "testdata/assertinputs/experimentWithLowerSLOs.yaml")) - _ = os.Rename("experimentWithLowerSLOs.yaml", "experiment.yaml") - fd := driver.FileDriver{ - RunDir: ".", - } - exp, err := base.BuildExperiment(&fd) - assert.NoError(t, err) - reporter := HTMLReporter{ - Reporter: &Reporter{ - Experiment: exp, - }, - } - err = reporter.Gen(os.Stdout) - assert.NoError(t, err) -} diff --git a/action/report/test_helpers.go b/action/report/test_helpers.go deleted file mode 100644 index cdd1bd555..000000000 --- a/action/report/test_helpers.go +++ /dev/null @@ -1,32 +0,0 @@ -package report - -import ( - "errors" - "io" - "os" - "path/filepath" - "testing" -) - -// copyFileToPwd copies the specified file to pwd -func copyFileToPwd(t *testing.T, filePath string) error { - // get file - srcFile, err := os.Open(filepath.Clean(filePath)) - if err != nil { - return errors.New("could not open metrics file") - } - t.Cleanup(func() { - _ = srcFile.Close() - }) - - // create copy of file in pwd - destFile, err := os.Create(filepath.Base(filePath)) - if err != nil { - return errors.New("could not create copy of metrics file in temp directory") - } - t.Cleanup(func() { - _ = destFile.Close() - }) - _, _ = io.Copy(destFile, srcFile) - return nil -} diff --git a/action/report/textreport.tpl b/action/report/textreport.tpl deleted file mode 100644 index 5410455dc..000000000 --- a/action/report/textreport.tpl +++ /dev/null @@ -1,30 +0,0 @@ - -Experiment summary: -******************* - - Experiment completed: {{ .Completed }} - No task failures: {{ .NoFailure }} - Total number of tasks: {{ len .Spec }} - Number of completed tasks: {{ .Result.NumCompletedTasks }} - Number of completed loops: {{ .Result.NumLoops }} - -{{- if .Result.Insights }} -{{- if not (empty 
.Result.Insights.SLOs) }} - -Whether or not service level objectives (SLOs) are satisfied: -************************************************************* - -{{ .PrintSLOsText | indent 2 }} -{{- end }} - -Latest observed values for metrics: -*********************************** - -{{ .PrintMetricsText | indent 2 }} -{{- else }} - -Metrics-based Insights: -*********************** - - Insights not found in experiment results. You may need to retry this report at a later time. -{{- end }} diff --git a/action/report/textreporter.go b/action/report/textreporter.go deleted file mode 100644 index 1a6e9d9a4..000000000 --- a/action/report/textreporter.go +++ /dev/null @@ -1,195 +0,0 @@ -package report - -import ( - "bytes" - "errors" - "fmt" - "io" - "text/tabwriter" - textT "text/template" - - _ "embed" - - "github.com/Masterminds/sprig" - "github.com/iter8-tools/iter8/base" - "github.com/iter8-tools/iter8/base/log" -) - -// TextReporter supports generation of text reports from experiments. -type TextReporter struct { - // Reporter is embedded and enables access to all reporter data and methods - *Reporter -} - -// reportText is the text report template -// -//go:embed textreport.tpl -var reportText string - -// Gen writes the text report for a given experiment into the given writer -func (tr *TextReporter) Gen(out io.Writer) error { - // create text template - ttpl, err := textT.New("report").Option("missingkey=error").Funcs(sprig.TxtFuncMap()).Parse(reportText) - if err != nil { - e := errors.New("unable to parse text template") - log.Logger.WithStackTrace(err.Error()).Error(e) - return e - } - - var b bytes.Buffer - if err = ttpl.Execute(&b, tr); err != nil { - e := errors.New("unable to execute template") - log.Logger.WithStackTrace(err.Error()).Error(e) - return e - } - - // print output - fmt.Fprintln(out, b.String()) - return nil -} - -// PrintSLOsText returns SLOs section of the text report as a string -func (tr *TextReporter) PrintSLOsText() string { - var b bytes.Buffer - w := tabwriter.NewWriter(&b, 0, 0, 1, ' ', tabwriter.Debug) - tr.printSLOsText(w) - return b.String() -} - -// getSLOStrText gets the text for an SLO -func (tr *TextReporter) getSLOStrText(i int, upper bool) (string, error) { - in := tr.Result.Insights - var slo base.SLO - if upper { - slo = in.SLOs.Upper[i] - } else { - slo = in.SLOs.Lower[i] - } - // get metric with units and description - str, err := tr.MetricWithUnits(slo.Metric) - if err != nil { - log.Logger.Error("unable to get slo metric with units") - return "", err - } - // add upper limit - if upper { - str = fmt.Sprintf("%v <= %v", str, slo.Limit) - } else { - // add lower limit - str = fmt.Sprintf("%v <= %v", slo.Limit, str) - } - return str, nil -} - -func (tr *TextReporter) printVersions(w *tabwriter.Writer) { - in := tr.Result.Insights - for i := 0; i < in.NumVersions; i++ { - fmt.Fprintf(w, "\t %s", in.TrackVersionStr(i)) - } -} - -// printSLOsText prints all SLOs into tab writer -func (tr *TextReporter) printSLOsText(w *tabwriter.Writer) { - in := tr.Result.Insights - fmt.Fprint(w, "SLO Conditions") - if in.NumVersions > 1 { - tr.printVersions(w) - } else { - fmt.Fprintf(w, "\t Satisfied") - } - fmt.Fprintln(w) - fmt.Fprint(w, "--------------") - for i := 0; i < in.NumVersions; i++ { - fmt.Fprint(w, "\t ---------") - } - fmt.Fprintln(w) - - if in.SLOs != nil { - log.Logger.Debug("SLOs are not nil") - log.Logger.Debug("found ", len(in.SLOs.Upper), " upper SLOs") - for i := 0; i < len(in.SLOs.Upper); i++ { - log.Logger.Debug("Upper SLO ", i) - str, err := 
tr.getSLOStrText(i, true) - if err == nil { - fmt.Fprint(w, str) - for j := 0; j < in.NumVersions; j++ { - fmt.Fprintf(w, "\t %v", in.SLOsSatisfied.Upper[i][j]) - } - fmt.Fprintln(w) - } else { - log.Logger.Error("unable to extract SLO text") - } - } - - log.Logger.Debug("found ", len(in.SLOs.Lower), " lower SLOs") - for i := 0; i < len(in.SLOs.Lower); i++ { - log.Logger.Debug("Lower SLO ", i) - str, err := tr.getSLOStrText(i, false) - if err == nil { - fmt.Fprint(w, str) - for j := 0; j < in.NumVersions; j++ { - fmt.Fprintf(w, "\t %v", in.SLOsSatisfied.Lower[i][j]) - } - fmt.Fprintln(w) - } else { - log.Logger.Error("unable to extract SLO text") - } - } - } - - _ = w.Flush() -} - -// PrintMetricsText returns metrics section of the text report as a string -func (tr *TextReporter) PrintMetricsText() string { - var b bytes.Buffer - w := tabwriter.NewWriter(&b, 0, 0, 1, ' ', tabwriter.Debug) - tr.printMetricsText(w) - return b.String() -} - -// printMetricsText prints metrics into tab writer -func (tr *TextReporter) printMetricsText(w *tabwriter.Writer) { - in := tr.Result.Insights - fmt.Fprint(w, "Metric") - if in.NumVersions > 1 { - tr.printVersions(w) - if in.Rewards != nil { - fmt.Fprintf(w, "\t Best") - } - } else { - fmt.Fprintf(w, "\t value") - } - fmt.Fprintln(w) - fmt.Fprint(w, "-------") - for i := 0; i < in.NumVersions; i++ { - fmt.Fprint(w, "\t -----") - } - if in.NumVersions > 1 && in.Rewards != nil { - fmt.Fprint(w, "\t ----") - } - fmt.Fprintln(w) - - // keys contain normalized scalar metric names in sorted order - keys := tr.SortedScalarAndSLOMetrics() - bestVersions := tr.GetBestVersions(keys, in) - - for i, mn := range keys { - mwu, err := tr.MetricWithUnits(mn) - if err == nil { - // add metric name with units - fmt.Fprint(w, mwu) - // add value - for j := 0; j < in.NumVersions; j++ { - fmt.Fprintf(w, "\t %v", tr.ScalarMetricValueStr(j, mn)) - } - if in.NumVersions > 1 && in.Rewards != nil { - fmt.Fprintf(w, "\t %s", bestVersions[i]) - } - fmt.Fprintln(w) - } else { - log.Logger.Error(err) - } - } - _ = w.Flush() -} diff --git a/action/report_test.go b/action/report_test.go deleted file mode 100644 index fe2a15f9c..000000000 --- a/action/report_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package action - -import ( - "context" - "os" - "testing" - - "github.com/iter8-tools/iter8/base" - "github.com/iter8-tools/iter8/driver" - "github.com/stretchr/testify/assert" - "helm.sh/helm/v3/pkg/cli" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestKubeReportText(t *testing.T) { - _ = os.Chdir(t.TempDir()) - // fix rOpts - rOpts := NewReportOpts(driver.NewFakeKubeDriver(cli.New())) - - byteArray, _ := os.ReadFile(base.CompletePath("../testdata/assertinputs", driver.ExperimentPath)) - _, _ = rOpts.Clientset.CoreV1().Secrets("default").Create(context.TODO(), &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "default", - Namespace: "default", - }, - StringData: map[string]string{driver.ExperimentPath: string(byteArray)}, - }, metav1.CreateOptions{}) - - err := rOpts.KubeRun(os.Stdout) - assert.NoError(t, err) -} - -func TestKubeReportHTML(t *testing.T) { - _ = os.Chdir(t.TempDir()) - // fix rOpts - rOpts := NewReportOpts(driver.NewFakeKubeDriver(cli.New())) - rOpts.OutputFormat = HTMLOutputFormatKey - - byteArray, _ := os.ReadFile(base.CompletePath("../testdata/assertinputs", driver.ExperimentPath)) - _, _ = rOpts.Clientset.CoreV1().Secrets("default").Create(context.TODO(), &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "default", - 
Namespace: "default", - }, - StringData: map[string]string{driver.ExperimentPath: string(byteArray)}, - }, metav1.CreateOptions{}) - - err := rOpts.KubeRun(os.Stdout) - assert.NoError(t, err) -} - -func TestKubeReportInvalid(t *testing.T) { - _ = os.Chdir(t.TempDir()) - // fix rOpts - rOpts := NewReportOpts(driver.NewFakeKubeDriver(cli.New())) - rOpts.OutputFormat = "invalid" - - byteArray, _ := os.ReadFile(base.CompletePath("../testdata/assertinputs", driver.ExperimentPath)) - _, _ = rOpts.Clientset.CoreV1().Secrets("default").Create(context.TODO(), &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "default", - Namespace: "default", - }, - StringData: map[string]string{driver.ExperimentPath: string(byteArray)}, - }, metav1.CreateOptions{}) - - err := rOpts.KubeRun(os.Stdout) - assert.ErrorContains(t, err, "unsupported report format") -} diff --git a/action/run_test.go b/action/run_test.go index 96d8f7f69..51269d7c0 100644 --- a/action/run_test.go +++ b/action/run_test.go @@ -50,6 +50,5 @@ func TestKubeRun(t *testing.T) { assert.NoError(t, err) assert.True(t, exp.Completed()) assert.True(t, exp.NoFailure()) - assert.True(t, exp.SLOs()) assert.Equal(t, 4, exp.Result.NumCompletedTasks) } diff --git a/autox/application.tpl b/autox/application.tpl deleted file mode 100644 index 97a1b6f1c..000000000 --- a/autox/application.tpl +++ /dev/null @@ -1,39 +0,0 @@ -apiVersion: argoproj.io/v1alpha1 -kind: Application -metadata: - name: {{ .Name }} - namespace: argocd - ownerReferences: - - apiVersion: v1 - kind: Secret - name: {{ .Owner.Name }} - uid: {{ .Owner.UID }} - finalizers: - - resources-finalizer.argocd.argoproj.io - labels: - app.kubernetes.io/managed-by: iter8 -spec: - destination: - namespace: {{ .Namespace }} - server: https://kubernetes.default.svc - project: default - source: - chart: {{ .Chart.Name }} - helm: - values: | - {{ .Chart.Values | toYaml | indent 8 | trim }} - repoURL: https://iter8-tools.github.io/iter8 - targetRevision: {{ .Chart.Version }} - ignoreDifferences: - - kind: Secret - name: {{ .Name }} - namespace: {{ .Namespace }} - jsonPointers: - - /data - - /metadata - syncPolicy: - automated: - selfHeal: true - syncOptions: - - CreateNamespace=true - - RespectIgnoreDifferences=true \ No newline at end of file diff --git a/autox/config.go b/autox/config.go deleted file mode 100644 index 6073ae76d..000000000 --- a/autox/config.go +++ /dev/null @@ -1,74 +0,0 @@ -package autox - -// config.go - reading of configuration (list of resources/namespaces to watch) - -import ( - "os" - "path/filepath" - - "github.com/iter8-tools/iter8/base/log" - - "sigs.k8s.io/yaml" -) - -// trigger specifies a Kubernetes resource object. When this Kubernetes resource object is created/updated/deleted, then the releaseGroupSpecs will be created/deleted. 
-type trigger struct { - Name string `json:"name,omitempty" yaml:"name,omitempty"` - - Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"` - - Group string `json:"group,omitempty" yaml:"group,omitempty"` - - Version string `json:"version,omitempty" yaml:"version,omitempty"` - - Resource string `json:"resource,omitempty" yaml:"resource,omitempty"` -} - -// releaseSpec points to a particular Helm releaseSpec -type releaseSpec struct { - // Name is the name of the Helm chart - Name string `json:"name" yaml:"name"` - - // Values is the values of the Helm chart - Values map[string]interface{} `json:"values" yaml:"values"` - - // Version is the version of the Helm chart - Version string `json:"version" yaml:"version"` -} - -// releaseGroupSpec is the configuration of all the Helm charts for a particular experiment group and their install trigger -type releaseGroupSpec struct { - // Trigger defines when the ReleaseSpecs should be installed - Trigger trigger `json:"trigger" yaml:"trigger"` - - // ReleaseSpecs is the set of Helm charts - // the keys in ReleaseSpecs are identifiers for each releaseSpec (releaseSpecID) - ReleaseSpecs map[string]releaseSpec `json:"releaseSpecs" yaml:"releaseSpecs"` -} - -// config is the configuration for all the Helm charts and their triggers -type config struct { - // Specs contains the releaseGroupSpecs, which contain the Helm charts and their triggers - // the keys in Specs are identifiers for each releaseGroupSpec (releaseGroupSpecID) - Specs map[string]releaseGroupSpec -} - -// readConfig reads YAML autoX config file and converts to a config object -func readConfig(fn string) (c config) { - // empty configuration - c = config{} - - yfile, err := os.ReadFile(filepath.Clean(fn)) - if err != nil { - log.Logger.Warnf("unable to read configuration file %s: %s", fn, err.Error()) - return c // empty configuration - } - - err = yaml.Unmarshal(yfile, &c) - if err != nil { - log.Logger.Warnf("invalid configuration file %s: %s", fn, err.Error()) - return c // empty configuration - } - - return c -} diff --git a/autox/config_test.go b/autox/config_test.go deleted file mode 100644 index a83bc5d42..000000000 --- a/autox/config_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package autox - -import ( - "path/filepath" - "runtime" - "testing" - - "github.com/stretchr/testify/assert" -) - -// utility method -func completePath(prefix string, suffix string) string { - _, filename, _, _ := runtime.Caller(1) // one step up the call stack - return filepath.Join(filepath.Dir(filename), prefix, suffix) -} - -func TestReadConfig(t *testing.T) { - for _, tt := range []struct { - name string - file string - numSpecGroups int - }{ - {"empty", "config.empty.yaml", 0}, - {"invalid", "config.invalid.yaml", 0}, - {"garbage", "config.garbage.yaml", 0}, - {"nofile", "config.nofile.yaml", 0}, - } { - t.Run(tt.name, func(t *testing.T) { - c := readConfig(completePath("../testdata/autox_inputs", tt.file)) - assert.Equal(t, tt.numSpecGroups, len(c.Specs)) - }) - } - - c := readConfig(completePath("../testdata/autox_inputs", "config.example.yaml")) - assert.Equal(t, 2, len(c.Specs)) - assert.Equal(t, 2, len(c.Specs["myApp"].ReleaseSpecs)) - assert.Equal(t, 1, len(c.Specs["myApp2"].ReleaseSpecs)) -} diff --git a/autox/doc.go b/autox/doc.go deleted file mode 100644 index bbdb56e51..000000000 --- a/autox/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package autox is the entry point for the autoX controller. 
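informer.go, removed next, created Argo CD Application objects with a bounded retry; the client-go retry/backoff idiom it relied on can be sketched as follows (dummy operation standing in for the dynamic-client Create):

package main

import (
	"errors"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/util/retry"
)

func main() {
	attempts := 0
	err := retry.OnError(
		wait.Backoff{Steps: 5, Duration: time.Second, Factor: 1.0, Jitter: 0.1, Cap: 15 * time.Second},
		func(err error) bool { return true }, // retry on every failure
		func() error {
			// a real caller would issue the Application Create against the cluster here
			attempts++
			if attempts < 3 {
				return errors.New("transient failure")
			}
			return nil
		},
	)
	fmt.Println("attempts:", attempts, "err:", err)
}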
-package autox diff --git a/autox/informer.go b/autox/informer.go deleted file mode 100644 index b3745299b..000000000 --- a/autox/informer.go +++ /dev/null @@ -1,451 +0,0 @@ -package autox - -// informer.go - informer(s) to watch desired resources/namespaces - -import ( - "bytes" - "context" - _ "embed" - "errors" - "fmt" - "reflect" - "sync" - "time" - - "github.com/iter8-tools/iter8/base" - "github.com/iter8-tools/iter8/base/log" - - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/dynamic/dynamicinformer" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/util/retry" - "sigs.k8s.io/yaml" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - // this label is used in secrets (to allow for ownership over the applications) - // label is set to the name of a release group spec (releaseGroupSpecName) - // there is a 1:1 mapping of secrets to release group specs - autoXGroupLabel = "iter8.tools/autox-group" - - iter8 = "iter8" - argocd = "argocd" - - autoXAdditionalValues = "autoXAdditionalValues" - - nameLabel = "app.kubernetes.io/name" - versionLabel = "app.kubernetes.io/version" - managedByLabel = "app.kubernetes.io/managed-by" - trackLabel = "iter8.tools/track" - - timeout = 15 * time.Second - interval = 1 * time.Second -) - -var applicationGVR = schema.GroupVersionResource{ - Group: "argoproj.io", - Version: "v1alpha1", - Resource: "applications", -} - -var applicationValuesPath = []string{"spec", "source", "helm", "values"} - -var m sync.Mutex - -//go:embed application.tpl -var tplStr string - -type chartAction int64 - -const ( - applyAction chartAction = 0 - deleteAction chartAction = 1 -) - -type owner struct { - Name string `json:"name" yaml:"name"` - UID string `json:"uid" yaml:"uid"` -} - -// applicationValues is the values for the (Argo CD) application template -type applicationValues struct { - // Name is the name of the application - Name string `json:"name" yaml:"name"` - - // Namespace is the namespace of the application - Namespace string `json:"namespace" yaml:"namespace"` - - // Owner is the release group spec secret for this application - // we create an secret for each release group spec - // this secret is assigned as the Owner of this spec - // when we delete the secret, the application is also deleted - Owner owner `json:"owner" yaml:"owner"` - - // Chart is the Helm Chart for this application - Chart releaseSpec `json:"chart" yaml:"chart"` -} - -// the name of a release will depend on: -// -// the name of the release group spec (releaseGroupSpecName) -// the ID of the release spec (releaseSpecID) -func getReleaseName(releaseGroupSpecName string, releaseSpecID string) string { - return fmt.Sprintf("autox-%s-%s", releaseGroupSpecName, releaseSpecID) -} - -// shouldCreateApplication will return true if an application should be created -// an application should be created if there is no preexisting application or -// if the values are different from those from the previous application -func shouldCreateApplication(values map[string]interface{}, releaseName string) bool { - // get application - uPApp, _ := k8sClient.dynamicClient.Resource(applicationGVR).Namespace(argocd).Get(context.TODO(), releaseName, metav1.GetOptions{}) // *unstructured.Unstructured previous application - if uPApp != nil { - log.Logger.Debug(fmt.Sprintf("found previous application \"%s\"", releaseName)) - - // check if the previous application is 
managed by Iter8 - // (if it was previously created by Iter8) - if manager, ok := uPApp.GetLabels()[managedByLabel]; !ok || manager != iter8 { - log.Logger.Debug(fmt.Sprintf("previous application is not managed by Iter8 \"%s\"", releaseName)) - return false - } - - // extract values from previous application - pValuesString, _, err := unstructured.NestedString(uPApp.UnstructuredContent(), applicationValuesPath...) // pValuesString previous values - if err != nil { - log.Logger.Warn(fmt.Sprintf("cannot extract values of previous application \"%s\": %s: %s", releaseName, pValuesString, err)) - } - - var pValues map[string]interface{} - err = yaml.Unmarshal([]byte(pValuesString), &pValues) - if err != nil { - log.Logger.Warn(fmt.Sprintf("cannot parse values of previous application \"%s\": %s: %s", releaseName, pValuesString, err)) - } - - log.Logger.Debug(fmt.Sprintf("previous values: \"%s\"\nnew values: \"%s\"", pValues, values)) - - shouldCreateApplication := !reflect.DeepEqual(pValues, values) - if shouldCreateApplication { - log.Logger.Debug(fmt.Sprintf("replace previous application \"%s\"", releaseName)) - } else { - log.Logger.Debug(fmt.Sprintf("do not replace previous application \"%s\"", releaseName)) - } - - return shouldCreateApplication - } - - // there is no preexisting application, so should create one - return true -} - -func executeApplicationTemplate(applicationTemplate string, values applicationValues) (*unstructured.Unstructured, error) { - tpl, err := base.CreateTemplate(applicationTemplate) - if err != nil { - log.Logger.Error("could not create application template: ", err) - return nil, err - } - - var buf bytes.Buffer - err = tpl.Execute(&buf, values) - if err != nil { - log.Logger.Error("could not execute application template: ", err) - return nil, err - } - - jsonBytes, err := yaml.YAMLToJSON(buf.Bytes()) - if err != nil { - log.Logger.Error(fmt.Sprintf("could not convert YAML to JSON: \"%s\": \"%s\"", buf.String(), err)) - return nil, err - } - - // decode pending application into unstructured.UnstructuredJSONScheme - // source: https://github.com/kubernetes/client-go/blob/1ac8d459351e21458fd1041f41e43403eadcbdba/dynamic/simple.go#L186 - uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, jsonBytes) - if err != nil { - log.Logger.Error(fmt.Sprintf("could not decode object into unstructured.UnstructuredJSONScheme: \"%s\": \"%s\"", buf.String(), err)) - return nil, err - } - - return uncastObj.(*unstructured.Unstructured), nil -} - -// applyApplication will apply an application based on a release spec -func applyApplication(releaseName string, releaseGroupSpecName string, releaseSpec releaseSpec, namespace string, additionalValues map[string]interface{}) error { - // get release group spec secret, based on autoX group label - // secret is assigned as the owner of the application - labelSelector := fmt.Sprintf("%s=%s", autoXGroupLabel, releaseGroupSpecName) - secretList, err := k8sClient.clientset.CoreV1().Secrets(argocd).List(context.TODO(), metav1.ListOptions{ - LabelSelector: labelSelector, - }) - if err != nil { - log.Logger.Error("could not list release group spec secrets: ", err) - return err - } - - // ensure that only one secret is found - if secretsLen := len(secretList.Items); secretsLen == 0 { - err = errors.New("expected release group spec secret with label selector" + labelSelector + "but none were found") - log.Logger.Error(err) - return err - } else if secretsLen > 1 { - err = errors.New("expected release group spec secret with label 
selector" + labelSelector + "but more than one were found") - log.Logger.Error(err) - return err - } - secret := secretList.Items[0] - - values := applicationValues{ // template values - Name: releaseName, - Namespace: namespace, - - Owner: owner{ - Name: secret.Name, - UID: string(secret.GetUID()), // assign the release group spec secret as the owner of the application - }, - - Chart: releaseSpec, - } - - // add additionalValues to the values - // Argo CD will create a new experiment if it sees that the additionalValues are different from the previous experiment - // additionalValues will contain the pruned labels from the Kubernetes object - if values.Chart.Values == nil { - values.Chart.Values = map[string]interface{}{} - } - values.Chart.Values[autoXAdditionalValues] = additionalValues - - // check if the pending application will be different from the previous application, if it exists - // only create a new application if it will be different (the values will be different) - if s := shouldCreateApplication(values.Chart.Values, releaseName); s { - // delete previous application if it exists - uPApp, _ := k8sClient.dynamicClient.Resource(applicationGVR).Namespace(argocd).Get(context.TODO(), releaseName, metav1.GetOptions{}) // *unstructured.Unstructured previous application - if uPApp != nil { - if err1 := deleteApplication(releaseName); err1 != nil { - log.Logger.Error(fmt.Sprintf("could not delete previous application: \"%s\": \"%s\"", releaseName, err)) - } - } - - // execute application template - uApp, err := executeApplicationTemplate(tplStr, values) - if err != nil { - return err - } - - // apply application to the K8s cluster - log.Logger.Debug(fmt.Sprintf("apply application \"%s\"", releaseName)) - err = retry.OnError( - wait.Backoff{ - Steps: int(timeout / interval), - Cap: timeout, - Duration: interval, - Factor: 1.0, - Jitter: 0.1, - }, - func(err error) bool { - log.Logger.Error(err) - return true - }, // retry on all failures - func() error { - _, err = k8sClient.dynamic().Resource(applicationGVR).Namespace(argocd).Create(context.TODO(), uApp, metav1.CreateOptions{}) - return err - }, - ) - if err != nil { - log.Logger.Error(fmt.Sprintf("could not create application: \"%s\": \"%s\"", releaseName, err)) - return err - } - } - - return nil -} - -// deleteApplication deletes an application based on a given release name -func deleteApplication(releaseName string) error { - log.Logger.Debug(fmt.Sprintf("delete application \"%s\"", releaseName)) - - err := k8sClient.dynamic().Resource(applicationGVR).Namespace(argocd).Delete(context.TODO(), releaseName, metav1.DeleteOptions{}) - if err != nil { - log.Logger.Error(fmt.Sprintf("could not delete application \"%s\": \"%s\"", releaseName, err)) - return err - } - - return nil -} - -// doChartAction iterates through a release group spec and performs apply/delete action for each release spec -// action can be apply or delete -func doChartAction(chartAction chartAction, releaseGroupSpecName string, releaseGroupSpec releaseGroupSpec, namespace string, additionalValues map[string]interface{}) error { - // get group - var err error - for releaseSpecID, releaseSpec := range releaseGroupSpec.ReleaseSpecs { - // get release name - releaseName := getReleaseName(releaseGroupSpecName, releaseSpecID) - - // perform action for this release - switch chartAction { - case applyAction: - // if there is an error, keep going forward in the for loop - if err1 := applyApplication(releaseName, releaseGroupSpecName, releaseSpec, namespace, 
additionalValues); err1 != nil { - err = errors.New("one or more Helm release applications failed") - } - - case deleteAction: - // if there is an error, keep going forward in the for loop - if err1 := deleteApplication(releaseName); err1 != nil { - err = errors.New("one or more Helm release deletions failed") - } - } - } - - if err != nil { - log.Logger.Error(err) - } - - return err -} - -// pruneLabels will extract the labels that are relevant for autoX -// currently, the important labels are: -// -// nameLabel = "app.kubernetes.io/name" -// versionLabel = "app.kubernetes.io/version" -// trackLabel = "iter8.tools/track" -func pruneLabels(labels map[string]string) map[string]interface{} { - prunedLabels := map[string]interface{}{} - for _, l := range []string{nameLabel, versionLabel, trackLabel} { - prunedLabels[l] = labels[l] - } - return prunedLabels -} - -// hasVersionLabel checks if version label is present -func hasVersionLabel(labels map[string]string) bool { - version, ok := labels[versionLabel] - return ok && version != "" -} - -// handle is the entry point to all (add, update, delete) event handlers -func handle(obj interface{}, releaseGroupSpecName string, releaseGroupSpec releaseGroupSpec) { - m.Lock() - defer m.Unlock() - - // parse object - u := obj.(*unstructured.Unstructured) - - // check if name matches trigger - name := u.GetName() - if name != releaseGroupSpec.Trigger.Name { - return - } - - // at this point, we know that we are really handling an event for the trigger object - // name, namespace, and GVR should all match - log.Logger.Debug(fmt.Sprintf("handle kubernetes resource object: name: \"%s\", namespace: \"%s\", kind: \"%s\", labels: \"%s\"", u.GetName(), u.GetNamespace(), u.GetKind(), u.GetLabels())) - - // namespace and GVR should already match trigger - ns := u.GetNamespace() - // Note: GVR is from the release group spec, not available through the obj - gvr := getGVR(releaseGroupSpec) - - // get (client) object from cluster - clientU, _ := k8sClient.dynamicClient.Resource(gvr).Namespace(ns).Get(context.TODO(), name, metav1.GetOptions{}) - - // if (client) object exists - // delete applications if (client) object does not have version label - // then apply applications if (client) object has version label - if clientU != nil { - // check if version label exists - clientLabels := clientU.GetLabels() - if !hasVersionLabel(clientLabels) { - log.Logger.Debugf("delete applications for release group \"%s\" (no %s label)", releaseGroupSpecName, versionLabel) - - _ = doChartAction(deleteAction, releaseGroupSpecName, releaseGroupSpec, "", nil) - - // if version label does not exist, there is no need to apply applications, so return - return - } - - // apply applications for the release group - clientPrunedLabels := pruneLabels(clientLabels) - _ = doChartAction(applyAction, releaseGroupSpecName, releaseGroupSpec, ns, clientPrunedLabels) - } else { // delete applications if (client) object does not exist - _ = doChartAction(deleteAction, releaseGroupSpecName, releaseGroupSpec, "", nil) - } -} - -// getGVR gets the namespace and GVR from a release group spec trigger -func getGVR(releaseGroupSpec releaseGroupSpec) schema.GroupVersionResource { - gvr := schema.GroupVersionResource{ - Group: releaseGroupSpec.Trigger.Group, - Version: releaseGroupSpec.Trigger.Version, - Resource: releaseGroupSpec.Trigger.Resource, - } - - return gvr -} - -func addObject(releaseGroupSpecName string, releaseGroupSpec releaseGroupSpec) func(obj interface{}) { - return func(obj interface{}) { - 
handle(obj, releaseGroupSpecName, releaseGroupSpec) - } -} - -func updateObject(releaseGroupSpecName string, releaseGroupSpec releaseGroupSpec) func(oldObj, obj interface{}) { - return func(oldObj, obj interface{}) { - handle(obj, releaseGroupSpecName, releaseGroupSpec) - } -} - -func deleteObject(releaseGroupSpecName string, releaseGroupSpec releaseGroupSpec) func(obj interface{}) { - return func(obj interface{}) { - handle(obj, releaseGroupSpecName, releaseGroupSpec) - } -} - -type iter8Watcher struct { - factories map[string]dynamicinformer.DynamicSharedInformerFactory -} - -func newIter8Watcher(autoXConfig config) *iter8Watcher { - w := &iter8Watcher{ - // the key is the name of the release group spec (releaseGroupSpecName) - factories: map[string]dynamicinformer.DynamicSharedInformerFactory{}, - } - - // create a factory for each trigger - // there is a 1:1 correspondence between each trigger and release group spec - // effectively, we are creating one factory per trigger - // the key to the factories map is the name of the release group spec (releaseGroupSpecName) - for releaseGroupSpecName, releaseGroupSpec := range autoXConfig.Specs { - releaseGroupSpecName := releaseGroupSpecName - releaseGroupSpec := releaseGroupSpec - - ns := releaseGroupSpec.Trigger.Namespace - gvr := getGVR(releaseGroupSpec) - - w.factories[releaseGroupSpecName] = dynamicinformer.NewFilteredDynamicSharedInformerFactory(k8sClient.dynamicClient, 0, ns, nil) - - informer := w.factories[releaseGroupSpecName].ForResource(gvr) - _, err := informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: addObject(releaseGroupSpecName, releaseGroupSpec), - UpdateFunc: updateObject(releaseGroupSpecName, releaseGroupSpec), - DeleteFunc: deleteObject(releaseGroupSpecName, releaseGroupSpec), - }) - - if err != nil { - log.Logger.Error(fmt.Sprintf("cannot add event handler for namespace \"%s\" and GVR \"%s\": \"%s\"", ns, gvr, err)) - } - } - - return w -} - -func (watcher *iter8Watcher) start(stopChannel chan struct{}) { - for _, f := range watcher.factories { - f.Start(stopChannel) - } -} diff --git a/autox/informer_test.go b/autox/informer_test.go deleted file mode 100644 index 946fff893..000000000 --- a/autox/informer_test.go +++ /dev/null @@ -1,366 +0,0 @@ -package autox - -import ( - // abnapp "github.com/iter8-tools/iter8/abn/application" - // "github.com/iter8-tools/iter8/abn/k8sclient" - - "context" - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "helm.sh/helm/v3/pkg/cli" - v1 "k8s.io/api/core/v1" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// TestShouldCreateApplication tests the function shouldCreateApplication(), which determines if an application should -// created/updated based on whether or not there is a preexisting one -func TestShouldCreateApplication(t *testing.T) { - // 1) nothing in cluster - // therefore, return true (no concern for preexisting application) - k8sClient = newFakeKubeClient(cli.New()) - assert.True(t, shouldCreateApplication(map[string]interface{}{}, "test")) - - // 2) existing application, new application has the same values - // therefore, return false (not necessary to recreate application) - values := applicationValues{ - Name: "test", - Namespace: "default", - Chart: releaseSpec{ - Values: map[string]interface{}{}, - }, - } - - // simulating additional values - values.Chart.Values["hello"] = "world" - - // execute application template - 
uApp, err := executeApplicationTemplate(tplStr, values) - assert.NoError(t, err) - _, err = k8sClient.dynamicClient.Resource(applicationGVR).Namespace(argocd).Create(context.Background(), uApp, metav1.CreateOptions{}) - assert.NoError(t, err) - - // same values (values.Chart.Values) - // therefore, return false - assert.False(t, shouldCreateApplication(values.Chart.Values, "test")) - - // 3) existing application, new application has different values - // therefore, return true (old application can be replaced with new one) - - // different values - // therefore, return true - assert.True(t, shouldCreateApplication(map[string]interface{}{"something": "different"}, "test")) - - // 4) existing application but application is not managed by Iter8 - // therefore return false (Iter8 does not have permission to replace the old application) - - // setting managed by to something other than Iter8 - uApp.SetLabels(map[string]string{ - managedByLabel: "abc", - }) - - _, err = k8sClient.dynamicClient.Resource(applicationGVR).Namespace(argocd).Update(context.Background(), uApp, metav1.UpdateOptions{}) - assert.NoError(t, err) - - assert.False(t, shouldCreateApplication(map[string]interface{}{"something": "different"}, "test")) -} - -// TestApplyApplication tests the function applyApplication(), which applys Argo CD applications -func TestApplyApplication(t *testing.T) { - k8sClient = newFakeKubeClient(cli.New()) - - releaseGroupSpecName := "testReleaseGroupSpecName" - releaseSpecName := "testReleaseSpecName" - applicationName := fmt.Sprintf("autox-%s-%s", releaseGroupSpecName, releaseSpecName) - spec := releaseSpec{ - Name: applicationName, - Values: map[string]interface{}{}, - } - additionalValues := map[string]interface{}{} - - // 1) no release group spec secret - // therefore, fail - assert.Error(t, applyApplication(applicationName, releaseGroupSpecName, spec, "default", additionalValues)) - - // 2) create application with no conflicts - // create release group spec secret - // therefore, no fail - releaseGroupSpecSecret := v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-secret", - Namespace: argocd, - Labels: map[string]string{ - "iter8.tools/autox-group": releaseGroupSpecName, - }, - }, - } - - _, err := k8sClient.clientset.CoreV1().Secrets(argocd).Create(context.Background(), &releaseGroupSpecSecret, metav1.CreateOptions{}) - assert.NoError(t, err) - - // ensure application does not exist - _, err = k8sClient.dynamicClient.Resource(applicationGVR).Namespace(argocd).Get(context.Background(), applicationName, metav1.GetOptions{}) - assert.Error(t, err) - - assert.NoError(t, applyApplication(applicationName, releaseGroupSpecName, spec, "default", additionalValues)) - - // ensure application exists - _, err = k8sClient.dynamicClient.Resource(applicationGVR).Namespace(argocd).Get(context.Background(), applicationName, metav1.GetOptions{}) - assert.NoError(t, err) - - // 3) create application with conflicts - // fallback is to do nothing - // therefore, no fail - assert.NoError(t, applyApplication(applicationName, releaseGroupSpecName, spec, "default", additionalValues)) -} - -// TestDeleteApplication tests the function deleteApplication(), which deletes Argo CD applications -func TestDeleteApplication(t *testing.T) { - k8sClient = newFakeKubeClient(cli.New()) - - releaseGroupSpecName := "testReleaseGroupSpecName" - releaseSpecName := "testReleaseSpecName" - applicationName := fmt.Sprintf("autox-%s-%s", releaseGroupSpecName, releaseSpecName) - - // 1) no application - // therefore, fail - 
assert.Error(t, deleteApplication(applicationName)) - - // 2) delete existing application - // therefore, no fail - - // create application - values := applicationValues{ - Name: applicationName, - Chart: releaseSpec{ - Name: applicationName, - Values: map[string]interface{}{}, - }, - } - uApp, err := executeApplicationTemplate(tplStr, values) - assert.NoError(t, err) - _, err = k8sClient.dynamic().Resource(applicationGVR).Namespace(argocd).Create(context.TODO(), uApp, metav1.CreateOptions{}) - assert.NoError(t, err) - - // ensure there is an application - _, err = k8sClient.dynamicClient.Resource(applicationGVR).Namespace(argocd).Get(context.Background(), applicationName, metav1.GetOptions{}) - assert.NoError(t, err) - - assert.NoError(t, deleteApplication(applicationName)) - - // ensure there is no application anymore - _, err = k8sClient.dynamicClient.Resource(applicationGVR).Namespace(argocd).Get(context.Background(), applicationName, metav1.GetOptions{}) - assert.Error(t, err) -} - -// Check to see if add, update, delete handlers from the watcher are properly invoked -// after the watcher is created using newIter8Watcher() -func TestNewIter8Watcher(t *testing.T) { - // autoX needs the config - autoXConfig := readConfig("../testdata/autox_inputs/config.example.yaml") - - namespace := "default" - releaseSpecName := "myApp" - version := "v1" - track := "" - appName1 := "autox-myApp-name1" - appName2 := "autox-myApp-name2" - - gvr := schema.GroupVersionResource{ - Group: "apps", - Version: "v1", - Resource: "deployments", - } - - // define and start watcher - k8sClient = newFakeKubeClient(cli.New()) - - // create releaseSpec secret - releaseGroupSpecSecret := v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-secret", - Namespace: argocd, - Labels: map[string]string{ - "iter8.tools/autox-group": releaseSpecName, - }, - }, - } - _, err := k8sClient.clientset.CoreV1().Secrets(argocd).Create(context.Background(), &releaseGroupSpecSecret, metav1.CreateOptions{}) - assert.NoError(t, err) - - w := newIter8Watcher(autoXConfig) - assert.NotNil(t, w) - done := make(chan struct{}) - defer close(done) - w.start(done) - - // 1) create object with random name and no version label - // no application should be created - objRandNameNoAutoXLabel, err := k8sClient.dynamic(). - Resource(gvr).Namespace(namespace). - Create( - context.TODO(), - newUnstructuredDeployment( - namespace, - "rand", // random name - "", // no version label - track, - map[string]string{}, - ), - metav1.CreateOptions{}, - ) - assert.NoError(t, err) - assert.NotNil(t, objRandNameNoAutoXLabel) - - // no applications - assert.Eventually(t, func() bool { - list, _ := k8sClient.dynamic().Resource(applicationGVR).Namespace(argocd).List(context.Background(), metav1.ListOptions{}) - return assert.Equal(t, len(list.Items), 0) - }, 5*time.Second, time.Second) - - // 2) create object with random name and version label - // no application should be created - objRandNameAutoXLabel, err := k8sClient.dynamic(). - Resource(gvr).Namespace(namespace). 
- Create( - context.TODO(), - newUnstructuredDeployment( - namespace, - "rand2", // random name - version, // version label - track, - map[string]string{}, - ), - metav1.CreateOptions{}, - ) - assert.NoError(t, err) - assert.NotNil(t, objRandNameAutoXLabel) - - // no applications - assert.Eventually(t, func() bool { - list, _ := k8sClient.dynamic().Resource(applicationGVR).Namespace(argocd).List(context.Background(), metav1.ListOptions{}) - return assert.Equal(t, len(list.Items), 0) - }, 5*time.Second, time.Second) - - // 3) create object with trigger name and no version label - // no application should be created - objNoAutoXLabel, err := k8sClient.dynamic(). - Resource(gvr).Namespace(namespace). - Create( - context.TODO(), - newUnstructuredDeployment( - namespace, - releaseSpecName, // trigger name - "", // no version label - track, - map[string]string{}), - metav1.CreateOptions{}, - ) - assert.NoError(t, err) - assert.NotNil(t, objNoAutoXLabel) - - // no applications - assert.Eventually(t, func() bool { - list, _ := k8sClient.dynamic().Resource(applicationGVR).Namespace(argocd).List(context.Background(), metav1.ListOptions{}) - return assert.Equal(t, len(list.Items), 0) - }, 5*time.Second, time.Second) - - // delete the object so we can recreate it with autoX label - err = k8sClient.dynamic().Resource(gvr).Namespace(namespace).Delete(context.TODO(), releaseSpecName, metav1.DeleteOptions{}) - assert.NoError(t, err) - - // 4) create object with trigger name with version label - // 2 applications should be created - // one for each release spec in the config - // autox-myapp-name1 and autox-myapp-name2 - createdObj, err := k8sClient.dynamic(). - Resource(gvr).Namespace(namespace). - Create( - context.TODO(), - newUnstructuredDeployment( - namespace, - releaseSpecName, // trigger name - version, // version label - track, - map[string]string{}, - ), - metav1.CreateOptions{}, - ) - assert.NoError(t, err) - assert.NotNil(t, createdObj) - - // 2 applications - assert.Eventually(t, func() bool { - list, _ := k8sClient.dynamic().Resource(applicationGVR).Namespace(argocd).List(context.Background(), metav1.ListOptions{}) - return assert.Equal(t, len(list.Items), 2) - }, 5*time.Second, time.Second) - - // check applications by name - assert.Eventually(t, func() bool { - app, _ := k8sClient.dynamic().Resource(applicationGVR).Namespace(argocd).Get(context.Background(), appName1, metav1.GetOptions{}) - return assert.NotNil(t, app) - }, 5*time.Second, time.Second) - assert.Eventually(t, func() bool { - app, _ := k8sClient.dynamic().Resource(applicationGVR).Namespace(argocd).Get(context.Background(), appName2, metav1.GetOptions{}) - return assert.NotNil(t, app) - }, 5*time.Second, time.Second) - - // 5) delete version label - // all applications deleted - (createdObj.Object["metadata"].(map[string]interface{}))["labels"].(map[string]interface{})[versionLabel] = nil - _, err = k8sClient.dynamic(). - Resource(gvr).Namespace(namespace). 
- Update( - context.TODO(), - createdObj, - metav1.UpdateOptions{}, - ) - assert.NoError(t, err) - - // 0 applications - assert.Eventually(t, func() bool { - list, _ := k8sClient.dynamic().Resource(applicationGVR).Namespace(argocd).List(context.Background(), metav1.ListOptions{}) - return assert.Equal(t, len(list.Items), 0) - }, 5*time.Second, time.Second) -} - -func newUnstructuredDeployment(namespace, application, version, track string, additionalLabels map[string]string) *unstructured.Unstructured { - annotations := map[string]interface{}{ - "iter8.tools/ready": "true", - } - if track != "" { - annotations[trackLabel] = track - } - - labels := map[string]interface{}{ - nameLabel: application, - versionLabel: version, - "iter8.tools/ready": "true", - } - - // add additionalLabels to labels - if len(additionalLabels) > 0 { - for labelName, labelValue := range additionalLabels { - labels[labelName] = labelValue - } - } - - return &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "apps/v1", - "kind": "Deployment", - "metadata": map[string]interface{}{ - "namespace": namespace, - "name": application, - "labels": labels, - "annotations": annotations, - }, - "spec": application, - }, - } -} diff --git a/autox/k8sclient.go b/autox/k8sclient.go deleted file mode 100644 index 94314eb51..000000000 --- a/autox/k8sclient.go +++ /dev/null @@ -1,71 +0,0 @@ -package autox - -import ( - "errors" - - "github.com/iter8-tools/iter8/base/log" - - "helm.sh/helm/v3/pkg/cli" - - // Import to initialize client auth plugins. - _ "k8s.io/client-go/plugin/pkg/client/auth" - - "k8s.io/client-go/dynamic" - "k8s.io/client-go/kubernetes" -) - -// kubeClient embeds Kube configuration, and -// enables interaction with a Kubernetes cluster through Kube APIs -type kubeClient struct { - // EnvSettings provides generic Kubernetes options - *cli.EnvSettings - - // clientset enables interaction with a Kubernetes cluster using structured types - clientset kubernetes.Interface - - // dynamicClient enables unstructured interaction with a Kubernetes cluster - dynamicClient dynamic.Interface -} - -// newKubeClient creates an empty KubeClient -func newKubeClient(s *cli.EnvSettings) *kubeClient { - return &kubeClient{ - EnvSettings: s, - // default other fields - } -} - -// init initializes the Kubernetes clientset -func (c *kubeClient) init() (err error) { - if c.dynamicClient == nil { - // get rest config - restConfig, err := c.EnvSettings.RESTClientGetter().ToRESTConfig() - if err != nil { - e := errors.New("unable to get Kubernetes REST config") - log.Logger.WithStackTrace(err.Error()).Error(e) - return e - } - - // get clientset - c.clientset, err = kubernetes.NewForConfig(restConfig) - if err != nil { - e := errors.New("unable to get Kubernetes clientset") - log.Logger.WithStackTrace(err.Error()).Error(e) - return e - } - - // get dynamic client - c.dynamicClient, err = dynamic.NewForConfig(restConfig) - if err != nil { - e := errors.New("unable to get Kubernetes dynamic client") - log.Logger.WithStackTrace(err.Error()).Error(e) - return e - } - } - - return nil -} - -func (c *kubeClient) dynamic() dynamic.Interface { - return c.dynamicClient -} diff --git a/autox/k8sclient_test.go b/autox/k8sclient_test.go deleted file mode 100644 index efb2daeb1..000000000 --- a/autox/k8sclient_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package autox - -import ( - "helm.sh/helm/v3/pkg/cli" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - dynamicfake 
"k8s.io/client-go/dynamic/fake" - "k8s.io/client-go/kubernetes/fake" - ktesting "k8s.io/client-go/testing" -) - -// newFakeKubeClient returns a fake Kubernetes client that is able to manage secrets -// Includes dynamic client with Deployments as listed objects -// Used by test cases in several packages to define (global) k8sclient.Client for testing -func newFakeKubeClient(s *cli.EnvSettings, objects ...runtime.Object) *kubeClient { - fakeClient := kubeClient{ - EnvSettings: s, - // default other fields - } - - // secretDataReactor sets the secret.Data field based on the values from secret.StringData - // Credit: this function is adapted from https://github.com/creydr/go-k8s-utils - var secretDataReactor = func(action ktesting.Action) (bool, runtime.Object, error) { - secret, _ := action.(ktesting.CreateAction).GetObject().(*corev1.Secret) - - if secret.Data == nil { - secret.Data = make(map[string][]byte) - } - - for k, v := range secret.StringData { - secret.Data[k] = []byte(v) - } - - return false, nil, nil - } - - fc := fake.NewSimpleClientset(objects...) - fc.PrependReactor("create", "secrets", secretDataReactor) - fc.PrependReactor("update", "secrets", secretDataReactor) - fakeClient.clientset = fc - - // fakeClient.dynamicClient = dynamicfake.NewSimpleDynamicClient(runtime.NewScheme()) - fakeClient.dynamicClient = dynamicfake.NewSimpleDynamicClientWithCustomListKinds( - runtime.NewScheme(), - map[schema.GroupVersionResource]string{ - {Group: "apps", Version: "v1", Resource: "deployments"}: "DeploymentList", - applicationGVR: "ApplicationList", - }, - objects...) - - return &fakeClient -} diff --git a/autox/watcher.go b/autox/watcher.go deleted file mode 100644 index dc3928b7f..000000000 --- a/autox/watcher.go +++ /dev/null @@ -1,91 +0,0 @@ -package autox - -import ( - "fmt" - "os" - - "github.com/iter8-tools/iter8/base/log" - - "helm.sh/helm/v3/pkg/cli" -) - -const ( - // configEnv is the name of environment variable with file path to the config - configEnv = "CONFIG" -) - -var k8sClient *kubeClient - -// validateConfig validates config, which contains all the release group specs -func validateConfig(c config) error { - var err error - - triggerStrings := map[string]bool{} - - // iterate through all the release group specs - for releaseGroupSpecID, releaseGroupSpec := range c.Specs { - // validate trigger - if releaseGroupSpec.Trigger.Name == "" { - err = fmt.Errorf("trigger in spec group \"%s\" does not have a name", releaseGroupSpecID) - break - } - - if releaseGroupSpec.Trigger.Namespace == "" { - err = fmt.Errorf("trigger in spec group \"%s\" does not have a namespace", releaseGroupSpecID) - break - } - - if releaseGroupSpec.Trigger.Version == "" { - err = fmt.Errorf("trigger in spec group \"%s\" does not have a version", releaseGroupSpecID) - break - } - - if releaseGroupSpec.Trigger.Resource == "" { - err = fmt.Errorf("trigger in spec group \"%s\" does not have a resource", releaseGroupSpecID) - break - } - - // check for trigger uniqueness - triggerString := fmt.Sprintf("%s/%s/%s/%s/%s", releaseGroupSpec.Trigger.Name, releaseGroupSpec.Trigger.Namespace, releaseGroupSpec.Trigger.Group, releaseGroupSpec.Trigger.Version, releaseGroupSpec.Trigger.Resource) - if _, ok := triggerStrings[triggerString]; ok { - err = fmt.Errorf("multiple release specs with the same trigger: name: \"%s\", namespace: \"%s\", group: \"%s\", version: \"%s\", resource: \"%s\",", releaseGroupSpec.Trigger.Name, releaseGroupSpec.Trigger.Namespace, releaseGroupSpec.Trigger.Group, 
releaseGroupSpec.Trigger.Version, releaseGroupSpec.Trigger.Resource) - break - } - triggerStrings[triggerString] = true - } - - return err -} - -// Start is entry point to configure services and start them -func Start(stopCh chan struct{}, autoxK *kubeClient) error { - if autoxK == nil { - // get a default client - k8sClient = newKubeClient(cli.New()) - } else { - // set it here - k8sClient = autoxK - } - - // initialize kubernetes driver - if err := k8sClient.init(); err != nil { - log.Logger.Fatal("unable to init k8s client") - } - - // read release group specs - configFile, ok := os.LookupEnv(configEnv) - if !ok { - log.Logger.Fatal("group configuration file is required") - } - config := readConfig(configFile) - - // validate the release group specs - err := validateConfig(config) - if err != nil { - return err - } - - w := newIter8Watcher(config) - go w.start(stopCh) - return nil -} diff --git a/autox/watcher_test.go b/autox/watcher_test.go deleted file mode 100644 index 5dfcee244..000000000 --- a/autox/watcher_test.go +++ /dev/null @@ -1,156 +0,0 @@ -package autox - -import ( - "context" - "fmt" - "os" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "helm.sh/helm/v3/pkg/cli" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -func TestStart(t *testing.T) { - // Start() requires some environment variables to be set - _ = os.Setenv(configEnv, "../testdata/autox_inputs/config.example.yaml") - - stopCh := make(chan struct{}) - defer close(stopCh) - _ = Start(stopCh, newFakeKubeClient(cli.New())) - - gvr := schema.GroupVersionResource{ - Group: "apps", - Version: "v1", - Resource: "deployments", - } - namespace := "default" - releaseSpecName := "myApp" - version := "v1" - track := "" - - // create releaseSpec secret - releaseGroupSpecSecret := v1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-secret", - Namespace: argocd, - Labels: map[string]string{ - "iter8.tools/autox-group": releaseSpecName, - }, - }, - } - _, err := k8sClient.clientset.CoreV1().Secrets(argocd).Create(context.Background(), &releaseGroupSpecSecret, metav1.CreateOptions{}) - assert.NoError(t, err) - - createdObj, err := k8sClient.dynamic(). - Resource(gvr).Namespace(namespace). 
- Create( - context.TODO(), - newUnstructuredDeployment( - namespace, - releaseSpecName, - version, - track, - map[string]string{ - // autoXLabel: "true", // add the autoXLabel, which will allow applyApplication() to trigger - }, - ), metav1.CreateOptions{}, - ) - assert.NoError(t, err) - assert.NotNil(t, createdObj) - - // 2 applications - // one for each release spec in the config - // autox-myapp-name1 and autox-myapp-name2 - assert.Eventually(t, func() bool { - list, _ := k8sClient.dynamic().Resource(applicationGVR).Namespace(argocd).List(context.Background(), metav1.ListOptions{}) - return assert.Equal(t, len(list.Items), 2) - }, 5*time.Second, time.Second) -} - -func TestValidateConfig(t *testing.T) { - tests := []struct { - c config - err string - }{ - { - config{ - Specs: map[string]releaseGroupSpec{ - "test": {}, - }, - }, - "trigger in spec group \"test\" does not have a name", - }, - { - config{ - Specs: map[string]releaseGroupSpec{ - "test": { - Trigger: trigger{ - Name: "test", - }, - }, - }, - }, - "trigger in spec group \"test\" does not have a namespace", - }, - { - config{ - Specs: map[string]releaseGroupSpec{ - "test": { - Trigger: trigger{ - Name: "test", - Namespace: "default", - }, - }, - }, - }, - "trigger in spec group \"test\" does not have a version", - }, - { - config{ - Specs: map[string]releaseGroupSpec{ - "test": { - Trigger: trigger{ - Name: "test", - Namespace: "default", - Version: "v1", - }, - }, - }, - }, - "trigger in spec group \"test\" does not have a resource", - }, - { - config{ - Specs: map[string]releaseGroupSpec{ - "test": { - Trigger: trigger{ - Name: "test", - Namespace: "default", - Version: "v1", - Resource: "deployments", - }, - }, - "test2": { - Trigger: trigger{ - Name: "test", - Namespace: "default", - Version: "v1", - Resource: "deployments", - }, - }, - }, - }, - "multiple release specs with the same trigger: name: \"test\", namespace: \"default\", group: \"\", version: \"v1\", resource: \"deployments\",", - }, - } - - for _, e := range tests { - err := validateConfig(e.c) - fmt.Println(err) - assert.EqualError(t, err, e.err) - } -} diff --git a/base/assess.go b/base/assess.go deleted file mode 100644 index 7bc44bcfa..000000000 --- a/base/assess.go +++ /dev/null @@ -1,161 +0,0 @@ -package base - -import ( - "errors" - - "github.com/iter8-tools/iter8/base/log" -) - -// assessInputs contain the inputs to the assess-app-versions task to be executed. 
-type assessInputs struct { - // Rewards are the reward metrics - Rewards *Rewards `json:"rewards,omitempty" yaml:"rewards,omitempty"` - - // SLOs are the SLO limits - SLOs *SLOLimits `json:"SLOs,omitempty" yaml:"SLOs,omitempty"` -} - -// assessTask enables assessment of versions -type assessTask struct { - // TaskMeta has fields common to all tasks - TaskMeta - // With contains the inputs to this task - With assessInputs `json:"with" yaml:"with"` -} - -const ( - // AssessTaskName is the name of the task this file implements - AssessTaskName = "assess" -) - -// initializeDefaults sets default values for task inputs -func (t *assessTask) initializeDefaults() {} - -// validateInputs for this task -func (t *assessTask) validateInputs() error { - return nil -} - -// Run executes the assess-app-versions task -func (t *assessTask) run(exp *Experiment) error { - err := t.validateInputs() - if err != nil { - return err - } - - t.initializeDefaults() - - if exp.Result.Insights == nil { - log.Logger.Error("uninitialized insights within experiment") - return errors.New("uninitialized insights within experiment") - } - if t.With.SLOs == nil || - exp.Result.Insights.NumVersions == 0 { - // do nothing for now - // todo: fix when rewards are introduced - - log.Logger.Warn("nothing to do; returning") - return nil - } - - // set rewards (if needed) - err = exp.Result.Insights.setRewards(t.With.Rewards) - if err != nil { - return err - } - - // set SLOs (if needed) - err = exp.Result.Insights.setSLOs(t.With.SLOs) - if err != nil { - return err - } - - // set initialize SLOsSatisfied (if needed) - err = exp.initializeSLOsSatisfied() - if err != nil { - return err - } - - // set SLOsSatisfied - if t.With.SLOs != nil { - exp.Result.Insights.SLOsSatisfied = &SLOResults{ - Upper: evaluateSLOs(exp, t.With.SLOs.Upper, true), - Lower: evaluateSLOs(exp, t.With.SLOs.Lower, false), - } - } - - // set RewardsWinners - if t.With.Rewards != nil { - exp.Result.Insights.RewardsWinners = &RewardsWinners{ - Max: evaluateRewards(exp, t.With.Rewards.Max, true), - Min: evaluateRewards(exp, t.With.Rewards.Min, false), - } - } - - return err -} - -func evaluateRewards(exp *Experiment, rewards []string, max bool) []int { - winners := make([]int, len(rewards)) - for i := 0; i < len(rewards); i++ { - for j := 0; j < exp.Result.Insights.NumVersions; j++ { - winners[i] = identifyWinner(exp, rewards[i], max) - } - } - return winners -} - -func identifyWinner(e *Experiment, reward string, max bool) int { - currentWinner := -1 - var currentWinningValue *float64 - - for j := 0; j < e.Result.Insights.NumVersions; j++ { - val := e.Result.Insights.ScalarMetricValue(j, reward) - if val == nil { - log.Logger.Warnf("unable to find value for version %v and metric %s", j, reward) - continue - } - if currentWinningValue == nil || (max && *val > *currentWinningValue) || (!max && *val < *currentWinningValue) { - currentWinningValue = val - currentWinner = j - } - } - - return currentWinner -} - -// evaluate SLOs and output the boolean SLO X version matrix -func evaluateSLOs(exp *Experiment, slos []SLO, upper bool) [][]bool { - slosSatisfied := make([][]bool, len(slos)) - for i := 0; i < len(slos); i++ { - slosSatisfied[i] = make([]bool, exp.Result.Insights.NumVersions) - for j := 0; j < exp.Result.Insights.NumVersions; j++ { - slosSatisfied[i][j] = sloSatisfied(exp, slos, i, j, upper) - } - } - return slosSatisfied -} - -// sloSatisfied returns true if SLO i satisfied by version j -func sloSatisfied(e *Experiment, slos []SLO, i int, j int, upper 
bool) bool { - val := e.Result.Insights.ScalarMetricValue(j, slos[i].Metric) - // check if metric is available - if val == nil { - log.Logger.Warnf("unable to find value for version %v and metric %s", j, slos[i].Metric) - return false - } - - if upper { - // check upper limit - if *val > slos[i].Limit { - return false - } - } else { - // check lower limit - if *val < slos[i].Limit { - return false - } - } - - return true -} diff --git a/base/assess_test.go b/base/assess_test.go deleted file mode 100644 index bed6128e3..000000000 --- a/base/assess_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package base - -import ( - "os" - "testing" - - "github.com/stretchr/testify/assert" -) - -// Test a runnable assert condition here -func TestRunAssess(t *testing.T) { - _ = os.Chdir(t.TempDir()) - // simple assess without any SLOs - // should succeed - task := &assessTask{ - TaskMeta: TaskMeta{ - Task: StringPointer(AssessTaskName), - }, - With: assessInputs{}, - } - exp := &Experiment{ - Spec: []Task{task}, - } - exp.initResults(1) - _ = exp.Result.initInsightsWithNumVersions(1) - err := task.run(exp) - assert.NoError(t, err) - - // assess with an SLO - // should succeed - task.With = assessInputs{ - SLOs: &SLOLimits{ - Upper: []SLO{{ - Metric: "a/b", - Limit: 20.0, - }}, - }, - Rewards: &Rewards{ - Max: []string{"a/b"}, - }, - } - err = task.run(exp) - assert.NoError(t, err) -} diff --git a/base/collect_grpc_test.go b/base/collect_grpc_test.go index 2e70a9cf3..3e6c0e308 100644 --- a/base/collect_grpc_test.go +++ b/base/collect_grpc_test.go @@ -1,6 +1,8 @@ package base import ( + "encoding/json" + "fmt" "os" "strings" "testing" @@ -266,60 +268,17 @@ func TestMockGRPCWithSLOsAndPercentiles(t *testing.T) { }, } - at := &assessTask{ - TaskMeta: TaskMeta{ - Task: StringPointer(AssessTaskName), - }, - With: assessInputs{ - SLOs: &SLOLimits{ - Lower: []SLO{{ - Metric: "grpc/request-count", - Limit: 100, - }}, - Upper: []SLO{{ - Metric: "grpc/latency/mean", - Limit: 100, - }, { - Metric: "grpc/latency/p95.00", - Limit: 200, - }, { - Metric: "grpc/latency/stddev", - Limit: 20, - }, { - Metric: "grpc/latency/max", - Limit: 200, - }, { - Metric: "grpc/error-count", - Limit: 0, - }, { - Metric: "grpc/request-count", - Limit: 100, - }}, - }, - }, - } exp := &Experiment{ - Spec: []Task{ct, at}, + Spec: []Task{ct}, } exp.initResults(1) _ = exp.Result.initInsightsWithNumVersions(1) err = exp.Spec[0].run(exp) assert.NoError(t, err) - err = exp.Spec[1].run(exp) - assert.NoError(t, err) - // assert SLOs are satisfied - for _, v := range exp.Result.Insights.SLOsSatisfied.Upper { - for _, b := range v { - assert.True(t, b) - } - } - for _, v := range exp.Result.Insights.SLOsSatisfied.Lower { - for _, b := range v { - assert.True(t, b) - } - } + expjson, _ := json.Marshal(exp) + fmt.Println(string(expjson)) expBytes, _ := yaml.Marshal(exp) log.Logger.Debug("\n" + string(expBytes)) diff --git a/base/experiment.go b/base/experiment.go index c36b9fda7..322ce2072 100644 --- a/base/experiment.go +++ b/base/experiment.go @@ -113,18 +113,6 @@ type Insights struct { // the outer slice must be the same length as the number of tracks // the map key must match the name of the summary metric in MetricsInfo SummaryMetricValues []map[string]summarymetrics.SummaryMetric - - // SLOs involved in this experiment - SLOs *SLOLimits `json:"SLOs,omitempty" yaml:"SLOs,omitempty"` - - // SLOsSatisfied indicator matrices that show if upper and lower SLO limits are satisfied - SLOsSatisfied *SLOResults `json:"SLOsSatisfied,omitempty" 
yaml:"SLOsSatisfied,omitempty"` - - // Rewards involed in this experiment - Rewards *Rewards `json:"rewards,omitempty" yaml:"rewards,omitempty"` - - // RewardsWinners indicate the winners - RewardsWinners *RewardsWinners `json:"rewardsWinners,omitempty" yaml:"rewardsWinners,omitempty"` } // MetricMeta describes a metric @@ -279,14 +267,6 @@ func (s *ExperimentSpec) UnmarshalJSON(data []byte) error { return e } tsk = cgt - case AssessTaskName: - at := &assessTask{} - if err := json.Unmarshal(tBytes, at); err != nil { - e := errors.New("json unmarshal error") - log.Logger.WithStackTrace(err.Error()).Error(e) - return e - } - tsk = at case NotifyTaskName: nt := ¬ifyTask{} if err := json.Unmarshal(tBytes, nt); err != nil { @@ -398,40 +378,6 @@ func (in *Insights) updateMetric(m string, mm MetricMeta, i int, val interface{} return nil } -// setRewards sets the Rewards field in insights -// if this function is called multiple times (example, due to looping), then -// it is intended to be called with the same argument each time -func (in *Insights) setRewards(rewards *Rewards) error { - if in.SLOs != nil { - if reflect.DeepEqual(in.Rewards, rewards) { - return nil - } - e := fmt.Errorf("old and new value of rewards conflict") - log.Logger.WithStackTrace(fmt.Sprint("old: ", in.Rewards, "new: ", rewards)).Error(e) - return e - } - // LHS will be nil - in.Rewards = rewards - return nil -} - -// setSLOs sets the SLOs field in insights -// if this function is called multiple times (example, due to looping), then -// it is intended to be called with the same argument each time -func (in *Insights) setSLOs(slos *SLOLimits) error { - if in.SLOs != nil { - if reflect.DeepEqual(in.SLOs, slos) { - return nil - } - e := fmt.Errorf("old and new value of slos conflict") - log.Logger.WithStackTrace(fmt.Sprint("old: ", in.SLOs, "new: ", slos)).Error(e) - return e - } - // LHS will be nil - in.SLOs = slos - return nil -} - // TrackVersionStr creates a string of version name/track for display purposes func (in *Insights) TrackVersionStr(i int) string { // if VersionNames not defined or all fields empty return default "version i" @@ -454,29 +400,6 @@ func (in *Insights) TrackVersionStr(i int) string { return in.VersionNames[i].Track + " (" + in.VersionNames[i].Version + ")" } -// initializeSLOsSatisfied initializes the SLOs satisfied field -func (exp *Experiment) initializeSLOsSatisfied() error { - if exp.Result.Insights.SLOsSatisfied != nil { - return nil // already initialized - } - // LHS will be nil - exp.Result.Insights.SLOsSatisfied = &SLOResults{ - Upper: make([][]bool, 0), - Lower: make([][]bool, 0), - } - if exp.Result.Insights.SLOs != nil { - exp.Result.Insights.SLOsSatisfied.Upper = make([][]bool, len(exp.Result.Insights.SLOs.Upper)) - for i := 0; i < len(exp.Result.Insights.SLOs.Upper); i++ { - exp.Result.Insights.SLOsSatisfied.Upper[i] = make([]bool, exp.Result.Insights.NumVersions) - } - exp.Result.Insights.SLOsSatisfied.Lower = make([][]bool, len(exp.Result.Insights.SLOs.Lower)) - for i := 0; i < len(exp.Result.Insights.SLOs.Lower); i++ { - exp.Result.Insights.SLOsSatisfied.Lower[i] = make([]bool, exp.Result.Insights.NumVersions) - } - } - return nil -} - // initResults initializes the results section of an experiment func (exp *Experiment) initResults(revision int) { exp.Result = &ExperimentResult{ @@ -821,68 +744,6 @@ func (exp *Experiment) NoFailure() bool { return exp != nil && exp.Result != nil && !exp.Result.Failure } -// getSLOsSatisfiedBy returns the set of versions which satisfy SLOs -func 
(exp *Experiment) getSLOsSatisfiedBy() []int { - if exp == nil { - log.Logger.Warning("nil experiment") - return nil - } - if exp.Result == nil { - log.Logger.Warning("nil experiment result") - return nil - } - if exp.Result.Insights == nil { - log.Logger.Warning("nil insights in experiment result") - return nil - } - if exp.Result.Insights.NumVersions == 0 { - log.Logger.Warning("experiment does not involve any versions") - return nil - } - if exp.Result.Insights.SLOs == nil { - log.Logger.Info("experiment does not involve any SLOs") - sat := []int{} - for j := 0; j < exp.Result.Insights.NumVersions; j++ { - sat = append(sat, j) - } - return sat - } - log.Logger.Debug("experiment involves at least one version and at least one SLO") - log.Logger.Trace(exp.Result.Insights.SLOs) - log.Logger.Trace(exp.Result.Insights.SLOsSatisfied) - log.Logger.Trace(exp.Result.Insights.NonHistMetricValues) - sat := []int{} - for j := 0; j < exp.Result.Insights.NumVersions; j++ { - satThis := true - for i := 0; i < len(exp.Result.Insights.SLOs.Upper); i++ { - satThis = satThis && exp.Result.Insights.SLOsSatisfied.Upper[i][j] - if !satThis { - break - } - } - for i := 0; i < len(exp.Result.Insights.SLOs.Lower); i++ { - satThis = satThis && exp.Result.Insights.SLOsSatisfied.Lower[i][j] - if !satThis { - break - } - } - if satThis { - sat = append(sat, j) - } - } - return sat -} - -// SLOs returns true if all versions satisfy SLOs -func (exp *Experiment) SLOs() bool { - if exp == nil || exp.Result == nil || exp.Result.Insights == nil { - log.Logger.Warning("experiment, or result, or insights is nil") - return false - } - sby := exp.getSLOsSatisfiedBy() - return exp.Result.Insights.NumVersions == len(sby) -} - // run the experiment func (exp *Experiment) run(driver Driver) error { var err error diff --git a/base/experiment_test.go b/base/experiment_test.go index 3239fd827..c34a900d5 100644 --- a/base/experiment_test.go +++ b/base/experiment_test.go @@ -6,7 +6,6 @@ import ( "testing" "fortio.org/fortio/fhttp" - "github.com/iter8-tools/iter8/base/log" "github.com/stretchr/testify/assert" "sigs.k8s.io/yaml" ) @@ -59,23 +58,8 @@ func TestRunningTasks(t *testing.T) { }, } - // valid assess task... 
should succeed - at := &assessTask{ - TaskMeta: TaskMeta{ - Task: StringPointer(AssessTaskName), - }, - With: assessInputs{ - SLOs: &SLOLimits{ - Upper: []SLO{{ - Metric: httpMetricPrefix + "/" + builtInHTTPErrorCountID, - Limit: 0, - }}, - }, - }, - } - exp := &Experiment{ - Spec: []Task{ct, at}, + Spec: []Task{ct}, Result: &ExperimentResult{}, } exp.initResults(1) @@ -84,14 +68,6 @@ func TestRunningTasks(t *testing.T) { assert.Equal(t, exp.Result.Insights.NumVersions, 1) // sanity check -- handler was called assert.True(t, verifyHandlerCalled) - - err = at.run(exp) - assert.NoError(t, err) - - // SLOs should be satisfied by app - for i := 0; i < len(exp.Result.Insights.SLOs.Upper); i++ { // i^th SLO - assert.True(t, exp.Result.Insights.SLOsSatisfied.Upper[i][0]) // satisfied by only version - } } func TestRunExperiment(t *testing.T) { @@ -120,9 +96,6 @@ func TestRunExperiment(t *testing.T) { assert.True(t, e.Completed()) assert.True(t, e.NoFailure()) - expBytes, _ := yaml.Marshal(e) - log.Logger.Debug("\n" + string(expBytes)) - assert.True(t, e.SLOs()) } func TestFailExperiment(t *testing.T) { diff --git a/base/mock_qs_test.go b/base/mock_qs_test.go index a2ad326bd..c52354647 100644 --- a/base/mock_qs_test.go +++ b/base/mock_qs_test.go @@ -29,35 +29,14 @@ func TestMockQuickStartWithSLOs(t *testing.T) { }, } - at := &assessTask{ - TaskMeta: TaskMeta{ - Task: StringPointer(AssessTaskName), - }, - With: assessInputs{ - SLOs: &SLOLimits{ - Upper: []SLO{{ - Metric: "http/latency-mean", - Limit: 100, - }}, - }, - }, - } exp := &Experiment{ - Spec: []Task{ct, at}, + Spec: []Task{ct}, } exp.initResults(1) _ = exp.Result.initInsightsWithNumVersions(1) err := exp.Spec[0].run(exp) assert.NoError(t, err) - err = exp.Spec[1].run(exp) - assert.NoError(t, err) - // assert SLOs are satisfied - for _, v := range exp.Result.Insights.SLOsSatisfied.Upper { - for _, b := range v { - assert.True(t, b) - } - } } func TestMockQuickStartWithSLOsAndPercentiles(t *testing.T) { @@ -80,36 +59,12 @@ func TestMockQuickStartWithSLOsAndPercentiles(t *testing.T) { }, } - at := &assessTask{ - TaskMeta: TaskMeta{ - Task: StringPointer(AssessTaskName), - }, - With: assessInputs{ - SLOs: &SLOLimits{ - Upper: []SLO{{ - Metric: "http/latency-mean", - Limit: 100, - }, { - Metric: "http/latency-p95.00", - Limit: 200, - }}, - }, - }, - } exp := &Experiment{ - Spec: []Task{ct, at}, + Spec: []Task{ct}, } exp.initResults(1) _ = exp.Result.initInsightsWithNumVersions(1) err := exp.Spec[0].run(exp) assert.NoError(t, err) - err = exp.Spec[1].run(exp) - assert.NoError(t, err) - // assert SLOs are satisfied - for _, v := range exp.Result.Insights.SLOsSatisfied.Upper { - for _, b := range v { - assert.True(t, b) - } - } } diff --git a/cmd/autox.go b/cmd/autox.go deleted file mode 100644 index d8e2dacdd..000000000 --- a/cmd/autox.go +++ /dev/null @@ -1,41 +0,0 @@ -package cmd - -import ( - "os" - "os/signal" - "syscall" - - "github.com/iter8-tools/iter8/autox" - "github.com/spf13/cobra" -) - -// autoxDesc is the description of autox cmd -const autoxDesc = ` -Run the Iter8 autoX controller. 
- iter8 autox -` - -// newAutoXCmd creates the autox command -func newAutoXCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "autox", - Short: "Start the Iter8 autoX controller", - Long: autoxDesc, - RunE: func(_ *cobra.Command, _ []string) error { - stopCh := make(chan struct{}) - defer close(stopCh) - - if err := autox.Start(stopCh, nil); err != nil { - return err - } - sigCh := make(chan os.Signal, 1) - signal.Notify(sigCh, syscall.SIGTERM, os.Interrupt) - <-sigCh - - return nil - }, - SilenceUsage: true, - Hidden: true, - } - return cmd -} diff --git a/cmd/k.go b/cmd/k.go index 7a51a1a9b..908bdd309 100644 --- a/cmd/k.go +++ b/cmd/k.go @@ -40,9 +40,6 @@ func init() { os.Exit(1) } - // add k assert - kcmd.AddCommand(newKAssertCmd(kd)) - // add k delete kcmd.AddCommand(newKDeleteCmd(kd, os.Stdout)) @@ -52,9 +49,6 @@ func init() { // add k log kcmd.AddCommand(newKLogCmd(kd)) - // add k report - kcmd.AddCommand(newKReportCmd(kd)) - // add k run kcmd.AddCommand(newKRunCmd(kd, os.Stdout)) diff --git a/cmd/kassert.go b/cmd/kassert.go deleted file mode 100644 index 1d484ad7e..000000000 --- a/cmd/kassert.go +++ /dev/null @@ -1,71 +0,0 @@ -package cmd - -import ( - "errors" - "fmt" - "time" - - ia "github.com/iter8-tools/iter8/action" - "github.com/iter8-tools/iter8/base/log" - "github.com/iter8-tools/iter8/driver" - "github.com/spf13/cobra" -) - -// kassertDesc is the description of the k assert cmd -const kassertDesc = ` -Assert if the result of a Kubernetes experiment satisfies the specified conditions. If all conditions are satisfied, the command exits with code 0. Else, the command exits with code 1. - -Assertions are especially useful for automation inside CI/CD/GitOps pipelines. - -Supported conditions are 'completed', 'nofailure', 'slos', which indicate that the experiment has completed, none of the tasks have failed, and the SLOs are satisfied. 
- - iter8 k assert -c completed -c nofailure -c slos - # same as iter8 k assert -c completed,nofailure,slos - -You can optionally specify a timeout, which is the maximum amount of time to wait for the conditions to be satisfied: - - iter8 k assert -c completed,nofailure,slos -t 5s -` - -// newAssertCmd creates the Kubernetes assert command -func newKAssertCmd(kd *driver.KubeDriver) *cobra.Command { - actor := ia.NewAssertOpts(kd) - - cmd := &cobra.Command{ - Use: "assert", - Short: "Assert if Kubernetes experiment result satisfies conditions", - Long: kassertDesc, - SilenceUsage: true, - RunE: func(_ *cobra.Command, _ []string) error { - allGood, err := actor.KubeRun() - if err != nil { - return err - } - if !allGood { - e := errors.New("assert conditions failed") - log.Logger.Error(e) - return e - } - return nil - }, - } - // options specific to k assert - addExperimentGroupFlag(cmd, &actor.Group) - actor.EnvSettings = settings - - // options shared with assert - addConditionFlag(cmd, &actor.Conditions) - addTimeoutFlag(cmd, &actor.Timeout) - return cmd -} - -// addConditionFlag adds the condition flag to command -func addConditionFlag(cmd *cobra.Command, conditionPtr *[]string) { - cmd.Flags().StringSliceVarP(conditionPtr, "condition", "c", nil, fmt.Sprintf("%v | %v | %v; can specify multiple or separate conditions with commas;", ia.Completed, ia.NoFailure, ia.SLOs)) - _ = cmd.MarkFlagRequired("condition") -} - -// addTimeoutFlag adds timeout flag to command -func addTimeoutFlag(cmd *cobra.Command, timeoutPtr *time.Duration) { - cmd.Flags().DurationVar(timeoutPtr, "timeout", 0, "timeout duration (e.g., 5s)") -} diff --git a/cmd/kassert_test.go b/cmd/kassert_test.go deleted file mode 100644 index c2dc5e649..000000000 --- a/cmd/kassert_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package cmd - -import ( - "context" - "fmt" - "os" - "path/filepath" - "testing" - - "fortio.org/fortio/fhttp" - "github.com/iter8-tools/iter8/base" - id "github.com/iter8-tools/iter8/driver" - "github.com/stretchr/testify/assert" - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestKAssert(t *testing.T) { - _ = os.Chdir(t.TempDir()) - - // create and configure HTTP endpoint for testing - mux, addr := fhttp.DynamicHTTPServer(false) - url := fmt.Sprintf("http://127.0.0.1:%d/get", addr.Port) - var verifyHandlerCalled bool - mux.HandleFunc("/get", base.GetTrackingHandler(&verifyHandlerCalled)) - - // create experiment.yaml - base.CreateExperimentYaml(t, base.CompletePath("../testdata", "experiment.tpl"), url, id.ExperimentPath) - - // run test - testAssert(t, id.ExperimentPath, url, "output/kassert.txt", false) - // sanity check -- handler was called - assert.True(t, verifyHandlerCalled) -} - -func TestKAssertFailsSLOs(t *testing.T) { - _ = os.Chdir(t.TempDir()) - - // create and configure HTTP endpoint for testing - mux, addr := fhttp.DynamicHTTPServer(false) - url := fmt.Sprintf("http://127.0.0.1:%d/get", addr.Port) - var verifyHandlerCalled bool - mux.HandleFunc("/get", base.GetTrackingHandler(&verifyHandlerCalled)) - - // create experiment.yaml - base.CreateExperimentYaml(t, base.CompletePath("../testdata", "experiment_fails.tpl"), url, id.ExperimentPath) - - // run test - testAssert(t, id.ExperimentPath, url, "output/kassertfails.txt", true) - // sanity check -- handler was called - assert.True(t, verifyHandlerCalled) -} - -func testAssert(t *testing.T, experiment string, url string, expectedOutputFile string, expectError bool) { - tests := 
[]cmdTestCase{ - // k launch - { - name: "k launch", - cmd: fmt.Sprintf("k launch -c %v --localChart --set tasks={http,assess} --set http.url=%s --set http.duration=2s", base.CompletePath("../charts", "iter8"), url), - golden: base.CompletePath("../testdata", "output/klaunch.txt"), - }, - // k run - { - name: "k run", - cmd: "k run -g default --namespace default", - }, - // k assert - { - name: "k assert", - cmd: "k assert -c completed -c nofailure -c slos", - golden: base.CompletePath("../testdata", expectedOutputFile), - wantError: expectError, - }, - } - - // fake kube cluster - *kd = *id.NewFakeKubeDriver(settings) - - // read experiment from file created by caller - byteArray, _ := os.ReadFile(filepath.Clean(experiment)) - _, _ = kd.Clientset.CoreV1().Secrets("default").Create(context.TODO(), &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "default", - Namespace: "default", - }, - StringData: map[string]string{id.ExperimentPath: string(byteArray)}, - }, metav1.CreateOptions{}) - - _, _ = kd.Clientset.BatchV1().Jobs("default").Create(context.TODO(), &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - Name: "default-1-job", - Namespace: "default", - }, - }, metav1.CreateOptions{}) - - runTestActionCmd(t, tests) -} diff --git a/cmd/kreport.go b/cmd/kreport.go deleted file mode 100644 index 06337e48b..000000000 --- a/cmd/kreport.go +++ /dev/null @@ -1,48 +0,0 @@ -package cmd - -import ( - ia "github.com/iter8-tools/iter8/action" - "github.com/iter8-tools/iter8/driver" - - "github.com/spf13/cobra" -) - -// kreportDesc is the description of the k report cmd -const kreportDesc = ` -Generate a text or HTML report of a Kubernetes experiment. - - iter8 k report - # same as iter8 k report -o text - -or - - iter8 k report -o html > report.html - # view with browser -` - -// newKReportCmd creates the Kubernetes report command -func newKReportCmd(kd *driver.KubeDriver) *cobra.Command { - actor := ia.NewReportOpts(kd) - - cmd := &cobra.Command{ - Use: "report", - Short: "Generate report for Kubernetes experiment", - Long: kreportDesc, - SilenceUsage: true, - RunE: func(_ *cobra.Command, _ []string) error { - return actor.KubeRun(outStream) - }, - } - // options specific to k report - addExperimentGroupFlag(cmd, &actor.Group) - actor.EnvSettings = settings - - // options shared with report - addOutputFormatFlag(cmd, &actor.OutputFormat) - return cmd -} - -// addOutputFormatFlag adds output format flag to the report command -func addOutputFormatFlag(cmd *cobra.Command, outputFormat *string) { - cmd.Flags().StringVarP(outputFormat, "outputFormat", "o", "text", "text | html") -} diff --git a/cmd/kreport_test.go b/cmd/kreport_test.go deleted file mode 100644 index 3fd86c8e8..000000000 --- a/cmd/kreport_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package cmd - -import ( - "context" - "os" - "testing" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - id "github.com/iter8-tools/iter8/driver" - - "github.com/iter8-tools/iter8/base" -) - -func TestKReport(t *testing.T) { - _ = os.Chdir(t.TempDir()) - tests := []cmdTestCase{ - // k report - { - name: "k report", - cmd: "k report", - golden: base.CompletePath("../testdata", "output/kreport.txt"), - }, - } - - // mock the environment - // fake kube cluster - *kd = *id.NewFakeKubeDriver(settings) - byteArray, _ := os.ReadFile(base.CompletePath("../testdata/assertinputs", id.ExperimentPath)) - _, _ = kd.Clientset.CoreV1().Secrets("default").Create(context.TODO(), &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: 
"default", - Namespace: "default", - }, - StringData: map[string]string{id.ExperimentPath: string(byteArray)}, - }, metav1.CreateOptions{}) - - runTestActionCmd(t, tests) -} diff --git a/cmd/root.go b/cmd/root.go index a73b983d9..a4130fb5b 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -69,9 +69,6 @@ func init() { rootCmd.PersistentFlags().StringVarP(&logLevel, "loglevel", "l", "info", "trace, debug, info, warning, error, fatal, panic") rootCmd.SilenceErrors = true // will get printed in Execute() (by cobra.CheckErr()) - // add autox - rootCmd.AddCommand(newAutoXCmd()) - // add docs rootCmd.AddCommand(newDocsCmd()) diff --git a/driver/filedriver_test.go b/driver/filedriver_test.go index ec57547ce..73ff99f84 100644 --- a/driver/filedriver_test.go +++ b/driver/filedriver_test.go @@ -33,7 +33,7 @@ func TestLocalRun(t *testing.T) { // check results exp, err := base.BuildExperiment(&fd) assert.NoError(t, err) - assert.True(t, exp.Completed() && exp.NoFailure() && exp.SLOs()) + assert.True(t, exp.Completed() && exp.NoFailure()) } func TestFileDriverReadError(t *testing.T) { diff --git a/driver/kubedriver_test.go b/driver/kubedriver_test.go index 773290f05..a2144cec6 100644 --- a/driver/kubedriver_test.go +++ b/driver/kubedriver_test.go @@ -108,7 +108,7 @@ func TestKubeRun(t *testing.T) { // check results exp, err := base.BuildExperiment(kd) assert.NoError(t, err) - assert.True(t, exp.Completed() && exp.NoFailure() && exp.SLOs()) + assert.True(t, exp.Completed() && exp.NoFailure()) } func TestLogs(t *testing.T) { From b17de2e23e92bef42dcffb564f512392bfcd7180 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Mon, 31 Jul 2023 10:20:00 -0400 Subject: [PATCH 018/121] Remove assess, report, autox, and reports Signed-off-by: Alan Cha --- .github/workflows/assets.yaml | 2 - .github/workflows/lintcharts2.yaml | 2 - action/run_test.go | 2 +- base/experiment_test.go | 8 +- charts/iter8/templates/_experiment.tpl | 6 +- charts/iter8/templates/_task-assess.tpl | 40 ---- cmd/root.go | 5 - cmd/test_helpers.go | 1 - testdata/assertinputs/.gitignore | 2 - testdata/assertinputs/experiment.yaml | 177 ------------------ .../assertinputs/experimentWithLowerSLOs.yaml | 172 ----------------- .../assertinputs/noinsights/experiment.yaml | 37 ---- testdata/assertinputsfail/.gitignore | 2 - testdata/assertinputsfail/experiment.yaml | 159 ---------------- testdata/config.yaml | 6 - testdata/drivertests/experiment.tpl | 39 +--- testdata/experiment.tpl | 39 +--- testdata/experiment.yaml | 39 +--- testdata/experiment_db.yaml | 29 +-- testdata/experiment_fails.tpl | 39 +--- testdata/experiment_grpc.yaml | 43 ++--- testdata/output/krun.txt | 7 - 22 files changed, 58 insertions(+), 798 deletions(-) delete mode 100644 charts/iter8/templates/_task-assess.tpl delete mode 100644 testdata/assertinputs/.gitignore delete mode 100644 testdata/assertinputs/experiment.yaml delete mode 100644 testdata/assertinputs/experimentWithLowerSLOs.yaml delete mode 100644 testdata/assertinputs/noinsights/experiment.yaml delete mode 100644 testdata/assertinputsfail/.gitignore delete mode 100644 testdata/assertinputsfail/experiment.yaml delete mode 100644 testdata/config.yaml diff --git a/.github/workflows/assets.yaml b/.github/workflows/assets.yaml index fe027a183..8211f88de 100644 --- a/.github/workflows/assets.yaml +++ b/.github/workflows/assets.yaml @@ -210,7 +210,6 @@ jobs: --set ready.service="httpbin" \ --set ready.timeout=60s \ --set http.url=http://httpbin.default \ - --set assess.SLOs.upper.http/latency-mean=50 \ --set runner=job - name: k assert 
experiment completed without failures run: | @@ -241,7 +240,6 @@ jobs: --set ready.timeout=60s \ --set ready.namespace=default \ --set http.url=http://httpbin.default/get \ - --set assess.SLOs.upper.http/latency-mean=50 \ --set runner=job - name: k assert experiment completed without failures run: | diff --git a/.github/workflows/lintcharts2.yaml b/.github/workflows/lintcharts2.yaml index 940e8327d..499f8cab5 100644 --- a/.github/workflows/lintcharts2.yaml +++ b/.github/workflows/lintcharts2.yaml @@ -38,8 +38,6 @@ jobs: --set ready.service=httpbin \ --set ready.timeout=60s \ --set http.url=http://httpbin.default/get \ - --set assess.SLOs.upper.http/latency-mean=50 \ - --set assess.SLOs.upper.http/error-count=0 \ --set runner=job >> iter8.yaml - name: Lint Kubernetes YAML file diff --git a/action/run_test.go b/action/run_test.go index 51269d7c0..f7684bbd3 100644 --- a/action/run_test.go +++ b/action/run_test.go @@ -50,5 +50,5 @@ func TestKubeRun(t *testing.T) { assert.NoError(t, err) assert.True(t, exp.Completed()) assert.True(t, exp.NoFailure()) - assert.Equal(t, 4, exp.Result.NumCompletedTasks) + assert.Equal(t, 1, exp.Result.NumCompletedTasks) } diff --git a/base/experiment_test.go b/base/experiment_test.go index c34a900d5..a20817d22 100644 --- a/base/experiment_test.go +++ b/base/experiment_test.go @@ -18,21 +18,21 @@ func TestReadExperiment(t *testing.T) { e := &Experiment{} err = yaml.Unmarshal(b, e) assert.NoError(t, err) - assert.Equal(t, 4, len(e.Spec)) + assert.Equal(t, 1, len(e.Spec)) b, err = os.ReadFile(CompletePath("../testdata", "experiment_grpc.yaml")) assert.NoError(t, err) e = &Experiment{} err = yaml.Unmarshal(b, e) assert.NoError(t, err) - assert.Equal(t, 3, len(e.Spec)) + assert.Equal(t, 1, len(e.Spec)) b, err = os.ReadFile(CompletePath("../testdata", "experiment_db.yaml")) assert.NoError(t, err) e = &Experiment{} err = yaml.Unmarshal(b, e) assert.NoError(t, err) - assert.Equal(t, 4, len(e.Spec)) + assert.Equal(t, 1, len(e.Spec)) } func TestRunningTasks(t *testing.T) { @@ -87,7 +87,7 @@ func TestRunExperiment(t *testing.T) { e := &Experiment{} err = yaml.Unmarshal(b, e) assert.NoError(t, err) - assert.Equal(t, 4, len(e.Spec)) + assert.Equal(t, 1, len(e.Spec)) err = RunExperiment(false, &mockDriver{e}) assert.NoError(t, err) diff --git a/charts/iter8/templates/_experiment.tpl b/charts/iter8/templates/_experiment.tpl index f49b1374b..5f09eface 100644 --- a/charts/iter8/templates/_experiment.tpl +++ b/charts/iter8/templates/_experiment.tpl @@ -7,9 +7,7 @@ metadata: namespace: {{ .Release.Namespace }} spec: {{- range .Values.tasks }} - {{- if eq "assess" . }} - {{- include "task.assess" $.Values.assess -}} - {{- else if eq "custommetrics" . }} + {{- if eq "custommetrics" . }} {{- include "task.custommetrics" $.Values.custommetrics -}} {{- else if eq "grpc" . }} {{- include "task.grpc" $.Values.grpc -}} @@ -22,7 +20,7 @@ spec: {{- else if eq "github" . }} {{- include "task.github" $.Values.github -}} {{- else }} - {{- fail "task name must be one of assess, custommetrics, grpc, http, ready, github, or slack" -}} + {{- fail "task name must be one of custommetrics, grpc, http, ready, github, or slack" -}} {{- end }} {{- end }} result: diff --git a/charts/iter8/templates/_task-assess.tpl b/charts/iter8/templates/_task-assess.tpl deleted file mode 100644 index 8b3e92019..000000000 --- a/charts/iter8/templates/_task-assess.tpl +++ /dev/null @@ -1,40 +0,0 @@ -{{- define "task.assess" -}} -{{- if . 
}} -# task: validate service level objectives for app using -# the metrics collected in an earlier task -- task: assess - with: -{{- if .SLOs }} - SLOs: -{{- if .SLOs.upper }} - upper: -{{- range $m, $l := .SLOs.upper }} - - metric: {{ $m }} - limit: {{ $l }} -{{- end }} -{{- end }} -{{- if .SLOs.lower }} - lower: -{{- range $m, $l := .SLOs.lower }} - - metric: {{ $m }} - limit: {{ $l }} -{{- end }} -{{- end }} -{{- end }} -{{- if .rewards }} - rewards: -{{- if .rewards.max }} - max: -{{- range $r, $val := .rewards.max }} - - {{ $val }} -{{- end }} -{{- end }} -{{- if .rewards.min }} - min: -{{- range $r, $val := .rewards.min }} - - {{ $val }} -{{- end }} -{{- end }} -{{- end }} -{{- end }} -{{- end }} \ No newline at end of file diff --git a/cmd/root.go b/cmd/root.go index a4130fb5b..e0f89fee6 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -1,9 +1,6 @@ package cmd import ( - "io" - "os" - "github.com/iter8-tools/iter8/controllers/k8sclient" "github.com/iter8-tools/iter8/driver" @@ -22,8 +19,6 @@ var ( settings = cli.New() // KubeDriver used by actions package kd = driver.NewKubeDriver(settings) - // output stream where log messages are printed - outStream io.Writer = os.Stdout // kubeclient is the client used for controllers package kubeClient k8sclient.Interface ) diff --git a/cmd/test_helpers.go b/cmd/test_helpers.go index 41f4c0dfe..986196164 100644 --- a/cmd/test_helpers.go +++ b/cmd/test_helpers.go @@ -91,7 +91,6 @@ func executeActionCommandStdinC(store *storage.Storage, in *os.File, cmd string) rootCmd.SetErr(buf) rootCmd.SetArgs(args) log.Logger.Out = buf - outStream = buf oldStdin := os.Stdin if in != nil { diff --git a/testdata/assertinputs/.gitignore b/testdata/assertinputs/.gitignore deleted file mode 100644 index 4e9ba03c6..000000000 --- a/testdata/assertinputs/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -!experiment.yaml -!result.yaml \ No newline at end of file diff --git a/testdata/assertinputs/experiment.yaml b/testdata/assertinputs/experiment.yaml deleted file mode 100644 index b0f9c0009..000000000 --- a/testdata/assertinputs/experiment.yaml +++ /dev/null @@ -1,177 +0,0 @@ -spec: - # task 1: generate HTTP requests for application URL - # collect Iter8's built-in HTTP latency and error-related metrics - - task: http - with: - duration: 2s - errorRanges: - - lower: 500 - url: https://httpbin.org/get - # task 2: validate service level objectives for app using - # the metrics collected in the above task - - task: assess - with: - rewards: - max: - - "http/latency-mean" - - "http/latency-p50" - min: - - "http/error-rate" - SLOs: - Upper: - - metric: "http/error-rate" - limit: 0 - - metric: "http/latency-mean" - limit: 500 - - metric: "http/latency-p50" - limit: 1000 - - metric: "http/latency-p50.0" - limit: 1000 - - metric: "http/latency-p95.0" - limit: 2500 - - metric: "http/latency-p99" - limit: 5000 - # tasks 3 & 4: print if SLOs are satisfied or not - - if: SLOs() - run: echo "SLOs satisfied" - - if: not SLOs() - run: echo "SLOs not satisfied" -result: - failure: false - insights: - rewards: - max: - - "http/latency-mean" - - "http/latency-p50" - min: - - "http/error-rate" - rewardsWinners: - max: - - 0 - - 0 - min: - - 0 - SLOs: - upper: - - metric: "http/error-rate" - limit: 0 - - metric: "http/latency-mean" - limit: 500 - - metric: "http/latency-p50" - limit: 1000 - - metric: "http/latency-p50.0" - limit: 1000 - - metric: "http/latency-p95.0" - limit: 2500 - - metric: "http/latency-p99" - limit: 5000 - SLOsSatisfied: - upper: - - - true - - - true - - - true - - - true - - 
- true - - - true - histMetricValues: - - http/latency: - - count: 3 - lower: 11.388804 - upper: 12 - - count: 7 - lower: 12 - upper: 14 - - count: 4 - lower: 14 - upper: 16 - - count: 1 - lower: 16 - upper: 18.000000000000004 - - count: 1 - lower: 250 - upper: 272.838867 - metricsInfo: - http/error-count: - description: number of responses that were errors - type: Counter - http/error-rate: - description: fraction of responses that were errors - type: Gauge - http/latency: - description: Latency Histogram - type: Histogram - units: msec - http/latency-max: - description: maximum of observed latency values - type: Gauge - units: msec - http/latency-mean: - description: mean of observed latency values - type: Gauge - units: msec - http/latency-min: - description: minimum of observed latency values - type: Gauge - units: msec - http/latency-p50: - description: 50-th percentile of observed latency values - type: Gauge - units: msec - http/latency-p75: - description: 75-th percentile of observed latency values - type: Gauge - units: msec - http/latency-p90: - description: 90-th percentile of observed latency values - type: Gauge - units: msec - http/latency-p95: - description: 95-th percentile of observed latency values - type: Gauge - units: msec - http/latency-p99: - description: 99-th percentile of observed latency values - type: Gauge - units: msec - http/latency-p99.9: - description: 99.9-th percentile of observed latency values - type: Gauge - units: msec - http/latency-stddev: - description: standard deviation of observed latency values - type: Gauge - units: msec - http/request-count: - description: number of requests sent - type: Counter - nonHistMetricValues: - - http/error-count: - - 0 - http/error-rate: - - 0 - http/latency-max: - - 272.838867 - http/latency-mean: - - 29.624432499999998 - http/latency-min: - - 11.388804 - http/latency-p50: - - 13.428571428571429 - http/latency-p75: - - 15 - http/latency-p90: - - 16.8 - http/latency-p95: - - 254.56777339999996 - http/latency-p99: - - 269.18464828 - http/latency-p99.9: - - 272.473445128 - http/latency-stddev: - - 62.81583554772398 - http/request-count: - - 16 - numVersions: 1 - iter8Version: v0.13 - numCompletedTasks: 4 - startTime: "2022-03-16T10:22:58.540897-04:00" diff --git a/testdata/assertinputs/experimentWithLowerSLOs.yaml b/testdata/assertinputs/experimentWithLowerSLOs.yaml deleted file mode 100644 index 35773f728..000000000 --- a/testdata/assertinputs/experimentWithLowerSLOs.yaml +++ /dev/null @@ -1,172 +0,0 @@ -spec: - # task 1: generate HTTP requests for application URL - # collect Iter8's built-in HTTP latency and error-related metrics - - task: http - with: - duration: 2s - errorRanges: - - lower: 500 - url: https://httpbin.org/get - # task 2: validate service level objectives for app using - # the metrics collected in the above task - - task: assess - with: - SLOs: - Upper: - - metric: "http/error-rate" - limit: 0 - - metric: "http/latency-mean" - limit: 500 - - metric: "http/latency-p50" - limit: 1000 - - metric: "http/latency-p50.0" - limit: 1000 - - metric: "http/latency-p95.0" - limit: 2500 - - metric: "http/latency-p99" - limit: 5000 - Lower: - - metric: "user/engagement" - limit: 10000000 - # tasks 3 & 4: print if SLOs are satisfied or not - - if: SLOs() - run: echo "SLOs satisfied" - - if: not SLOs() - run: echo "SLOs not satisfied" -result: - failure: false - insights: - SLOs: - upper: - - metric: "http/error-rate" - limit: 0 - - metric: "http/latency-mean" - limit: 500 - - metric: "http/latency-p50" - limit: 
1000 - - metric: "http/latency-p50.0" - limit: 1000 - - metric: "http/latency-p95.0" - limit: 2500 - - metric: "http/latency-p99" - limit: 5000 - lower: - - metric: "user/engagement" - limit: 10000000 - SLOsSatisfied: - upper: - - - true - - - true - - - true - - - true - - - true - - - true - lower: - - - true - histMetricValues: - - http/latency: - - count: 3 - lower: 11.388804 - upper: 12 - - count: 7 - lower: 12 - upper: 14 - - count: 4 - lower: 14 - upper: 16 - - count: 1 - lower: 16 - upper: 18.000000000000004 - - count: 1 - lower: 250 - upper: 272.838867 - metricsInfo: - http/error-count: - description: number of responses that were errors - type: Counter - http/error-rate: - description: fraction of responses that were errors - type: Gauge - http/latency: - description: Latency Histogram - type: Histogram - units: msec - http/latency-max: - description: maximum of observed latency values - type: Gauge - units: msec - http/latency-mean: - description: mean of observed latency values - type: Gauge - units: msec - http/latency-min: - description: minimum of observed latency values - type: Gauge - units: msec - http/latency-p50: - description: 50-th percentile of observed latency values - type: Gauge - units: msec - http/latency-p75: - description: 75-th percentile of observed latency values - type: Gauge - units: msec - http/latency-p90: - description: 90-th percentile of observed latency values - type: Gauge - units: msec - http/latency-p95: - description: 95-th percentile of observed latency values - type: Gauge - units: msec - http/latency-p99: - description: 99-th percentile of observed latency values - type: Gauge - units: msec - http/latency-p99.9: - description: 99.9-th percentile of observed latency values - type: Gauge - units: msec - http/latency-stddev: - description: standard deviation of observed latency values - type: Gauge - units: msec - http/request-count: - description: number of requests sent - type: Counter - user/engagement: - description: number of user engagements - type: Counter - nonHistMetricValues: - - http/error-count: - - 0 - http/error-rate: - - 0 - http/latency-max: - - 272.838867 - http/latency-mean: - - 29.624432499999998 - http/latency-min: - - 11.388804 - http/latency-p50: - - 13.428571428571429 - http/latency-p75: - - 15 - http/latency-p90: - - 16.8 - http/latency-p95: - - 254.56777339999996 - http/latency-p99: - - 269.18464828 - http/latency-p99.9: - - 272.473445128 - http/latency-stddev: - - 62.81583554772398 - http/request-count: - - 16 - user/engagement: - - 100000000 - numVersions: 1 - iter8Version: v0.13 - numCompletedTasks: 4 - startTime: "2022-03-16T10:22:58.540897-04:00" diff --git a/testdata/assertinputs/noinsights/experiment.yaml b/testdata/assertinputs/noinsights/experiment.yaml deleted file mode 100644 index 390f55985..000000000 --- a/testdata/assertinputs/noinsights/experiment.yaml +++ /dev/null @@ -1,37 +0,0 @@ -spec: - # task 1: generate HTTP requests for application URL - # collect Iter8's built-in HTTP latency and error-related metrics - - task: http - with: - duration: 2s - errorRanges: - - lower: 500 - url: https://httpbin.org/get - # task 2: validate service level objectives for app using - # the metrics collected in the above task - - task: assess - with: - SLOs: - Upper: - - metric: "http/error-rate" - limit: 0 - - metric: "http/latency-mean" - limit: 500 - - metric: "http/latency-p50" - limit: 1000 - - metric: "http/latency-p50.0" - limit: 1000 - - metric: "http/latency-p95.0" - limit: 2500 - - metric: "http/latency-p99" - 
limit: 5000 - # tasks 3 & 4: print if SLOs are satisfied or not - - if: SLOs() - run: echo "SLOs satisfied" - - if: not SLOs() - run: echo "SLOs not satisfied" -result: - failure: false - iter8Version: v0.13 - numCompletedTasks: 4 - startTime: "2022-03-16T10:22:58.540897-04:00" diff --git a/testdata/assertinputsfail/.gitignore b/testdata/assertinputsfail/.gitignore deleted file mode 100644 index 4e9ba03c6..000000000 --- a/testdata/assertinputsfail/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -!experiment.yaml -!result.yaml \ No newline at end of file diff --git a/testdata/assertinputsfail/experiment.yaml b/testdata/assertinputsfail/experiment.yaml deleted file mode 100644 index bfd0f2fdc..000000000 --- a/testdata/assertinputsfail/experiment.yaml +++ /dev/null @@ -1,159 +0,0 @@ -spec: - # task 1: generate HTTP requests for application URL - # collect Iter8's built-in HTTP latency and error-related metrics - - task: http - with: - duration: 2s - errorRanges: - - lower: 500 - url: https://httpbin.org/get - # task 2: validate service level objectives for app using - # the metrics collected in the above task - - task: assess - with: - SLOs: - Upper: - - metric: "http/error-rate" - limit: 0 - - metric: "http/latency-mean" - limit: 500 - - metric: "http/latency-p50" - limit: 1000 - - metric: "http/latency-p50.0" - limit: 1000 - - metric: "http/latency-p95.0" - limit: 2500 - - metric: "http/latency-p99" - limit: 5000 - # tasks 3 & 4: print if SLOs are satisfied or not - - if: SLOs() - run: echo "SLOs satisfied" - - if: not SLOs() - run: echo "SLOs not satisfied" -result: - failure: false - insights: - SLOs: - upper: - - metric: "http/error-rate" - limit: 0 - - metric: "http/latency-mean" - limit: 500 - - metric: "http/latency-p50" - limit: 1000 - - metric: "http/latency-p50.0" - limit: 1000 - - metric: "http/latency-p95.0" - limit: 2500 - - metric: "http/latency-p99" - limit: 5000 - SLOsSatisfied: - upper: - - - false - - - true - - - true - - - true - - - true - - - true - histMetricValues: - - http/latency: - - count: 3 - lower: 11.388804 - upper: 12 - - count: 7 - lower: 12 - upper: 14 - - count: 4 - lower: 14 - upper: 16 - - count: 1 - lower: 16 - upper: 18.000000000000004 - - count: 1 - lower: 250 - upper: 272.838867 - metricsInfo: - http/error-count: - description: number of responses that were errors - type: Counter - http/error-rate: - description: fraction of responses that were errors - type: Gauge - http/latency: - description: Latency Histogram - type: Histogram - units: msec - http/latency-max: - description: maximum of observed latency values - type: Gauge - units: msec - http/latency-mean: - description: mean of observed latency values - type: Gauge - units: msec - http/latency-min: - description: minimum of observed latency values - type: Gauge - units: msec - http/latency-p50: - description: 50-th percentile of observed latency values - type: Gauge - units: msec - http/latency-p75: - description: 75-th percentile of observed latency values - type: Gauge - units: msec - http/latency-p90: - description: 90-th percentile of observed latency values - type: Gauge - units: msec - http/latency-p95: - description: 95-th percentile of observed latency values - type: Gauge - units: msec - http/latency-p99: - description: 99-th percentile of observed latency values - type: Gauge - units: msec - http/latency-p99.9: - description: 99.9-th percentile of observed latency values - type: Gauge - units: msec - http/latency-stddev: - description: standard deviation of observed latency values - type: 
Gauge - units: msec - http/request-count: - description: number of requests sent - type: Counter - nonHistMetricValues: - - http/error-count: - - 0 - http/error-rate: - - 0 - http/latency-max: - - 272.838867 - http/latency-mean: - - 29.624432499999998 - http/latency-min: - - 11.388804 - http/latency-p50: - - 13.428571428571429 - http/latency-p75: - - 15 - http/latency-p90: - - 16.8 - http/latency-p95: - - 254.56777339999996 - http/latency-p99: - - 269.18464828 - http/latency-p99.9: - - 272.473445128 - http/latency-stddev: - - 62.81583554772398 - http/request-count: - - 16 - numVersions: 1 - iter8Version: v0.13 - numCompletedTasks: 4 - startTime: "2022-03-16T10:22:58.540897-04:00" diff --git a/testdata/config.yaml b/testdata/config.yaml deleted file mode 100644 index 90dce61ec..000000000 --- a/testdata/config.yaml +++ /dev/null @@ -1,6 +0,0 @@ -http: - url: "https://httpbin.org/get" -assess: - SLOs: - upper: - http/latency-mean: 500 \ No newline at end of file diff --git a/testdata/drivertests/experiment.tpl b/testdata/drivertests/experiment.tpl index de5575b44..986adb1de 100644 --- a/testdata/drivertests/experiment.tpl +++ b/testdata/drivertests/experiment.tpl @@ -1,32 +1,9 @@ spec: - # task 1: generate HTTP requests for application URL - # collect Iter8's built-in HTTP latency and error-related metrics - - task: http - with: - duration: 2s - errorRanges: - - lower: 500 - url: {{ .URL }} - # task 2: validate service level objectives for app using - # the metrics collected in the above task - - task: assess - with: - SLOs: - upper: - - metric: "http/error-rate" - limit: 0 - - metric: "http/latency-mean" - limit: 500 - - metric: "http/latency-p50" - limit: 1000 - - metric: "http/latency-p50.0" - limit: 1000 - - metric: "http/latency-p95.0" - limit: 2500 - - metric: "http/latency-p99" - limit: 5000 - # tasks 3 & 4: print if SLOs are satisfied or not - - if: SLOs() - run: echo "SLOs satisfied" - - if: not SLOs() - run: echo "SLOs not satisfied" +# task 1: generate HTTP requests for application URL +# collect Iter8's built-in HTTP latency and error-related metrics +- task: http + with: + duration: 2s + errorRanges: + - lower: 500 + url: {{ .URL }} diff --git a/testdata/experiment.tpl b/testdata/experiment.tpl index adc7023d8..986adb1de 100644 --- a/testdata/experiment.tpl +++ b/testdata/experiment.tpl @@ -1,32 +1,9 @@ spec: - # task 1: generate HTTP requests for application URL - # collect Iter8's built-in HTTP latency and error-related metrics - - task: http - with: - duration: 2s - errorRanges: - - lower: 500 - url: {{ .URL }} - # task 2: validate service level objectives for app using - # the metrics collected in the above task - - task: assess - with: - SLOs: - upper: - - metric: "http/error-rate" - limit: 0 - - metric: "http/latency-mean" - limit: 5000 - - metric: "http/latency-p50" - limit: 5000 - - metric: "http/latency-p50.0" - limit: 5000 - - metric: "http/latency-p95.0" - limit: 5000 - - metric: "http/latency-p99" - limit: 5000 - # tasks 3 & 4: print if SLOs are satisfied or not - - if: SLOs() - run: echo "SLOs satisfied" - - if: not SLOs() - run: echo "SLOs not satisfied" +# task 1: generate HTTP requests for application URL +# collect Iter8's built-in HTTP latency and error-related metrics +- task: http + with: + duration: 2s + errorRanges: + - lower: 500 + url: {{ .URL }} diff --git a/testdata/experiment.yaml b/testdata/experiment.yaml index 18df69f27..36b734050 100644 --- a/testdata/experiment.yaml +++ b/testdata/experiment.yaml @@ -1,32 +1,9 @@ spec: - # task 1: generate HTTP 
requests for application URL - # collect Iter8's built-in HTTP latency and error-related metrics - - task: http - with: - duration: 2s - errorRanges: - - lower: 500 - url: https://httpbin.org/get - # task 2: validate service level objectives for app using - # the metrics collected in the above task - - task: assess - with: - SLOs: - upper: - - metric: "http/error-rate" - limit: 0 - - metric: "http/latency-mean" - limit: 5000 - - metric: "http/latency-p50" - limit: 5000 - - metric: "http/latency-p50.0" - limit: 5000 - - metric: "http/latency-p95.0" - limit: 5000 - - metric: "http/latency-p99" - limit: 5000 - # tasks 3 & 4: print if SLOs are satisfied or not - - if: SLOs() - run: echo "SLOs satisfied" - - if: not SLOs() - run: echo "SLOs not satisfied" +# task 1: generate HTTP requests for application URL +# collect Iter8's built-in HTTP latency and error-related metrics +- task: http + with: + duration: 2s + errorRanges: + - lower: 500 + url: https://httpbin.org/get diff --git a/testdata/experiment_db.yaml b/testdata/experiment_db.yaml index e788c197b..23a524be6 100644 --- a/testdata/experiment_db.yaml +++ b/testdata/experiment_db.yaml @@ -1,22 +1,9 @@ spec: - # task 1: collect custom metrics - - task: custommetrics - with: - templates: - kfserving: https://raw.githubusercontent.com/iter8-tools/iter8/master/testdata/metrics/kfserving.metrics.yaml - values: - namespace_name: ns-candidate - startingTime: Jan 2, 2006 at 3:04pm (MST) - # task 2: validate service level objectives for app using - # the metrics collected in the above task - - task: assess - with: - SLOs: - Upper: - - metric: "kfserving/request-count" - limit: 0 - # tasks 3 & 4: print if SLOs are satisfied or not - - if: SLOs() - run: echo "SLOs satisfied" - - if: not SLOs() - run: echo "SLOs not satisfied" +# task 1: collect custom metrics +- task: custommetrics + with: + templates: + kfserving: https://raw.githubusercontent.com/iter8-tools/iter8/master/testdata/metrics/kfserving.metrics.yaml + values: + namespace_name: ns-candidate + startingTime: Jan 2, 2006 at 3:04pm (MST) diff --git a/testdata/experiment_fails.tpl b/testdata/experiment_fails.tpl index 87347fe90..986adb1de 100644 --- a/testdata/experiment_fails.tpl +++ b/testdata/experiment_fails.tpl @@ -1,32 +1,9 @@ spec: - # task 1: generate HTTP requests for application URL - # collect Iter8's built-in HTTP latency and error-related metrics - - task: http - with: - duration: 2s - errorRanges: - - lower: 500 - url: {{ .URL }} - # task 2: validate service level objectives for app using - # the metrics collected in the above task - - task: assess - with: - SLOs: - upper: - - metric: "http/error-rate" - limit: 0 - - metric: "http/latency-mean" - limit: 500 - - metric: "http/latency-p50" - limit: 1000 - - metric: "http/latency-p50.0" - limit: 1 - - metric: "http/latency-p95.0" - limit: 2500 - - metric: "http/latency-p99" - limit: 5000 - # tasks 3 & 4: print if SLOs are satisfied or not - - if: SLOs() - run: echo "SLOs satisfied" - - if: not SLOs() - run: echo "SLOs not satisfied" +# task 1: generate HTTP requests for application URL +# collect Iter8's built-in HTTP latency and error-related metrics +- task: http + with: + duration: 2s + errorRanges: + - lower: 500 + url: {{ .URL }} diff --git a/testdata/experiment_grpc.yaml b/testdata/experiment_grpc.yaml index 49d901ec6..32c97f99d 100644 --- a/testdata/experiment_grpc.yaml +++ b/testdata/experiment_grpc.yaml @@ -1,35 +1,14 @@ spec: # task 1: generate gRPC requests for application # collect Iter8's built-in gRPC latency and 
error-related metrics - - task: grpc - with: - total: 200 - concurrency: 5 - data: - name: bob - timeout: 10s - connect-timeeout: 5s - protoURL: "https://raw.githubusercontent.com/bojand/ghz/v0.105.0/testdata/greeter.proto" - call: "helloworld.Greeter.SayHello" - host: "127.0.0.1" - # task 2: validate service level objectives for app using - # the metrics collected in the above task - - task: assess - with: - SLOs: - Upper: - - metric: "grpc/error-rate" - limit: 0 - - metric: "grpc/latency/mean" - limit: 100 - - metric: "grpc/latency/p50" - limit: 100 - - metric: "grpc/latency/p50.0" - limit: 100 - - metric: "grpc/latency/p95.0" - limit: 250 - - metric: "grpc/latency/p97.5" - limit: 500 - # task 3: conditional run task - - if: SLOs() - run: echo "all good" +- task: grpc + with: + total: 200 + concurrency: 5 + data: + name: bob + timeout: 10s + connect-timeeout: 5s + protoURL: "https://raw.githubusercontent.com/bojand/ghz/v0.105.0/testdata/greeter.proto" + call: "helloworld.Greeter.SayHello" + host: "127.0.0.1" \ No newline at end of file diff --git a/testdata/output/krun.txt b/testdata/output/krun.txt index 08360495e..a5fc32559 100644 --- a/testdata/output/krun.txt +++ b/testdata/output/krun.txt @@ -1,9 +1,2 @@ time=1977-09-02 22:04:05 level=info msg=task 1: http: started time=1977-09-02 22:04:05 level=info msg=task 1: http: completed -time=1977-09-02 22:04:05 level=info msg=task 2: assess: started -time=1977-09-02 22:04:05 level=info msg=task 2: assess: completed -time=1977-09-02 22:04:05 level=info msg=task 3: run: started -time=1977-09-02 22:04:05 level=info msg=task 3: run: completed -time=1977-09-02 22:04:05 level=info msg=task 4: run: started -time=1977-09-02 22:04:05 level=info msg=task 4: run: skipped stack-trace=below ... -::Trace:: false condition: not SLOs() From 8cdcaa97fc270c24425e164d729ce3b1061227bb Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Mon, 31 Jul 2023 10:39:22 -0400 Subject: [PATCH 019/121] Fix workflows Signed-off-by: Alan Cha --- .github/workflows/assets.yaml | 13 ++----------- .github/workflows/testperformance.yaml | 2 +- 2 files changed, 3 insertions(+), 12 deletions(-) diff --git a/.github/workflows/assets.yaml b/.github/workflows/assets.yaml index 8211f88de..69c101161 100644 --- a/.github/workflows/assets.yaml +++ b/.github/workflows/assets.yaml @@ -110,8 +110,6 @@ jobs: --set runner=job - name: try other iter8 k commands run: | - iter8 k assert -c completed -c nofailure --timeout 60s - iter8 k report iter8 k log iter8 k delete @@ -182,8 +180,7 @@ jobs: --set runner=job - name: try other iter8 k commands run: | - iter8 k assert -c completed -c nofailure --timeout 60s - iter8 k report + iter8 k log iter8 k delete @@ -211,9 +208,6 @@ jobs: --set ready.timeout=60s \ --set http.url=http://httpbin.default \ --set runner=job - - name: k assert experiment completed without failures - run: | - iter8 k assert -c completed -c nofailure --timeout 60s readiness-with-namespace: name: Kubernetes readiness test with namespace @@ -240,7 +234,4 @@ jobs: --set ready.timeout=60s \ --set ready.namespace=default \ --set http.url=http://httpbin.default/get \ - --set runner=job - - name: k assert experiment completed without failures - run: | - iter8 k assert -n experiments -c completed -c nofailure --timeout 60s + --set runner=job \ No newline at end of file diff --git a/.github/workflows/testperformance.yaml b/.github/workflows/testperformance.yaml index af5b6813d..5fd9f9d6d 100644 --- a/.github/workflows/testperformance.yaml +++ b/.github/workflows/testperformance.yaml @@ -250,4 
+250,4 @@ jobs: iter8 k assert -c completed -c nofailure -c slos --timeout 60s iter8 k report iter8 k log - iter8 k delete \ No newline at end of file + iter8 k delete From 377f1c9652b6778b57d1564a8a2fa7cccecbb6f8 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Tue, 1 Aug 2023 08:37:46 -0400 Subject: [PATCH 020/121] Modify error message Signed-off-by: Alan Cha --- driver/kubedriver.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/driver/kubedriver.go b/driver/kubedriver.go index c9f1bafb4..4debb6be4 100644 --- a/driver/kubedriver.go +++ b/driver/kubedriver.go @@ -495,7 +495,7 @@ func (kd *KubeDriver) GetExperimentLogs() (string, error) { req := podsClient.GetLogs(p.Name, &corev1.PodLogOptions{}) podLogs, err := req.Stream(context.TODO()) if err != nil { - e := errors.New("error in opening log stream") + e := fmt.Errorf("error in opening log stream: %e", err) log.Logger.Error(e) return "", e } @@ -507,7 +507,7 @@ func (kd *KubeDriver) GetExperimentLogs() (string, error) { buf := new(bytes.Buffer) _, err = io.Copy(buf, podLogs) if err != nil { - e := errors.New("error in copy information from podLogs to buf") + e := fmt.Errorf("error in copy information from podLogs to buf: %e", err) log.Logger.Error(e) return "", e } From 4437a4583ed2e79cbdfd6d637e808727e1968022 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Tue, 1 Aug 2023 08:41:29 -0400 Subject: [PATCH 021/121] Remove cronjob Signed-off-by: Alan Cha --- base/util_test.go | 29 +++++++++ charts/autox/.helmignore | 26 -------- charts/autox/Chart.yaml | 19 ------ charts/autox/templates/_helpers.tpl | 12 ---- charts/autox/templates/deployment.yaml | 42 ------------- charts/autox/templates/roles.yaml | 69 ---------------------- charts/autox/templates/secret.yaml | 37 ------------ charts/autox/templates/serviceaccount.yaml | 6 -- charts/autox/values.yaml | 13 ---- charts/iter8/templates/_k-cronjob.tpl | 42 ------------- charts/iter8/templates/k8s.yaml | 4 +- charts/iter8/values.yaml | 3 +- cmd/krun.go | 7 --- controllers/finalizer_test.go | 1 - controllers/interface_test.go | 12 ++++ metrics/server_test.go | 9 +++ 16 files changed, 53 insertions(+), 278 deletions(-) delete mode 100644 charts/autox/.helmignore delete mode 100644 charts/autox/Chart.yaml delete mode 100644 charts/autox/templates/_helpers.tpl delete mode 100644 charts/autox/templates/deployment.yaml delete mode 100644 charts/autox/templates/roles.yaml delete mode 100644 charts/autox/templates/secret.yaml delete mode 100644 charts/autox/templates/serviceaccount.yaml delete mode 100644 charts/autox/values.yaml delete mode 100644 charts/iter8/templates/_k-cronjob.tpl create mode 100644 controllers/interface_test.go diff --git a/base/util_test.go b/base/util_test.go index a40f7e480..c7c172ce1 100644 --- a/base/util_test.go +++ b/base/util_test.go @@ -78,3 +78,32 @@ func TestSplitApplication(t *testing.T) { assert.Equal(t, "default", ns) assert.Equal(t, "name", n) } + +type testType struct { + S string + I int + Nested struct { + S string + I int + } +} + +func TestToYAML(t *testing.T) { + obj := testType{ + S: "hello world", + I: 3, + Nested: struct { + S string + I int + }{ + S: "nested", + }, + } + + objString := ToYAML(obj) + assert.Equal(t, `I: 3 +Nested: + I: 0 + S: nested +S: hello world`, string(objString)) +} diff --git a/charts/autox/.helmignore b/charts/autox/.helmignore deleted file mode 100644 index 95fe8a639..000000000 --- a/charts/autox/.helmignore +++ /dev/null @@ -1,26 +0,0 @@ -# Patterns to ignore when building packages. 
-# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ - -# generated files need to be ignored -experiment.yaml \ No newline at end of file diff --git a/charts/autox/Chart.yaml b/charts/autox/Chart.yaml deleted file mode 100644 index 2393e39f0..000000000 --- a/charts/autox/Chart.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: v2 -name: autox -version: 0.1.9 -description: Iter8 autoX controller -type: application -keywords: -- Iter8 -- autox -- experiment -home: https://iter8.tools -sources: -- https://github.com/iter8-tools/iter8 -maintainers: -- name: Alan Cha - email: alan.cha1@ibm.com -- name: Iter8 - email: iter8-tools@gmail.com -icon: https://github.com/iter8-tools/iter8/raw/master/mkdocs/docs/images/favicon.png -appVersion: v0.15 diff --git a/charts/autox/templates/_helpers.tpl b/charts/autox/templates/_helpers.tpl deleted file mode 100644 index 9b8154785..000000000 --- a/charts/autox/templates/_helpers.tpl +++ /dev/null @@ -1,12 +0,0 @@ -{{- define "iter8-autox.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{- define "iter8-autox.labels" -}} - labels: - app.kubernetes.io/name: {{ template "iter8-autox.name" . }} - helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/version: {{ .Chart.AppVersion }} -{{- end -}} diff --git a/charts/autox/templates/deployment.yaml b/charts/autox/templates/deployment.yaml deleted file mode 100644 index fadb73cd8..000000000 --- a/charts/autox/templates/deployment.yaml +++ /dev/null @@ -1,42 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ .Release.Name }} - namespace: argocd - {{ template "iter8-autox.labels" . }} -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: {{ template "iter8-autox.name" . }} - template: - metadata: - labels: - app.kubernetes.io/name: {{ template "iter8-autox.name" . }} - annotations: - checksum/config: {{ include (print $.Template.BasePath "/secret.yaml") . 
| sha256sum }} - spec: - serviceAccountName: {{ .Release.Name }} - containers: - - name: iter8-autox - image: {{ .Values.image }} - imagePullPolicy: Always - command: ["/bin/iter8"] - args: ["autox", "-l", "{{ .Values.logLevel }}"] - env: - - name: CONFIG - value: /config/config.yaml - volumeMounts: - - name: autox-config - mountPath: "/config" - readOnly: true - resources: - {{ toYaml .Values.resources | indent 10 | trim }} - securityContext: - readOnlyRootFilesystem: true - runAsNonRoot: true - runAsUser: 1000 - volumes: - - name: autox-config - secret: - secretName: {{ .Release.Name }} diff --git a/charts/autox/templates/roles.yaml b/charts/autox/templates/roles.yaml deleted file mode 100644 index 1a78ca3c7..000000000 --- a/charts/autox/templates/roles.yaml +++ /dev/null @@ -1,69 +0,0 @@ ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: {{ $.Release.Name }}-argocd - namespace: argocd - {{ template "iter8-autox.labels" $ }} -rules: -- apiGroups: ["argoproj.io"] - resources: ["applications"] - verbs: ["create", "get", "patch", "delete"] -- apiGroups: [""] - resources: ["secrets"] - verbs: ["list"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: {{ $.Release.Name }}-argocd - namespace: argocd - {{ template "iter8-autox.labels" $ }} -subjects: -- kind: ServiceAccount - name: {{ $.Release.Name }} - namespace: argocd -roleRef: - kind: Role - name: {{ $.Release.Name }}-argocd - apiGroup: rbac.authorization.k8s.io -{{- range $releaseGroupSpecName, $releaseGroupSpec := .Values.groups }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: {{ $.Release.Name }}-{{ $releaseGroupSpecName }} - namespace: {{ $releaseGroupSpec.trigger.namespace }} - {{ template "iter8-autox.labels" $ }} -rules: -{{- if eq $releaseGroupSpec.trigger.resource "deployments" }} -- apiGroups: ["apps"] - resources: ["deployments"] - verbs: ["watch", "list", "get"] -{{- end }} -{{- if eq $releaseGroupSpec.trigger.resource "services" }} -- apiGroups: [""] - resources: ["services"] - verbs: ["watch", "list", "get"] -{{- end }} -{{- if eq $releaseGroupSpec.trigger.resource "ksvcs" }} -- apiGroups: ["serving.knative.dev"] - resources: ["services"] - verbs: ["watch", "list", "get"] -{{- end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: {{ $.Release.Name }}-{{ $releaseGroupSpecName }} - namespace: {{ $releaseGroupSpec.trigger.namespace }} - {{ template "iter8-autox.labels" $ }} -subjects: -- kind: ServiceAccount - name: {{ $.Release.Name }} - namespace: argocd -roleRef: - kind: Role - name: {{ $.Release.Name }}-{{ $releaseGroupSpecName }} - apiGroup: rbac.authorization.k8s.io -{{- end }} \ No newline at end of file diff --git a/charts/autox/templates/secret.yaml b/charts/autox/templates/secret.yaml deleted file mode 100644 index 4792ba90e..000000000 --- a/charts/autox/templates/secret.yaml +++ /dev/null @@ -1,37 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: {{ .Release.Name }} - namespace: argocd - {{ template "iter8-autox.labels" . 
}} -stringData: - config.yaml: | - specs: - {{- range $releaseGroupSpecName, $releaseGroupSpec := .Values.groups }} - {{ $releaseGroupSpecName }}: - trigger: - group: {{ $releaseGroupSpec.trigger.group }} - version: {{ $releaseGroupSpec.trigger.version }} - resource: {{ $releaseGroupSpec.trigger.resource }} - name: {{ $releaseGroupSpec.trigger.name }} - namespace: {{ $releaseGroupSpec.trigger.namespace }} - releaseSpecs: - {{- range $releaseSpecName, $releaseSpec := $releaseGroupSpec.specs }} - {{ $releaseSpecName }}: - name: {{ $releaseSpec.name }} - values: - {{ toYaml $releaseSpec.values | indent 14 | trim }} - version: {{ $releaseSpec.version }} - {{- end }} - {{- end }} -{{- range $releaseGroupSpecName, $releaseGroupSpec := .Values.groups }} -{{ $hash := print $releaseGroupSpec | sha256sum | trunc 5 }} ---- -apiVersion: v1 -kind: Secret -metadata: - name: autox-{{ $releaseGroupSpecName }}-{{ $hash }} - namespace: argocd - labels: - iter8.tools/autox-group: {{ $releaseGroupSpecName }} -{{- end }} \ No newline at end of file diff --git a/charts/autox/templates/serviceaccount.yaml b/charts/autox/templates/serviceaccount.yaml deleted file mode 100644 index a9a443e44..000000000 --- a/charts/autox/templates/serviceaccount.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ .Release.Name }} - namespace: argocd - {{ template "iter8-autox.labels" . }} diff --git a/charts/autox/values.yaml b/charts/autox/values.yaml deleted file mode 100644 index b1ea5f199..000000000 --- a/charts/autox/values.yaml +++ /dev/null @@ -1,13 +0,0 @@ -### AutoX service image -image: iter8/iter8:0.15 - -logLevel: info - -### Resource limits -resources: - requests: - memory: "64Mi" - cpu: "250m" - limits: - memory: "128Mi" - cpu: "500m" \ No newline at end of file diff --git a/charts/iter8/templates/_k-cronjob.tpl b/charts/iter8/templates/_k-cronjob.tpl deleted file mode 100644 index 40beedb7b..000000000 --- a/charts/iter8/templates/_k-cronjob.tpl +++ /dev/null @@ -1,42 +0,0 @@ -{{- define "k.cronjob" -}} -apiVersion: batch/v1 -kind: CronJob -metadata: - name: {{ .Release.Name }}-{{ .Release.Revision }}-cronjob - annotations: - iter8.tools/group: {{ .Release.Name }} - iter8.tools/revision: {{ .Release.Revision | quote }} -spec: - schedule: {{ .Values.cronjobSchedule | quote }} - concurrencyPolicy: Forbid - jobTemplate: - spec: - template: - metadata: - labels: - iter8.tools/group: {{ .Release.Name }} - annotations: - sidecar.istio.io/inject: "false" - spec: - serviceAccountName: {{ default (printf "%s-iter8-sa" .Release.Name) .Values.serviceAccountName }} - containers: - - name: iter8 - image: {{ .Values.iter8Image }} - imagePullPolicy: Always - command: - - "/bin/sh" - - "-c" - - | - iter8 k run --namespace {{ .Release.Namespace }} --group {{ .Release.Name }} -l {{ .Values.logLevel }} --reuseResult - resources: - {{ toYaml .Values.resources | indent 14 | trim }} - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - runAsNonRoot: true - runAsUser: 1001040000 - restartPolicy: Never - backoffLimit: 0 -{{- end }} diff --git a/charts/iter8/templates/k8s.yaml b/charts/iter8/templates/k8s.yaml index 2b391f1b5..65e5a5c85 100644 --- a/charts/iter8/templates/k8s.yaml +++ b/charts/iter8/templates/k8s.yaml @@ -10,9 +10,7 @@ --- {{- if eq "job" .Values.runner }} {{ include "k.job" . }} -{{- else if eq "cronjob" .Values.runner }} -{{ include "k.cronjob" . 
}} {{- else if eq "none" .Values.runner }} {{- else }} -{{- fail "runner must be one of job, cronjob, or none" }} +{{- fail "runner must be one of job or none" }} {{- end }} diff --git a/charts/iter8/values.yaml b/charts/iter8/values.yaml index 208fdf1ce..e3a497bff 100644 --- a/charts/iter8/values.yaml +++ b/charts/iter8/values.yaml @@ -4,7 +4,8 @@ iter8Image: iter8/iter8:0.15 ### majorMinor is the minor version of Iter8 majorMinor: v0.15 -### runner for Kubernetes experiments may be job, cronjob, or none +# TODO: Should this only ever be job? +### runner for Kubernetes experiments may be job or none runner: none logLevel: info diff --git a/cmd/krun.go b/cmd/krun.go index 7443b5df4..2d713eb76 100644 --- a/cmd/krun.go +++ b/cmd/krun.go @@ -32,12 +32,5 @@ func newKRunCmd(kd *driver.KubeDriver, out io.Writer) *cobra.Command { }, } addExperimentGroupFlag(cmd, &actor.Group) - addReuseResult(cmd, &actor.ReuseResult) return cmd } - -// addReuseResult allows the experiment to reuse the experiment result for -// looping experiments -func addReuseResult(cmd *cobra.Command, reuseResultPtr *bool) { - cmd.Flags().BoolVar(reuseResultPtr, "reuseResult", false, "reuse experiment result; useful for experiments with multiple loops such as Kubernetes experiments with a cronjob runner") -} diff --git a/controllers/finalizer_test.go b/controllers/finalizer_test.go index 7f13d8e51..6ebfd28e0 100644 --- a/controllers/finalizer_test.go +++ b/controllers/finalizer_test.go @@ -14,7 +14,6 @@ import ( ) func TestAddFinalizer(t *testing.T) { - u := &unstructured.Unstructured{ Object: map[string]interface{}{ "apiVersion": "v1", diff --git a/controllers/interface_test.go b/controllers/interface_test.go new file mode 100644 index 000000000..841453849 --- /dev/null +++ b/controllers/interface_test.go @@ -0,0 +1,12 @@ +package controllers + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestGetAllRoutemaps(t *testing.T) { + rm := DefaultRoutemaps{} + assert.NotNil(t, rm.GetAllRoutemaps()) +} diff --git a/metrics/server_test.go b/metrics/server_test.go index 1ac657ddf..b50c1bed4 100644 --- a/metrics/server_test.go +++ b/metrics/server_test.go @@ -337,7 +337,16 @@ func getTestRM(namespace, name string) *testroutemap { }, normalizedWeights: []uint32{1, 1}, } +} + +func TestTestRM(t *testing.T) { + namespace := "default" + name := "test" + rm := getTestRM(namespace, name) + assert.Equal(t, namespace, rm.GetNamespace()) + assert.Equal(t, name, rm.GetName()) + assert.Equal(t, []uint32{1, 1}, rm.Weights()) } func TestGetHTTPDashboardHelper(t *testing.T) { From 3d07dc741da319c2161d3aa74635b03099a6244b Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Tue, 1 Aug 2023 09:06:17 -0400 Subject: [PATCH 022/121] Revert ready taskin workflows Signed-off-by: Alan Cha --- .github/workflows/assets.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/assets.yaml b/.github/workflows/assets.yaml index 69c101161..85819ea35 100644 --- a/.github/workflows/assets.yaml +++ b/.github/workflows/assets.yaml @@ -180,7 +180,6 @@ jobs: --set runner=job - name: try other iter8 k commands run: | - iter8 k log iter8 k delete From e46eecf440fe9c9b8fbabfa5bff54965a4632f77 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Tue, 1 Aug 2023 09:36:47 -0400 Subject: [PATCH 023/121] Remove looped experiments in workflows Signed-off-by: Alan Cha --- .github/workflows/lintcharts2.yaml | 75 +----------------------------- 1 file changed, 1 insertion(+), 74 deletions(-) diff --git a/.github/workflows/lintcharts2.yaml 
b/.github/workflows/lintcharts2.yaml index 499f8cab5..65f67103a 100644 --- a/.github/workflows/lintcharts2.yaml +++ b/.github/workflows/lintcharts2.yaml @@ -33,10 +33,7 @@ jobs: if: steps.modified-files.outputs.any_modified == 'true' run: | helm template charts/iter8 \ - --set "tasks={ready,http}" \ - --set ready.deploy=httpbin \ - --set ready.service=httpbin \ - --set ready.timeout=60s \ + --set tasks={http} \ --set http.url=http://httpbin.default/get \ --set runner=job >> iter8.yaml @@ -46,40 +43,6 @@ jobs: with: directory: iter8.yaml - http-looped-experiment: - name: Lint HTTP looped experiment - runs-on: ubuntu-latest - - steps: - - name: Check out code - uses: actions/checkout@v3 - - - name: Get modified files in the charts/iter8 folder - id: modified-files - uses: tj-actions/changed-files@v35 - with: - files: charts/iter8 - - - uses: azure/setup-helm@v3 - if: steps.modified-files.outputs.any_modified == 'true' - with: - token: ${{ secrets.GITHUB_TOKEN }} - - - name: Create Kubernetes YAML file - if: steps.modified-files.outputs.any_modified == 'true' - run: | - helm template charts/iter8 \ - --set tasks={http} \ - --set http.url="http://httpbin.default/get" \ - --set runner=cronjob \ - --set cronjobSchedule="*/1 * * * *" >> iter8.yaml - - - name: Lint Kubernetes YAML file - if: steps.modified-files.outputs.any_modified == 'true' - uses: stackrox/kube-linter-action@v1 - with: - directory: iter8.yaml - grpc-experiment: name: Lint gRPC experiment runs-on: ubuntu-latest @@ -114,39 +77,3 @@ jobs: uses: stackrox/kube-linter-action@v1 with: directory: iter8.yaml - - grpc-looped-experiment: - name: Lint gRPC looped experiment - runs-on: ubuntu-latest - - steps: - - name: Check out code - uses: actions/checkout@v3 - - - name: Get modified files in the charts/iter8 folder - id: modified-files - uses: tj-actions/changed-files@v35 - with: - files: charts/iter8 - - - uses: azure/setup-helm@v3 - if: steps.modified-files.outputs.any_modified == 'true' - with: - token: ${{ secrets.GITHUB_TOKEN }} - - - name: Create Kubernetes YAML file - if: steps.modified-files.outputs.any_modified == 'true' - run: | - helm template charts/iter8 \ - --set tasks={grpc} \ - --set grpc.host="hello.default:50051" \ - --set grpc.call="helloworld.Greeter.SayHello" \ - --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ - --set runner=cronjob \ - --set cronjobSchedule="*/1 * * * *" >> iter8.yaml - - - name: Lint Kubernetes YAML file - if: steps.modified-files.outputs.any_modified == 'true' - uses: stackrox/kube-linter-action@v1 - with: - directory: iter8.yaml \ No newline at end of file From 4638a4896270d23bca73d5a114ac291e20dd01dd Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Tue, 1 Aug 2023 09:41:49 -0400 Subject: [PATCH 024/121] Remove grafana flag Signed-off-by: Alan Cha --- base/collect_grpc.go | 105 ++++------------------- base/collect_grpc_test.go | 5 -- base/collect_http.go | 175 ++++---------------------------------- base/collect_http_test.go | 1 - 4 files changed, 32 insertions(+), 254 deletions(-) diff --git a/base/collect_grpc.go b/base/collect_grpc.go index c8612f6d1..8cd70e986 100644 --- a/base/collect_grpc.go +++ b/base/collect_grpc.go @@ -39,11 +39,6 @@ type collectGRPCInputs struct { // Endpoints is used to define multiple endpoints to test Endpoints map[string]runner.Config `json:"endpoints" yaml:"endpoints"` - - // Determines if Grafana dashboard should be created - // dasboard vs report/assess tasks - // TODO: remove - Grafana 
bool `json:"grafana" yaml:"grafana"` } // collectGRPCTask enables load testing of gRPC services. @@ -123,11 +118,7 @@ func (t *collectGRPCTask) resultForVersion() (map[string]*runner.Report, error) continue } - resultsKey := gRPCMetricPrefix + "-" + endpointID - if t.With.Grafana { - resultsKey = endpoint.Call - } - results[resultsKey] = igr + results[endpoint.Call] = igr } } else { // TODO: supply all the allowed options @@ -140,11 +131,7 @@ func (t *collectGRPCTask) resultForVersion() (map[string]*runner.Report, error) return results, err } - resultsKey := gRPCMetricPrefix - if t.With.Grafana { - resultsKey = t.With.Call - } - results[resultsKey] = igr + results[t.With.Call] = igr } return results, err @@ -191,83 +178,23 @@ func (t *collectGRPCTask) run(exp *Experiment) error { if err = exp.Result.initInsightsWithNumVersions(1); err != nil { return err } - in := exp.Result.Insights - - if t.With.Grafana { - // push data to metrics service - ghzResult := GHZResult{ - EndpointResults: data, - Summary: *exp.Result.Insights, - } - - // get URL of metrics server from environment variable - metricsServerURL, ok := os.LookupEnv(MetricsServerURL) - if !ok { - errorMessage := "could not look up METRICS_SERVER_URL environment variable" - log.Logger.Error(errorMessage) - return fmt.Errorf(errorMessage) - } - - if err = putPerformanceResultToMetricsService(metricsServerURL, exp.Metadata.Namespace, exp.Metadata.Name, ghzResult); err != nil { - return err - } - } else { - // 4. Populate all metrics collected by this task - for provider, data := range data { - // populate grpc request count - // todo: this logic breaks for looped experiments. Fix when we get to loops. - m := provider + "/" + gRPCRequestCountMetricName - mm := MetricMeta{ - Description: "number of gRPC requests sent", - Type: CounterMetricType, - } - if err = in.updateMetric(m, mm, 0, float64(data.Count)); err != nil { - return err - } - // populate error count & rate - ec := float64(0) - for _, count := range data.ErrorDist { - ec += float64(count) - } - - // populate count - // todo: This logic breaks for looped experiments. Fix when we get to loops. - m = provider + "/" + gRPCErrorCountMetricName - mm = MetricMeta{ - Description: "number of responses that were errors", - Type: CounterMetricType, - } - if err = in.updateMetric(m, mm, 0, ec); err != nil { - return err - } + // push data to metrics service + ghzResult := GHZResult{ + EndpointResults: data, + Summary: *exp.Result.Insights, + } - // populate rate - // todo: This logic breaks for looped experiments. Fix when we get to loops. 
- m = provider + "/" + gRPCErrorRateMetricName - rc := float64(data.Count) - if rc != 0 { - mm = MetricMeta{ - Description: "fraction of responses that were errors", - Type: GaugeMetricType, - } - if err = in.updateMetric(m, mm, 0, ec/rc); err != nil { - return err - } - } + // get URL of metrics server from environment variable + metricsServerURL, ok := os.LookupEnv(MetricsServerURL) + if !ok { + errorMessage := "could not look up METRICS_SERVER_URL environment variable" + log.Logger.Error(errorMessage) + return fmt.Errorf(errorMessage) + } - // populate latency sample - m = provider + "/" + gRPCLatencySampleMetricName - mm = MetricMeta{ - Description: "gRPC Latency Sample", - Type: SampleMetricType, - Units: StringPointer("msec"), - } - lh := latencySample(data.Details) - if err = in.updateMetric(m, mm, 0, lh); err != nil { - return err - } - } + if err = putPerformanceResultToMetricsService(metricsServerURL, exp.Metadata.Namespace, exp.Metadata.Name, ghzResult); err != nil { + return err } return nil diff --git a/base/collect_grpc_test.go b/base/collect_grpc_test.go index 3e6c0e308..a5eb67e43 100644 --- a/base/collect_grpc_test.go +++ b/base/collect_grpc_test.go @@ -1,8 +1,6 @@ package base import ( - "encoding/json" - "fmt" "os" "strings" "testing" @@ -277,9 +275,6 @@ func TestMockGRPCWithSLOsAndPercentiles(t *testing.T) { err = exp.Spec[0].run(exp) assert.NoError(t, err) - expjson, _ := json.Marshal(exp) - fmt.Println(string(expjson)) - expBytes, _ := yaml.Marshal(exp) log.Logger.Debug("\n" + string(expBytes)) diff --git a/base/collect_http.go b/base/collect_http.go index 28c16fc81..e1dc71197 100644 --- a/base/collect_http.go +++ b/base/collect_http.go @@ -12,7 +12,6 @@ import ( "fortio.org/fortio/fhttp" "fortio.org/fortio/periodic" - "fortio.org/fortio/stats" "github.com/imdario/mergo" log "github.com/iter8-tools/iter8/base/log" ) @@ -61,11 +60,6 @@ type collectHTTPInputs struct { // Endpoints is used to define multiple endpoints to test Endpoints map[string]endpoint `json:"endpoints" yaml:"endpoints"` - - // Determines if Grafana dashboard should be created - // dasboard vs report/assess tasks - // TODO: remove - Grafana bool `json:"grafana" yaml:"grafana"` } // FortioResult is the raw data sent to the metrics server @@ -333,12 +327,7 @@ func (t *collectHTTPTask) getFortioResults() (map[string]*fhttp.HTTPRunnerResult continue } - // TODO: does ifr need to be a pointer? - resultsKey := httpMetricPrefix + "-" + endpointID - if t.With.Grafana { - resultsKey = endpoint.URL - } - results[resultsKey] = ifr + results[endpoint.URL] = ifr } } else { fo, err := getFortioOptions(t.With.endpoint) @@ -357,12 +346,7 @@ func (t *collectHTTPTask) getFortioResults() (map[string]*fhttp.HTTPRunnerResult return nil, err } - // TODO: does ifr need to be a pointer? 
- resultsKey := httpMetricPrefix - if t.With.Grafana { - resultsKey = t.With.endpoint.URL - } - results[resultsKey] = ifr + results[t.With.endpoint.URL] = ifr } return results, err @@ -395,151 +379,24 @@ func (t *collectHTTPTask) run(exp *Experiment) error { if err != nil { return err } - in := exp.Result.Insights - - if t.With.Grafana { - // push data to metrics service - fortioResult := FortioResult{ - EndpointResults: data, - Summary: *exp.Result.Insights, - } - - // get URL of metrics server from environment variable - metricsServerURL, ok := os.LookupEnv(MetricsServerURL) - if !ok { - errorMessage := "could not look up METRICS_SERVER_URL environment variable" - log.Logger.Error(errorMessage) - return fmt.Errorf(errorMessage) - } - - if err = putPerformanceResultToMetricsService(metricsServerURL, exp.Metadata.Namespace, exp.Metadata.Name, fortioResult); err != nil { - return err - } - } else { - for provider, data := range data { - // request count - m := provider + "/" + builtInHTTPRequestCountID - mm := MetricMeta{ - Description: "number of requests sent", - Type: CounterMetricType, - } - if err = in.updateMetric(m, mm, 0, float64(data.DurationHistogram.Count)); err != nil { - return err - } - - // error count & rate - val := float64(0) - for code, count := range data.RetCodes { - if t.errorCode(code) { - val += float64(count) - } - } - // error count - m = provider + "/" + builtInHTTPErrorCountID - mm = MetricMeta{ - Description: "number of responses that were errors", - Type: CounterMetricType, - } - if err = in.updateMetric(m, mm, 0, val); err != nil { - return err - } - - // error-rate - m = provider + "/" + builtInHTTPErrorRateID - rc := float64(data.DurationHistogram.Count) - if rc != 0 { - mm = MetricMeta{ - Description: "fraction of responses that were errors", - Type: GaugeMetricType, - } - if err = in.updateMetric(m, mm, 0, val/rc); err != nil { - return err - } - } - // mean-latency - m = provider + "/" + builtInHTTPLatencyMeanID - mm = MetricMeta{ - Description: "mean of observed latency values", - Type: GaugeMetricType, - Units: StringPointer("msec"), - } - if err = in.updateMetric(m, mm, 0, 1000.0*data.DurationHistogram.Avg); err != nil { - return err - } - - // stddev-latency - m = provider + "/" + builtInHTTPLatencyStdDevID - mm = MetricMeta{ - Description: "standard deviation of observed latency values", - Type: GaugeMetricType, - Units: StringPointer("msec"), - } - if err = in.updateMetric(m, mm, 0, 1000.0*data.DurationHistogram.StdDev); err != nil { - return err - } - - // min-latency - m = provider + "/" + builtInHTTPLatencyMinID - mm = MetricMeta{ - Description: "minimum of observed latency values", - Type: GaugeMetricType, - Units: StringPointer("msec"), - } - if err = in.updateMetric(m, mm, 0, 1000.0*data.DurationHistogram.Min); err != nil { - return err - } - - // max-latency - m = provider + "/" + builtInHTTPLatencyMaxID - mm = MetricMeta{ - Description: "maximum of observed latency values", - Type: GaugeMetricType, - Units: StringPointer("msec"), - } - if err = in.updateMetric(m, mm, 0, 1000.0*data.DurationHistogram.Max); err != nil { - return err - } + // push data to metrics service + fortioResult := FortioResult{ + EndpointResults: data, + Summary: *exp.Result.Insights, + } - // percentiles - for _, p := range data.DurationHistogram.Percentiles { - m = fmt.Sprintf("%v/%v%v", provider, builtInHTTPLatencyPercentilePrefix, p.Percentile) - mm = MetricMeta{ - Description: fmt.Sprintf("%v-th percentile of observed latency values", p.Percentile), - Type: 
GaugeMetricType, - Units: StringPointer("msec"), - } - if err = in.updateMetric(m, mm, 0, 1000.0*p.Value); err != nil { - return err - } - } + // get URL of metrics server from environment variable + metricsServerURL, ok := os.LookupEnv(MetricsServerURL) + if !ok { + errorMessage := "could not look up METRICS_SERVER_URL environment variable" + log.Logger.Error(errorMessage) + return fmt.Errorf(errorMessage) + } - // latency histogram - m = httpMetricPrefix + "/" + builtInHTTPLatencyHistID - mm = MetricMeta{ - Description: "Latency Histogram", - Type: HistogramMetricType, - Units: StringPointer("msec"), - } - lh := latencyHist(data.DurationHistogram) - if err = in.updateMetric(m, mm, 0, lh); err != nil { - return err - } - } + if err = putPerformanceResultToMetricsService(metricsServerURL, exp.Metadata.Namespace, exp.Metadata.Name, fortioResult); err != nil { + return err } return nil } - -// compute latency histogram by resampling -func latencyHist(hd *stats.HistogramData) []HistBucket { - buckets := []HistBucket{} - for _, v := range hd.Data { - buckets = append(buckets, HistBucket{ - Lower: v.Start * 1000.0, // sec to msec - Upper: v.End * 1000.0, - Count: uint64(v.Count), - }) - } - return buckets -} diff --git a/base/collect_http_test.go b/base/collect_http_test.go index f20baa08d..22b1442df 100644 --- a/base/collect_http_test.go +++ b/base/collect_http_test.go @@ -428,7 +428,6 @@ func TestRunCollectHTTPGrafana(t *testing.T) { endpoint: endpoint{ URL: baseURL + foo, }, - Grafana: true, }, } From 22af960e1203a1545fa0a16fac8cb32fd158c667 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Tue, 1 Aug 2023 09:45:02 -0400 Subject: [PATCH 025/121] Remove extraneous consts and func Signed-off-by: Alan Cha --- base/collect_grpc.go | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/base/collect_grpc.go b/base/collect_grpc.go index 8cd70e986..068b27d04 100644 --- a/base/collect_grpc.go +++ b/base/collect_grpc.go @@ -16,12 +16,8 @@ const ( CollectGRPCTaskName = "grpc" // gRPC metric prefix gRPCMetricPrefix = "grpc" - // gRPCRequestCountMetricName is name of the gRPC request count metric - gRPCRequestCountMetricName = "request-count" // gRPCErrorCountMetricName is name of the gRPC error count metric gRPCErrorCountMetricName = "error-count" - // gRPCErrorRateMetricName is name of the gRPC error rate metric - gRPCErrorRateMetricName = "error-rate" // gRPCLatencySampleMetricName is name of the gRPC latency sample metric gRPCLatencySampleMetricName = "latency" // countErrorsDefault is the default value which indicates if errors are counted @@ -137,15 +133,6 @@ func (t *collectGRPCTask) resultForVersion() (map[string]*runner.Report, error) return results, err } -// latencySample extracts a latency sample from ghz result details -func latencySample(rd []runner.ResultDetail) []float64 { - f := make([]float64, len(rd)) - for i := 0; i < len(rd); i++ { - f[i] = float64(rd[i].Latency.Milliseconds()) - } - return f -} - // Run executes this task func (t *collectGRPCTask) run(exp *Experiment) error { // 1. 
initialize defaults From ecd0841484cebd902b980f3e2beba339563d04c0 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Tue, 1 Aug 2023 09:59:33 -0400 Subject: [PATCH 026/121] Add assert Signed-off-by: Alan Cha --- .github/workflows/assets.yaml | 10 ++- action/assert.go | 113 ++++++++++++++++++++++++++++++++++ action/assert_test.go | 34 ++++++++++ cmd/kassert.go | 71 +++++++++++++++++++++ cmd/kassert_test.go | 98 +++++++++++++++++++++++++++++ 5 files changed, 325 insertions(+), 1 deletion(-) create mode 100644 action/assert.go create mode 100644 action/assert_test.go create mode 100644 cmd/kassert.go create mode 100644 cmd/kassert_test.go diff --git a/.github/workflows/assets.yaml b/.github/workflows/assets.yaml index 85819ea35..b7603f83b 100644 --- a/.github/workflows/assets.yaml +++ b/.github/workflows/assets.yaml @@ -110,6 +110,7 @@ jobs: --set runner=job - name: try other iter8 k commands run: | + iter8 k assert -c completed -c nofailure --timeout 60s iter8 k log iter8 k delete @@ -180,6 +181,7 @@ jobs: --set runner=job - name: try other iter8 k commands run: | + iter8 k assert -c completed -c nofailure --timeout 60s iter8 k log iter8 k delete @@ -207,6 +209,9 @@ jobs: --set ready.timeout=60s \ --set http.url=http://httpbin.default \ --set runner=job + - name: k assert experiment completed without failures + run: | + iter8 k assert -c completed -c nofailure --timeout 60s readiness-with-namespace: name: Kubernetes readiness test with namespace @@ -233,4 +238,7 @@ jobs: --set ready.timeout=60s \ --set ready.namespace=default \ --set http.url=http://httpbin.default/get \ - --set runner=job \ No newline at end of file + --set runner=job + - name: k assert experiment completed without failures + run: | + iter8 k assert -c completed -c nofailure --timeout 60s \ No newline at end of file diff --git a/action/assert.go b/action/assert.go new file mode 100644 index 000000000..9500f5ac8 --- /dev/null +++ b/action/assert.go @@ -0,0 +1,113 @@ +package action + +import ( + "fmt" + "strings" + "time" + + "github.com/iter8-tools/iter8/base" + "github.com/iter8-tools/iter8/base/log" + "github.com/iter8-tools/iter8/driver" +) + +const ( + // Completed states that the experiment is complete + Completed = "completed" + // NoFailure states that none of the tasks in the experiment have failed + NoFailure = "nofailure" + // SLOs states that all app versions participating in the experiment satisfy SLOs + SLOs = "slos" +) + +// AssertOpts are the options used for asserting experiment results +type AssertOpts struct { + // Timeout is the duration to wait for conditions to be satisfied + Timeout time.Duration + // Conditions are checked by assert + Conditions []string + // RunOpts provides options relating to experiment resources + RunOpts +} + +// NewAssertOpts initializes and returns assert opts +func NewAssertOpts(kd *driver.KubeDriver) *AssertOpts { + return &AssertOpts{ + RunOpts: *NewRunOpts(kd), + } +} + +// KubeRun asserts conditions for a Kubernetes experiment +func (aOpts *AssertOpts) KubeRun() (bool, error) { + if err := aOpts.KubeDriver.Init(); err != nil { + return false, err + } + + return aOpts.Run(aOpts.KubeDriver) +} + +// Run builds the experiment and verifies assert conditions +func (aOpts *AssertOpts) Run(eio base.Driver) (bool, error) { + allGood, err := aOpts.verify(eio) + if err != nil { + return false, err + } + if !allGood { + log.Logger.Error("assert conditions failed") + return false, nil + } + return true, nil +} + +// verify implements the core logic of assert +func (aOpts *AssertOpts) 
verify(eio base.Driver) (bool, error) { + // timeSpent tracks how much time has been spent so far in assert attempts + var timeSpent, _ = time.ParseDuration("0s") + + // sleepTime specifies how long to sleep in between retries of asserts + var sleepTime, _ = time.ParseDuration("3s") + + // check assert conditions + for { + exp, err := base.BuildExperiment(eio) + if err != nil { + return false, err + } + + allGood := true + + for _, cond := range aOpts.Conditions { + if strings.ToLower(cond) == Completed { + c := exp.Completed() + allGood = allGood && c + if c { + log.Logger.Info("experiment completed") + } else { + log.Logger.Info("experiment did not complete") + } + } else if strings.ToLower(cond) == NoFailure { + nf := exp.NoFailure() + allGood = allGood && nf + if nf { + log.Logger.Info("experiment has no failure") + } else { + log.Logger.Info("experiment failed") + } + } else { + log.Logger.Error("unsupported assert condition detected; ", cond) + return false, fmt.Errorf("unsupported assert condition detected; %v", cond) + } + } + + if allGood { + log.Logger.Info("all conditions were satisfied") + return true, nil + } + if timeSpent >= aOpts.Timeout { + log.Logger.Info("not all conditions were satisfied") + return false, nil + } + log.Logger.Infof("sleeping %v ................................", sleepTime) + time.Sleep(sleepTime) + timeSpent += sleepTime + } +} diff --git a/action/assert_test.go b/action/assert_test.go new file mode 100644 index 000000000..54d7cdc66 --- /dev/null +++ b/action/assert_test.go @@ -0,0 +1,34 @@ +package action + +import ( + "context" + "os" + "testing" + + "github.com/iter8-tools/iter8/base" + "github.com/iter8-tools/iter8/driver" + "github.com/stretchr/testify/assert" + "helm.sh/helm/v3/pkg/cli" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestKubeAssert(t *testing.T) { + _ = os.Chdir(t.TempDir()) + // fix aOpts + aOpts := NewAssertOpts(driver.NewFakeKubeDriver(cli.New())) + aOpts.Conditions = []string{Completed, NoFailure, SLOs} + + byteArray, _ := os.ReadFile(base.CompletePath("../testdata/assertinputs", driver.ExperimentPath)) + _, _ = aOpts.Clientset.CoreV1().Secrets("default").Create(context.TODO(), &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: "default", + }, + StringData: map[string]string{driver.ExperimentPath: string(byteArray)}, + }, metav1.CreateOptions{}) + + ok, err := aOpts.KubeRun() + assert.True(t, ok) + assert.NoError(t, err) +} diff --git a/cmd/kassert.go b/cmd/kassert.go new file mode 100644 index 000000000..1d484ad7e --- /dev/null +++ b/cmd/kassert.go @@ -0,0 +1,71 @@ +package cmd + +import ( + "errors" + "fmt" + "time" + + ia "github.com/iter8-tools/iter8/action" + "github.com/iter8-tools/iter8/base/log" + "github.com/iter8-tools/iter8/driver" + "github.com/spf13/cobra" +) + +// kassertDesc is the description of the k assert cmd +const kassertDesc = ` +Assert if the result of a Kubernetes experiment satisfies the specified conditions. If all conditions are satisfied, the command exits with code 0. Else, the command exits with code 1. + +Assertions are especially useful for automation inside CI/CD/GitOps pipelines. + +Supported conditions are 'completed', 'nofailure', 'slos', which indicate that the experiment has completed, none of the tasks have failed, and the SLOs are satisfied. 
+ + iter8 k assert -c completed -c nofailure -c slos + # same as iter8 k assert -c completed,nofailure,slos + +You can optionally specify a timeout, which is the maximum amount of time to wait for the conditions to be satisfied: + + iter8 k assert -c completed,nofailure,slos -t 5s +` + +// newAssertCmd creates the Kubernetes assert command +func newKAssertCmd(kd *driver.KubeDriver) *cobra.Command { + actor := ia.NewAssertOpts(kd) + + cmd := &cobra.Command{ + Use: "assert", + Short: "Assert if Kubernetes experiment result satisfies conditions", + Long: kassertDesc, + SilenceUsage: true, + RunE: func(_ *cobra.Command, _ []string) error { + allGood, err := actor.KubeRun() + if err != nil { + return err + } + if !allGood { + e := errors.New("assert conditions failed") + log.Logger.Error(e) + return e + } + return nil + }, + } + // options specific to k assert + addExperimentGroupFlag(cmd, &actor.Group) + actor.EnvSettings = settings + + // options shared with assert + addConditionFlag(cmd, &actor.Conditions) + addTimeoutFlag(cmd, &actor.Timeout) + return cmd +} + +// addConditionFlag adds the condition flag to command +func addConditionFlag(cmd *cobra.Command, conditionPtr *[]string) { + cmd.Flags().StringSliceVarP(conditionPtr, "condition", "c", nil, fmt.Sprintf("%v | %v | %v; can specify multiple or separate conditions with commas;", ia.Completed, ia.NoFailure, ia.SLOs)) + _ = cmd.MarkFlagRequired("condition") +} + +// addTimeoutFlag adds timeout flag to command +func addTimeoutFlag(cmd *cobra.Command, timeoutPtr *time.Duration) { + cmd.Flags().DurationVar(timeoutPtr, "timeout", 0, "timeout duration (e.g., 5s)") +} diff --git a/cmd/kassert_test.go b/cmd/kassert_test.go new file mode 100644 index 000000000..c2dc5e649 --- /dev/null +++ b/cmd/kassert_test.go @@ -0,0 +1,98 @@ +package cmd + +import ( + "context" + "fmt" + "os" + "path/filepath" + "testing" + + "fortio.org/fortio/fhttp" + "github.com/iter8-tools/iter8/base" + id "github.com/iter8-tools/iter8/driver" + "github.com/stretchr/testify/assert" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestKAssert(t *testing.T) { + _ = os.Chdir(t.TempDir()) + + // create and configure HTTP endpoint for testing + mux, addr := fhttp.DynamicHTTPServer(false) + url := fmt.Sprintf("http://127.0.0.1:%d/get", addr.Port) + var verifyHandlerCalled bool + mux.HandleFunc("/get", base.GetTrackingHandler(&verifyHandlerCalled)) + + // create experiment.yaml + base.CreateExperimentYaml(t, base.CompletePath("../testdata", "experiment.tpl"), url, id.ExperimentPath) + + // run test + testAssert(t, id.ExperimentPath, url, "output/kassert.txt", false) + // sanity check -- handler was called + assert.True(t, verifyHandlerCalled) +} + +func TestKAssertFailsSLOs(t *testing.T) { + _ = os.Chdir(t.TempDir()) + + // create and configure HTTP endpoint for testing + mux, addr := fhttp.DynamicHTTPServer(false) + url := fmt.Sprintf("http://127.0.0.1:%d/get", addr.Port) + var verifyHandlerCalled bool + mux.HandleFunc("/get", base.GetTrackingHandler(&verifyHandlerCalled)) + + // create experiment.yaml + base.CreateExperimentYaml(t, base.CompletePath("../testdata", "experiment_fails.tpl"), url, id.ExperimentPath) + + // run test + testAssert(t, id.ExperimentPath, url, "output/kassertfails.txt", true) + // sanity check -- handler was called + assert.True(t, verifyHandlerCalled) +} + +func testAssert(t *testing.T, experiment string, url string, expectedOutputFile string, expectError bool) { + tests := 
[]cmdTestCase{ + // k launch + { + name: "k launch", + cmd: fmt.Sprintf("k launch -c %v --localChart --set tasks={http,assess} --set http.url=%s --set http.duration=2s", base.CompletePath("../charts", "iter8"), url), + golden: base.CompletePath("../testdata", "output/klaunch.txt"), + }, + // k run + { + name: "k run", + cmd: "k run -g default --namespace default", + }, + // k assert + { + name: "k assert", + cmd: "k assert -c completed -c nofailure -c slos", + golden: base.CompletePath("../testdata", expectedOutputFile), + wantError: expectError, + }, + } + + // fake kube cluster + *kd = *id.NewFakeKubeDriver(settings) + + // read experiment from file created by caller + byteArray, _ := os.ReadFile(filepath.Clean(experiment)) + _, _ = kd.Clientset.CoreV1().Secrets("default").Create(context.TODO(), &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: "default", + }, + StringData: map[string]string{id.ExperimentPath: string(byteArray)}, + }, metav1.CreateOptions{}) + + _, _ = kd.Clientset.BatchV1().Jobs("default").Create(context.TODO(), &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default-1-job", + Namespace: "default", + }, + }, metav1.CreateOptions{}) + + runTestActionCmd(t, tests) +} From cc8ac2d17a9adaa04ceeef5e6cb4a5b837f049e5 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Tue, 1 Aug 2023 14:32:16 -0400 Subject: [PATCH 027/121] Fix test Signed-off-by: Alan Cha --- .github/workflows/assets.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/assets.yaml b/.github/workflows/assets.yaml index b7603f83b..520078723 100644 --- a/.github/workflows/assets.yaml +++ b/.github/workflows/assets.yaml @@ -241,4 +241,4 @@ jobs: --set runner=job - name: k assert experiment completed without failures run: | - iter8 k assert -c completed -c nofailure --timeout 60s \ No newline at end of file + iter8 k assert -n experiments -c completed -c nofailure --timeout 60s \ No newline at end of file From 771e3e0c98c3dc4cd1e9a6ec2e3eb79089faf4e1 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Tue, 1 Aug 2023 14:34:33 -0400 Subject: [PATCH 028/121] Remove custommetrics Signed-off-by: Alan Cha --- action/doc.go | 2 +- base/custom_metrics.go | 341 ------- base/custom_metrics_test.go | 863 ------------------ base/experiment.go | 110 +-- base/experiment_test.go | 7 - charts/iter8/templates/_experiment.tpl | 6 +- .../iter8/templates/_task-custommetrics.tpl | 6 - testdata/experiment_db.yaml | 9 - 8 files changed, 9 insertions(+), 1335 deletions(-) delete mode 100644 base/custom_metrics.go delete mode 100644 base/custom_metrics_test.go delete mode 100644 charts/iter8/templates/_task-custommetrics.tpl delete mode 100644 testdata/experiment_db.yaml diff --git a/action/doc.go b/action/doc.go index 7ba4946fc..a78b33500 100644 --- a/action/doc.go +++ b/action/doc.go @@ -1,5 +1,5 @@ // Package action contains the logic for each action that Iter8 can perform. // -// This is a library for calling top-level Iter8 actions like 'launch'. +// This is a library for calling top-level Iter8 actions like 'launch' and 'assert'. // Actions approximately match the command line invocations that the Iter8 CLI uses. 
package action diff --git a/base/custom_metrics.go b/base/custom_metrics.go deleted file mode 100644 index 0201989df..000000000 --- a/base/custom_metrics.go +++ /dev/null @@ -1,341 +0,0 @@ -package base - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "math" - "net/http" - "strconv" - "strings" - - "time" - - "github.com/itchyny/gojq" - log "github.com/iter8-tools/iter8/base/log" - "sigs.k8s.io/yaml" -) - -// ProviderSpec specifies how to get metrics from a provider -type ProviderSpec struct { - // URL is the database endpoint - URL string `json:"url" yaml:"url"` - - // Method is the HTTP method that needs to be used - Method string `json:"method" yaml:"method"` - - // Headers is the set of HTTP headers that need to be sent - Headers map[string]string `json:"headers" yaml:"headers"` - - // Metrics is the set of metrics that can be obtained - Metrics []Metric `json:"metrics" yaml:"metrics"` -} - -// Metric defines how to construct HTTP requests and parse HTTP responses -// when querying a database for a metric. Metric struct also includes metadata -// such as the name and description of the metric. -type Metric struct { - // Name is the name of the metric - Name string `json:"name" yaml:"name"` - - // Description is the description of the metric - Description *string `json:"description,omitempty" yaml:"description,omitempty"` - - // Type is the type of the metric, either gauge or counter - Type string `json:"type" yaml:"type"` - - // Units is the unit of the metric, which can be omitted for unitless metrics - Units *string `json:"units,omitempty" yaml:"units,omitempty"` - - // Params is the set of HTTP parameters that need to be sent - Params *[]HTTPParam `json:"params,omitempty" yaml:"params,omitempty"` - - // Body is the HTTP request body that needs to be sent - Body *string `json:"body,omitempty" yaml:"body,omitempty"` - - // JqExpression is the jq expression that can extract the value from the HTTP - // response - JqExpression string `json:"jqExpression" yaml:"jqExpression"` -} - -// HTTPParam defines an HTTP parameter -type HTTPParam struct { - // Name is the name of the HTTP parameter - Name string `json:"name" yaml:"name"` - - // Value is the value of the HTTP parameter - Value string `json:"value" yaml:"value"` -} - -// customMetricsInputs is the input to the custommetrics task -type customMetricsInputs struct { - // Template maps the provider to its template URL - Templates map[string]string `json:"templates" yaml:"templates"` - - // Values is used for substituting placeholders in metric templates. 
- Values map[string]interface{} `json:"values" yaml:"values"` - - // VersionValues are per version values that override values - // For each version, its version values are coalesced with values - // The length of this slice equals the number of versions - VersionValues []map[string]interface{} `json:"versionValues" yaml:"versionValues"` -} - -const ( - // CustomMetricsTaskName is the name of this task which fetches metrics templates, constructs metric specs, and then fetches metrics for each version from metric provider databases - CustomMetricsTaskName = "custommetrics" - - // startingTime specifies how far back to go in time for a specific version - // startingTimeStr is starting time placeholder - startingTimeStr = "startingTime" - - // how much time has elapsed between startingTime and now - elapsedTimeSecondsStr = "elapsedTimeSeconds" -) - -// customMetricsTask enables collection of custom metrics from databases -type customMetricsTask struct { - TaskMeta - With customMetricsInputs `json:"with" yaml:"with"` -} - -// initializeDefaults sets default values for the custom metrics task -func (t *customMetricsTask) initializeDefaults() { - // initialize versionValues if absent - if len(t.With.VersionValues) == 0 { - t.With.VersionValues = []map[string]interface{}{t.With.Values} - } -} - -// validate task inputs -func (t *customMetricsTask) validateInputs() error { - return nil -} - -// getElapsedTimeSeconds using values and experiment -// -// elapsedTime = currentTime - startingTime -// -// First, check for startingTime in values. -// If unavailable, use startingTime of the experiment. -func getElapsedTimeSeconds(values map[string]interface{}, exp *Experiment) (int64, error) { - startingTime := exp.Result.StartTime.Time - if values[startingTimeStr] != nil { - var err error - // Calling Parse() method with its parameters - startingTime, err = time.Parse(time.RFC3339, fmt.Sprintf("%v", values[startingTimeStr])) - - if err != nil { - return 0, errors.New("cannot parse startingTime") - } - } - - // calculate the elapsedTimeSeconds based on the startingTime if it has been provided - currentTime := time.Now() - return int64(currentTime.Sub(startingTime).Seconds()), nil -} - -// construct request to database and return extracted metric value -// -// bool return value represents whether the pipeline was able to run to -// completion (prevents double error statement) -func queryDatabaseAndGetValue(template ProviderSpec, metric Metric) (interface{}, bool) { - var requestBody io.Reader - if metric.Body != nil { - requestBody = strings.NewReader(*metric.Body) - } - - // create a new HTTP request - req, err := http.NewRequest(template.Method, template.URL, requestBody) - if err != nil { - log.Logger.Error("could not create new request for metric ", metric.Name, ": ", err) - return nil, false - } - - // iterate through headers - for headerName, headerValue := range template.Headers { - req.Header.Add(headerName, headerValue) - log.Logger.Debug("add header: ", headerName, ", value: ", headerValue) - } - req.Header.Add("Content-Type", "application/json;charset=utf-8") - - // add query params - q := req.URL.Query() - params := metric.Params - for _, param := range *params { - q.Add(param.Name, param.Value) - log.Logger.Debug("add param: ", param.Name, ", value: ", param.Value) - } - req.URL.RawQuery = q.Encode() - - // send request - client := &http.Client{} - resp, err := client.Do(req) - if err != nil { - log.Logger.Error("could not request metric ", metric.Name, ": ", err.Error()) - return nil, false - 
} - defer func() { - _ = resp.Body.Close() - }() - - // read response responseBody - responseBody, err := io.ReadAll(resp.Body) - if err != nil { - log.Logger.Error("could not read response body for metric ", metric.Name, ": ", err) - return nil, false - } - - log.Logger.Debug("response body: ", string(responseBody)) - - // JSON parse response body - var jsonBody interface{} - err = json.Unmarshal([]byte(responseBody), &jsonBody) - if err != nil { - log.Logger.Error("could not JSON parse response body for metric ", metric.Name, ": ", err) - return nil, false - } - - // perform jq expression - query, err := gojq.Parse(metric.JqExpression) - if err != nil { - log.Logger.Error("could not parse jq expression \""+metric.JqExpression+"\" for metric ", metric.Name, ": ", err) - return nil, false - } - iter := query.Run(jsonBody) - - value, ok := iter.Next() - if !ok { - log.Logger.Error("could not extract value with jq expression for metric ", metric.Name, ": ", err) - return nil, false - } - - return value, true -} - -// run executes this task -func (t *customMetricsTask) run(exp *Experiment) error { - // validate inputs - var err error - - err = t.validateInputs() - if err != nil { - return err - } - - // initialize defaults - t.initializeDefaults() - - err = exp.Result.initInsightsWithNumVersions(len(t.With.VersionValues)) - if err != nil { - return err - } - - // collect metrics from all providers and for all versions - for providerName, url := range t.With.Templates { - // finalize metrics spec - template, err := getTextTemplateFromURL(url) - if err != nil { - return err - } - - for i, versionValues := range t.With.VersionValues { - // merge values - vals, err := mustMergeOverwrite(t.With.Values, versionValues) - if err != nil { - return err - } - values := vals.(map[string]interface{}) - if len(values) == 0 { - values = make(map[string]interface{}) - } - // add elapsedTimeSeconds - elapsedTimeSeconds, err := getElapsedTimeSeconds(values, exp) - if err != nil { - return err - } - values[elapsedTimeSecondsStr] = elapsedTimeSeconds - - // get the metrics spec - var buf bytes.Buffer - err = template.Execute(&buf, values) - if err != nil { - log.Logger.Error("cannot execute metrics spec with values", err) - log.Logger.Error("metrics spec: ", buf.String()) - log.Logger.Error("values: ", values) - return err - } - - bytes, _ := io.ReadAll(&buf) - var provider ProviderSpec - err = yaml.Unmarshal(bytes, &provider) - if err != nil { - log.Logger.Error("cannot unmarshal provider spec", err) - log.Logger.Error("provider spec: ", string(bytes)) - return err - } - log.Logger.Debugf("provider spec %v for version %v\n", providerName, i) - log.Logger.Debug("--------------------------------") - log.Logger.Debug(string(bytes)) - - // get each metric - for _, metric := range provider.Metrics { - log.Logger.Debug("query for metric ", metric.Name) - - // perform database query and extract metric value - val, ok := queryDatabaseAndGetValue(provider, metric) - - // check if there were any issues querying database and extracting value - if !ok { - log.Logger.Error("could not query for metric ", metric.Name) - continue - } - - // do not save value if it has no value - if val == nil { - log.Logger.Error("could not extract non-nil value for metric ", metric.Name) - continue - } - - // determine metric type - var metricType MetricType - if metric.Type == "gauge" { - metricType = GaugeMetricType - } else if metric.Type == "counter" { - metricType = CounterMetricType - } - - // finalize metric data - mm := MetricMeta{ - 
Description: *metric.Description, - Type: metricType, - Units: metric.Units, - } - - // convert value to float - valueString := fmt.Sprint(val) - floatValue, err := strconv.ParseFloat(valueString, 64) - if err != nil { - log.Logger.Error("could not parse string \""+valueString+"\" to float: ", err) - continue - } - - if math.IsNaN(floatValue) { - log.Logger.Debug("metric value is NaN", errors.New("metric value is NaN - ignored")) - continue - } - - err = exp.Result.Insights.updateMetric(providerName+"/"+metric.Name, mm, i, floatValue) - - if err != nil { - log.Logger.Error("could not add update metric", err) - continue - } - } - } - } - - return nil -} diff --git a/base/custom_metrics_test.go b/base/custom_metrics_test.go deleted file mode 100644 index 797ec9e37..000000000 --- a/base/custom_metrics_test.go +++ /dev/null @@ -1,863 +0,0 @@ -package base - -import ( - "io" - "net/http" - "net/url" - "os" - "strings" - "testing" - - "github.com/jarcoal/httpmock" - "github.com/stretchr/testify/assert" -) - -const ( - queryString = "?query=" - exampleQueryParameter = "example query parameter\n" - exampleRequestBody = "example request body\n" - - // the provider URL is mocked - cePromProviderURL = "https://raw.githubusercontent.com/iter8-tools/iter8/master/testdata/metrics/test-ce.metrics.yaml" - testCE = "test-ce" - testCEPromURL = `test-database.com/prometheus/api/v1/query` - testCERequestCount = "sum(last_over_time(ibm_codeengine_application_requests_total{\n" + - "}[0s])) or on() vector(0)\n" - testCEErrorCount = "sum(last_over_time(ibm_codeengine_application_requests_total{\n" + - " ibm_codeengine_status!=\"200\",\n" + - "}[0s])) or on() vector(0)\n" - testCEErrorRate = "sum(last_over_time(ibm_codeengine_application_requests_total{\n" + - " ibm_codeengine_status!=\"200\",\n" + - "}[0s])) or on() vector(0)/sum(last_over_time(ibm_codeengine_application_requests_total{\n" + - "}[0s])) or on() vector(0)\n" - testCERequestCountWithRevisionName = "sum(last_over_time(ibm_codeengine_application_requests_total{\n" + - " ibm_codeengine_revision_name=\"v1\",\n" + - "}[0s])) or on() vector(0)\n" - testCEErrorCountWithRevisionName = "sum(last_over_time(ibm_codeengine_application_requests_total{\n" + - " ibm_codeengine_status!=\"200\",\n" + - " ibm_codeengine_revision_name=\"v1\",\n" + - "}[0s])) or on() vector(0)\n" - testCEErrorRateWithRevisionName = "sum(last_over_time(ibm_codeengine_application_requests_total{\n" + - " ibm_codeengine_status!=\"200\",\n" + - " ibm_codeengine_revision_name=\"v1\",\n" + - "}[0s])) or on() vector(0)/sum(last_over_time(ibm_codeengine_application_requests_total{\n" + - " ibm_codeengine_revision_name=\"v1\",\n" + - "}[0s])) or on() vector(0)\n" - - // the provider URL is mocked - testProviderURL = "https://raw.githubusercontent.com/iter8-tools/iter8/master/testdata/metrics/test-request-body.metrics.yaml" - testRequestBody = "test-request-body" - - // the provider URL is mocked - istioPromProviderURL = "https://raw.githubusercontent.com/iter8-tools/iter8/master/custommetrics/istio-prom.tpl" - - istioPromRequestCount = "sum(last_over_time(istio_requests_total{\n" + - " destination_workload=\"myApp\",\n" + - " destination_workload_namespace=\"production\",\n" + - " reporter=\"destination\",\n" + - "}[0s])) or on() vector(0)" - istioPromErrorCount = "sum(last_over_time(istio_requests_total{\n" + - " response_code=~'5..',\n" + - " destination_workload=\"myApp\",\n" + - " destination_workload_namespace=\"production\",\n" + - " reporter=\"destination\",\n" + - "}[0s])) or on() 
vector(0)" - istioPromErrorRate = "(sum(last_over_time(istio_requests_total{\n" + - " response_code=~'5..',\n" + - " destination_workload=\"myApp\",\n" + - " destination_workload_namespace=\"production\",\n" + - " reporter=\"destination\",\n" + - "}[0s])) or on() vector(0))/(sum(last_over_time(istio_requests_total{\n" + - " destination_workload=\"myApp\",\n" + - " destination_workload_namespace=\"production\",\n" + - " reporter=\"destination\",\n" + - "}[0s])) or on() vector(0))" - istioPromMeanLatency = "(sum(last_over_time(istio_request_duration_milliseconds_sum{\n" + - " destination_workload=\"myApp\",\n" + - " destination_workload_namespace=\"production\",\n" + - " reporter=\"destination\",\n" + - "}[0s])) or on() vector(0))/(sum(last_over_time(istio_requests_total{\n" + - " destination_workload=\"myApp\",\n" + - " destination_workload_namespace=\"production\",\n" + - " reporter=\"destination\",\n" + - "}[0s])) or on() vector(0))" - istioPromLatencyP90 = "histogram_quantile(0.90, sum(rate(istio_request_duration_milliseconds_bucket{\n" + - " destination_workload=\"myApp\",\n" + - " destination_workload_namespace=\"production\",\n" + - " reporter=\"destination\",\n" + - "}[0s])) by (le))" -) - -func getCustomMetricsTask(providerName string, providerURL string) *customMetricsTask { - // valid collect database task... should succeed - ct := &customMetricsTask{ - TaskMeta: TaskMeta{ - Task: StringPointer(CustomMetricsTaskName), - }, - With: customMetricsInputs{ - Templates: map[string]string{providerName: providerURL}, - }, - } - return ct -} - -// test getElapsedTimeSeconds() -func TestGetElapsedTimeSeconds(t *testing.T) { - _ = os.Chdir(t.TempDir()) - versionValues := map[string]interface{}{ - "startingTime": "2020-02-01T09:44:40Z", - } - - exp := &Experiment{ - Spec: []Task{}, - Result: &ExperimentResult{}, - } - - // this should add a startingTime that will be overwritten by the one in - // versionValues - exp.initResults(1) - - elapsedTimeSeconds, _ := getElapsedTimeSeconds(versionValues, exp) - - // elapsedTimeSeconds should be a large number - // - // if getElapsedTimeSeconds() used the starting time from the experiment instead of - // the one from versionValues, the elapsed time would be 0 or close to 0 - assert.Equal(t, elapsedTimeSeconds > 1000000, true) -} - -// test if a user sets startingTime incorrectly getElapsedTimeSeconds() -func TestStartingTimeFormatError(t *testing.T) { - _ = os.Chdir(t.TempDir()) - versionValues := map[string]interface{}{ - "startingTime": "1652935205", - } - - exp := &Experiment{ - Spec: []Task{}, - Result: &ExperimentResult{}, - } - - // this should add a startingTime that will be overwritten by the one in - // versionValues - exp.initResults(1) - _, err := getElapsedTimeSeconds(versionValues, exp) - assert.Error(t, err) -} - -// test istio-prom provider spec -func TestIstioProm(t *testing.T) { - dat, err := os.ReadFile(CompletePath("../testdata/custommetrics", "istio-prom.tpl")) - assert.NoError(t, err) - tplString := string(dat) - - _ = os.Chdir(t.TempDir()) - startHTTPMock(t) - ct := getCustomMetricsTask("istio-prom", istioPromProviderURL) - ct.With.Values = map[string]interface{}{"latencyPercentiles": []string{"90"}} - ct.With.VersionValues = []map[string]interface{}{{ - "labels": map[string]interface{}{ - "reporter": "destination", - "destination_workload": "myApp", - "destination_workload_namespace": "production", - }, - "elapsedTimeSeconds": "5", - }} - - // mock provider URL - httpmock.RegisterResponder(http.MethodGet, istioPromProviderURL, - 
httpmock.NewStringResponder(200, tplString)) - - // mock Istio Prometheus server - httpmock.RegisterResponder(http.MethodGet, "http://prometheus.istio-system:9090/api/v1/query", - func(req *http.Request) (*http.Response, error) { - queryParam := strings.TrimSpace(req.URL.Query().Get("query")) - - switch queryParam { - case istioPromRequestCount: - return httpmock.NewStringResponse(200, `{ - "status": "success", - "data": { - "resultType": "vector", - "result": [ - { - "metric": {}, - "value": [ - 1645602108.839, - "43" - ] - } - ] - } - }`), nil - - case istioPromErrorCount: - return httpmock.NewStringResponse(200, `{ - "status": "success", - "data": { - "resultType": "vector", - "result": [ - { - "metric": {}, - "value": [ - 1645602108.839, - "6" - ] - } - ] - } - }`), nil - - case istioPromErrorRate: - return httpmock.NewStringResponse(200, `{ - "status": "success", - "data": { - "resultType": "vector", - "result": [ - { - "metric": {}, - "value": [ - 1645602108.839, - "0.13953488372093023" - ] - } - ] - } - }`), nil - - case istioPromMeanLatency: - return httpmock.NewStringResponse(200, `{ - "status": "success", - "data": { - "resultType": "vector", - "result": [ - { - "metric": {}, - "value": [ - 1645602108.839, - "52" - ] - } - ] - } - }`), nil - - case istioPromLatencyP90: - return httpmock.NewStringResponse(200, `{ - "status": "success", - "data": { - "resultType": "vector", - "result": [ - { - "metric": {}, - "value": [ - 1945602108.839, - "64" - ] - } - ] - } - }`), nil - } - - return nil, nil - }) - - exp := &Experiment{ - Spec: []Task{ct}, - Result: &ExperimentResult{}, - } - exp.initResults(1) - _ = exp.Result.initInsightsWithNumVersions(1) - - err = ct.run(exp) - - // test should not fail - assert.NoError(t, err) - - // all three metrics should exist and have values - assert.Equal(t, exp.Result.Insights.NonHistMetricValues[0]["istio-prom/request-count"][0], float64(43)) - assert.Equal(t, exp.Result.Insights.NonHistMetricValues[0]["istio-prom/error-count"][0], float64(6)) - assert.Equal(t, exp.Result.Insights.NonHistMetricValues[0]["istio-prom/error-rate"][0], 0.13953488372093023) - assert.Equal(t, exp.Result.Insights.NonHistMetricValues[0]["istio-prom/latency-mean"][0], float64(52)) - assert.Equal(t, exp.Result.Insights.NonHistMetricValues[0]["istio-prom/latency-p90"][0], float64(64)) -} - -func TestNaN(t *testing.T) { - dat, err := os.ReadFile(CompletePath("../testdata/custommetrics", "nan.tpl")) - assert.NoError(t, err) - tplString := string(dat) - - _ = os.Chdir(t.TempDir()) - startHTTPMock(t) - ct := getCustomMetricsTask("nan", "http://url") - - // mock provider URL - httpmock.RegisterResponder(http.MethodGet, "http://url", - httpmock.NewStringResponder(200, tplString)) - - // mock provider - httpmock.RegisterResponder(http.MethodGet, "http://url/query", - func(req *http.Request) (*http.Response, error) { - queryParam := strings.TrimSpace(req.URL.Query().Get("query")) - t.Logf("queryParam = %s", queryParam) - - switch queryParam { - case "query-tonumber": - return httpmock.NewStringResponse(200, `{"value": "NaN"}`), nil - case "query-no-tonumber": - return httpmock.NewStringResponse(200, `{"value": "NaN"}`), nil - } - - return nil, nil - }) - - // experiment - exp := &Experiment{ - Spec: []Task{ct}, - Result: &ExperimentResult{}, - } - exp.initResults(1) - _ = exp.Result.initInsightsWithNumVersions(1) - - err = ct.run(exp) - - // task run should not fail - assert.NoError(t, err) - - // no metrics should be recorded - assert.NotContains(t, 
exp.Result.Insights.NonHistMetricValues[0], "nan/query-tonumber") - assert.NotContains(t, exp.Result.Insights.NonHistMetricValues[0], "nan/query-no-tonumber") -} - -// basic test with one version, mimicking Code Engine -// one version, three successful metrics -func TestCEOneVersion(t *testing.T) { - dat, err := os.ReadFile(CompletePath("../testdata/custommetrics", "test-ce.tpl")) - assert.NoError(t, err) - tplString := string(dat) - - _ = os.Chdir(t.TempDir()) - startHTTPMock(t) - ct := getCustomMetricsTask(testCE, cePromProviderURL) - - // mock provider URL - httpmock.RegisterResponder(http.MethodGet, istioPromProviderURL, - httpmock.NewStringResponder(200, tplString)) - - // request-count - httpmock.RegisterResponder(http.MethodGet, testCEPromURL+queryString+url.QueryEscape(testCERequestCount), - httpmock.NewStringResponder(200, `{ - "status": "success", - "data": { - "resultType": "vector", - "result": [ - { - "metric": {}, - "value": [ - 1645602108.839, - "43" - ] - } - ] - } - }`)) - - // error-count - httpmock.RegisterResponder(http.MethodGet, testCEPromURL+queryString+url.QueryEscape(testCEErrorCount), - httpmock.NewStringResponder(200, `{ - "status": "success", - "data": { - "resultType": "vector", - "result": [ - { - "metric": {}, - "value": [ - 1645648760.725, - "6" - ] - } - ] - } - }`)) - - // error-rate - httpmock.RegisterResponder(http.MethodGet, testCEPromURL+queryString+url.QueryEscape(testCEErrorRate), - httpmock.NewStringResponder(200, `{ - "status": "success", - "data": { - "resultType": "vector", - "result": [ - { - "metric": {}, - "value": [ - 1645043851.825, - "0.13953488372093023" - ] - } - ] - } - }`)) - - exp := &Experiment{ - Spec: []Task{ct}, - Result: &ExperimentResult{}, - } - exp.initResults(1) - _ = exp.Result.initInsightsWithNumVersions(1) - - err = ct.run(exp) - - // test should not fail - assert.NoError(t, err) - - // all three metrics should exist and have values - assert.Equal(t, exp.Result.Insights.NonHistMetricValues[0]["test-ce/request-count"][0], float64(43)) - assert.Equal(t, exp.Result.Insights.NonHistMetricValues[0]["test-ce/error-count"][0], float64(6)) - assert.Equal(t, exp.Result.Insights.NonHistMetricValues[0]["test-ce/error-rate"][0], 0.13953488372093023) -} - -// basic test with versionValues, mimicking Code Engine -// one version, three successful metrics -func TestCEVersionValues(t *testing.T) { - dat, err := os.ReadFile(CompletePath("../testdata/custommetrics", "test-ce.tpl")) - assert.NoError(t, err) - tplString := string(dat) - - _ = os.Chdir(t.TempDir()) - startHTTPMock(t) - ct := getCustomMetricsTask(testCE, cePromProviderURL) - - // mock provider URL - httpmock.RegisterResponder(http.MethodGet, istioPromProviderURL, - httpmock.NewStringResponder(200, tplString)) - - ct.With.VersionValues = []map[string]interface{}{{ - "ibm_codeengine_revision_name": "v1", - }} - - // request-count - httpmock.RegisterResponder(http.MethodGet, testCEPromURL+queryString+url.QueryEscape(testCERequestCountWithRevisionName), - httpmock.NewStringResponder(200, `{ - "status": "success", - "data": { - "resultType": "vector", - "result": [ - { - "metric": {}, - "value": [ - 1645602108.839, - "43" - ] - } - ] - } - }`)) - - // error-count - httpmock.RegisterResponder(http.MethodGet, testCEPromURL+queryString+url.QueryEscape(testCEErrorCountWithRevisionName), - httpmock.NewStringResponder(200, `{ - "status": "success", - "data": { - "resultType": "vector", - "result": [ - { - "metric": {}, - "value": [ - 1645648760.725, - "6" - ] - } - ] - } - }`)) - - // 
error-rate - httpmock.RegisterResponder(http.MethodGet, testCEPromURL+queryString+url.QueryEscape(testCEErrorRateWithRevisionName), - httpmock.NewStringResponder(200, `{ - "status": "success", - "data": { - "resultType": "vector", - "result": [ - { - "metric": {}, - "value": [ - 1645043851.825, - "0.13953488372093023" - ] - } - ] - } - }`)) - - exp := &Experiment{ - Spec: []Task{ct}, - Result: &ExperimentResult{}, - } - exp.initResults(1) - _ = exp.Result.initInsightsWithNumVersions(1) - - err = ct.run(exp) - - // test should not fail - assert.NoError(t, err) - - // all three metrics should exist and have values - assert.Equal(t, exp.Result.Insights.NonHistMetricValues[0]["test-ce/request-count"][0], float64(43)) - assert.Equal(t, exp.Result.Insights.NonHistMetricValues[0]["test-ce/error-count"][0], float64(6)) - assert.Equal(t, exp.Result.Insights.NonHistMetricValues[0]["test-ce/error-rate"][0], 0.13953488372093023) -} - -// test with one version and improper authorization, mimicking Code Engine -// one version, three successful metrics -func TestCEUnauthorized(t *testing.T) { - dat, err := os.ReadFile(CompletePath("../testdata/custommetrics", "test-ce.tpl")) - assert.NoError(t, err) - tplString := string(dat) - - _ = os.Chdir(t.TempDir()) - startHTTPMock(t) - ct := getCustomMetricsTask(testCE, cePromProviderURL) - - // mock provider URL - httpmock.RegisterResponder(http.MethodGet, istioPromProviderURL, - httpmock.NewStringResponder(200, tplString)) - - // request-count - httpmock.RegisterResponder(http.MethodGet, testCEPromURL+queryString+url.QueryEscape(testCERequestCount), - httpmock.NewStringResponder(401, `Unauthorized`)) - - // error-count - httpmock.RegisterResponder(http.MethodGet, testCEPromURL+queryString+url.QueryEscape(testCEErrorCount), - httpmock.NewStringResponder(401, `Unauthorized`)) - - // error-rate - httpmock.RegisterResponder(http.MethodGet, testCEPromURL+queryString+url.QueryEscape(testCEErrorRate), - httpmock.NewStringResponder(401, `Unauthorized`)) - - exp := &Experiment{ - Spec: []Task{ct}, - Result: &ExperimentResult{}, - } - exp.initResults(1) - _ = exp.Result.initInsightsWithNumVersions(1) - - err = ct.run(exp) - - // test should not fail - assert.NoError(t, err) - - // no values should be collected because of unauthorized requests - assert.Equal(t, len(exp.Result.Insights.NonHistMetricValues[0]), 0) -} - -// test with one version with some values, mimicking Code Engine -// one version, three successful metrics, one without values -func TestCESomeValues(t *testing.T) { - dat, err := os.ReadFile(CompletePath("../testdata/custommetrics", "test-ce.tpl")) - assert.NoError(t, err) - tplString := string(dat) - - _ = os.Chdir(t.TempDir()) - startHTTPMock(t) - ct := getCustomMetricsTask(testCE, cePromProviderURL) - - // mock provider URL - httpmock.RegisterResponder(http.MethodGet, istioPromProviderURL, - httpmock.NewStringResponder(200, tplString)) - - // request-count - httpmock.RegisterResponder(http.MethodGet, testCEPromURL+queryString+url.QueryEscape(testCERequestCount), httpmock.NewStringResponder(200, `{ - "status": "success", - "data": { - "resultType": "vector", - "result": [] - } - }`)) - - // error-count - httpmock.RegisterResponder(http.MethodGet, testCEPromURL+queryString+url.QueryEscape(testCEErrorCount), - httpmock.NewStringResponder(200, `{ - "status": "success", - "data": { - "resultType": "vector", - "result": [ - { - "metric": {}, - "value": [ - 1645648760.725, - "6" - ] - } - ] - } - }`)) - - // error-rate - 
httpmock.RegisterResponder(http.MethodGet, testCEPromURL+queryString+url.QueryEscape(testCEErrorRate), - httpmock.NewStringResponder(200, `{ - "status": "success", - "data": { - "resultType": "vector", - "result": [ - { - "metric": {}, - "value": [ - 1645043851.825, - "0.13953488372093023" - ] - } - ] - } - }`)) - - exp := &Experiment{ - Spec: []Task{ct}, - Result: &ExperimentResult{}, - } - exp.initResults(1) - _ = exp.Result.initInsightsWithNumVersions(1) - - err = ct.run(exp) - - // test should not fail - assert.NoError(t, err) - - // two metrics should exist and have values - assert.Equal(t, exp.Result.Insights.NonHistMetricValues[0]["test-ce/error-count"][0], float64(6)) - assert.Equal(t, exp.Result.Insights.NonHistMetricValues[0]["test-ce/error-rate"][0], 0.13953488372093023) - - // request-count should not exist because there was no value from response - _, ok := exp.Result.Insights.NonHistMetricValues[0]["test-ce/request-count"] - assert.Equal(t, ok, false) -} - -// test with two version with some values, mimicking Code Engine -// two versions, four successful metrics, two without values -func TestCEMultipleVersions(t *testing.T) { - dat, err := os.ReadFile(CompletePath("../testdata/custommetrics", "test-ce.tpl")) - assert.NoError(t, err) - tplString := string(dat) - - _ = os.Chdir(t.TempDir()) - startHTTPMock(t) - ct := getCustomMetricsTask(testCE, cePromProviderURL) - - ct.With.VersionValues = []map[string]interface{}{{}, {}} - - // mock provider URL - httpmock.RegisterResponder(http.MethodGet, istioPromProviderURL, - httpmock.NewStringResponder(200, tplString)) - - // request-count - httpmock.RegisterResponder(http.MethodGet, testCEPromURL+queryString+url.QueryEscape(testCERequestCount), httpmock.NewStringResponder(200, `{ - "status": "success", - "data": { - "resultType": "vector", - "result": [] - } - }`)) - - // error-count - httpmock.RegisterResponder(http.MethodGet, testCEPromURL+queryString+url.QueryEscape(testCEErrorCount), - httpmock.NewStringResponder(200, `{ - "status": "success", - "data": { - "resultType": "vector", - "result": [ - { - "metric": {}, - "value": [ - 1645648760.725, - "6" - ] - } - ] - } - }`)) - - // error-rate - httpmock.RegisterResponder(http.MethodGet, testCEPromURL+queryString+url.QueryEscape(testCEErrorRate), - httpmock.NewStringResponder(200, `{ - "status": "success", - "data": { - "resultType": "vector", - "result": [ - { - "metric": {}, - "value": [ - 1645043851.825, - "0.13953488372093023" - ] - } - ] - } - }`)) - - exp := &Experiment{ - Spec: []Task{ct}, - Result: &ExperimentResult{}, - } - exp.initResults(1) - _ = exp.Result.initInsightsWithNumVersions(2) - - err = ct.run(exp) - - // test should not fail - assert.NoError(t, err) - - // two metrics should exist and have values - assert.Equal(t, exp.Result.Insights.NonHistMetricValues[0]["test-ce/error-count"][0], float64(6)) - assert.Equal(t, exp.Result.Insights.NonHistMetricValues[1]["test-ce/error-count"][0], float64(6)) - assert.Equal(t, exp.Result.Insights.NonHistMetricValues[0]["test-ce/error-rate"][0], 0.13953488372093023) - assert.Equal(t, exp.Result.Insights.NonHistMetricValues[1]["test-ce/error-rate"][0], 0.13953488372093023) - - // request-count should not exist because there was no value from response - _, ok := exp.Result.Insights.NonHistMetricValues[0]["test-ce/request-count"] - assert.Equal(t, ok, false) -} - -// test with two version with some values, mimicking Code Engine -// two versions, four successful metrics, two without values -func TestCEMultipleVersionsAndMetrics(t 
*testing.T) { - dat, err := os.ReadFile(CompletePath("../testdata/custommetrics", "test-ce.tpl")) - assert.NoError(t, err) - tplString := string(dat) - - _ = os.Chdir(t.TempDir()) - startHTTPMock(t) - ct := getCustomMetricsTask(testCE, cePromProviderURL) - - ct.With.VersionValues = []map[string]interface{}{{}, {}} - - // mock provider URL - httpmock.RegisterResponder(http.MethodGet, istioPromProviderURL, - httpmock.NewStringResponder(200, tplString)) - - // request-count - httpmock.RegisterResponder(http.MethodGet, testCEPromURL+queryString+url.QueryEscape(testCERequestCount), httpmock.NewStringResponder(200, `{ - "status": "success", - "data": { - "resultType": "vector", - "result": [] - } - }`)) - - // error-count - httpmock.RegisterResponder(http.MethodGet, testCEPromURL+queryString+url.QueryEscape(testCEErrorCount), - httpmock.NewStringResponder(200, `{ - "status": "success", - "data": { - "resultType": "vector", - "result": [ - { - "metric": {}, - "value": [ - 1645648760.725, - "6" - ] - } - ] - } - }`)) - - // error-rate - httpmock.RegisterResponder(http.MethodGet, testCEPromURL+queryString+url.QueryEscape(testCEErrorRate), - httpmock.NewStringResponder(200, `{ - "status": "success", - "data": { - "resultType": "vector", - "result": [ - { - "metric": {}, - "value": [ - 1645043851.825, - "0.13953488372093023" - ] - } - ] - } - }`)) - - exp := &Experiment{ - Spec: []Task{ct}, - Result: &ExperimentResult{}, - } - exp.initResults(1) - _ = exp.Result.initInsightsWithNumVersions(2) - - err = ct.run(exp) - - // test should not fail - assert.NoError(t, err) - - // two metrics should exist and have values - assert.Equal(t, exp.Result.Insights.NonHistMetricValues[0]["test-ce/error-count"][0], float64(6)) - assert.Equal(t, exp.Result.Insights.NonHistMetricValues[1]["test-ce/error-count"][0], float64(6)) - assert.Equal(t, exp.Result.Insights.NonHistMetricValues[0]["test-ce/error-rate"][0], 0.13953488372093023) - assert.Equal(t, exp.Result.Insights.NonHistMetricValues[1]["test-ce/error-rate"][0], 0.13953488372093023) - - // request-count should not exist because there was no value from response - _, ok := exp.Result.Insights.NonHistMetricValues[0]["test-ce/request-count"] - assert.Equal(t, ok, false) -} - -// basic test with a request body -func TestRequestBody(t *testing.T) { - dat, err := os.ReadFile(CompletePath("../testdata/custommetrics", testRequestBody+".tpl")) - assert.NoError(t, err) - tplString := string(dat) - - _ = os.Chdir(t.TempDir()) - startHTTPMock(t) - ct := getCustomMetricsTask(testRequestBody, testProviderURL) - - // mock provider URL - httpmock.RegisterResponder(http.MethodGet, istioPromProviderURL, - httpmock.NewStringResponder(200, tplString)) - - // request-count - httpmock.RegisterResponder(http.MethodGet, testCEPromURL+queryString+url.QueryEscape(exampleQueryParameter), - func(req *http.Request) (*http.Response, error) { - if req.Body != nil { - b, err := io.ReadAll(req.Body) - if err != nil { - panic(err) - } - - if string(b) == exampleRequestBody { - return httpmock.NewStringResponse(200, `{ - "status": "success", - "data": { - "resultType": "vector", - "result": [ - { - "metric": {}, - "value": [ - 1645602108.839, - "43" - ] - } - ] - } - }`), nil - } - } - - return nil, nil - }) - - exp := &Experiment{ - Spec: []Task{ct}, - Result: &ExperimentResult{}, - } - exp.initResults(1) - _ = exp.Result.initInsightsWithNumVersions(1) - - err = ct.run(exp) - - // test should not fail - assert.NoError(t, err) - - assert.Equal(t, 
exp.Result.Insights.NonHistMetricValues[0][testRequestBody+"/request-count"][0], float64(43)) -} diff --git a/base/experiment.go b/base/experiment.go index 322ce2072..1a6e0a7cf 100644 --- a/base/experiment.go +++ b/base/experiment.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" "math" - "reflect" "regexp" "strconv" "strings" @@ -243,14 +242,6 @@ func (s *ExperimentSpec) UnmarshalJSON(data []byte) error { return e } tsk = rt - case CustomMetricsTaskName: - cdt := &customMetricsTask{} - if err := json.Unmarshal(tBytes, cdt); err != nil { - e := errors.New("json unmarshal error") - log.Logger.WithStackTrace(err.Error()).Error(e) - return e - } - tsk = cdt case CollectHTTPTaskName: cht := &collectHTTPTask{} if err := json.Unmarshal(tBytes, cht); err != nil { @@ -288,96 +279,6 @@ func (s *ExperimentSpec) UnmarshalJSON(data []byte) error { return nil } -// metricTypeMatch checks if metric value is a match for its type -func metricTypeMatch(t MetricType, val interface{}) bool { - switch v := val.(type) { - case float64: - return t == CounterMetricType || t == GaugeMetricType - case []float64: - return t == SampleMetricType - case []HistBucket: - return t == HistogramMetricType - case *summarymetrics.SummaryMetric: - return t == SummaryMetricType - default: - log.Logger.Error("unsupported type for metric value: ", v) - return false - } -} - -// updateMetricValueScalar updates a scalar metric value for a given version -func (in *Insights) updateMetricValueScalar(m string, i int, val float64) { - in.NonHistMetricValues[i][m] = append(in.NonHistMetricValues[i][m], val) -} - -// updateMetricValueVector updates a vector metric value for a given version -func (in *Insights) updateMetricValueVector(m string, i int, val []float64) { - in.NonHistMetricValues[i][m] = append(in.NonHistMetricValues[i][m], val...) -} - -// updateMetricValueHist updates a histogram metric value for a given version -func (in *Insights) updateMetricValueHist(m string, i int, val []HistBucket) { - in.HistMetricValues[i][m] = append(in.HistMetricValues[i][m], val...) 
-} - -// updateSummaryMetric updates a summary metric value for a given version -func (in *Insights) updateSummaryMetric(m string, i int, val *summarymetrics.SummaryMetric) { - in.SummaryMetricValues[i][m] = *val -} - -// registerMetric registers a new metric by adding its meta data -func (in *Insights) registerMetric(m string, mm MetricMeta) error { - if old, ok := in.MetricsInfo[m]; ok && !reflect.DeepEqual(old, mm) { - err := fmt.Errorf("old and new metric meta for %v differ", m) - log.Logger.WithStackTrace(fmt.Sprintf("old: %v \nnew: %v", old, mm)).Error(err) - return err - } - in.MetricsInfo[m] = mm - return nil -} - -// updateMetric registers a metric and adds a metric value for a given version -// metric names will be normalized -func (in *Insights) updateMetric(m string, mm MetricMeta, i int, val interface{}) error { - var err error - if !metricTypeMatch(mm.Type, val) { - err = fmt.Errorf("metric value and type are incompatible; name: %v meta: %v version: %v value: %v", m, mm, i, val) - log.Logger.Error(err) - return err - } - - if in.NumVersions <= i { - err := fmt.Errorf("insufficient number of versions %v with version index %v", in.NumVersions, i) - log.Logger.Error(err) - return err - } - - nm, err := NormalizeMetricName(m) - if err != nil { - return err - } - - err = in.registerMetric(nm, mm) - if err != nil { - return err - } - - switch mm.Type { - case CounterMetricType, GaugeMetricType: - in.updateMetricValueScalar(nm, i, val.(float64)) - case SampleMetricType: - in.updateMetricValueVector(nm, i, val.([]float64)) - case HistogramMetricType: - in.updateMetricValueHist(nm, i, val.([]HistBucket)) - case SummaryMetricType: - in.updateSummaryMetric(nm, i, val.(*summarymetrics.SummaryMetric)) - default: - err := fmt.Errorf("unknown metric type %v", mm.Type) - log.Logger.Error(err) - } - return nil -} - // TrackVersionStr creates a string of version name/track for display purposes func (in *Insights) TrackVersionStr(i int) string { // if VersionNames not defined or all fields empty return default "version i" @@ -416,17 +317,18 @@ func (exp *Experiment) initResults(revision int) { // insights data structure contains metrics data structures, so this will also // init metrics func (r *ExperimentResult) initInsightsWithNumVersions(n int) error { - if r.Insights != nil { + if r.Insights == nil { + r.Insights = &Insights{ + NumVersions: n, + } + } else { if r.Insights.NumVersions != n { err := fmt.Errorf("inconsistent number for app versions; old (%v); new (%v)", r.Insights.NumVersions, n) log.Logger.Error(err) return err } - } else { - r.Insights = &Insights{ - NumVersions: n, - } } + return r.Insights.initMetrics() } diff --git a/base/experiment_test.go b/base/experiment_test.go index a20817d22..dd405308f 100644 --- a/base/experiment_test.go +++ b/base/experiment_test.go @@ -26,13 +26,6 @@ func TestReadExperiment(t *testing.T) { err = yaml.Unmarshal(b, e) assert.NoError(t, err) assert.Equal(t, 1, len(e.Spec)) - - b, err = os.ReadFile(CompletePath("../testdata", "experiment_db.yaml")) - assert.NoError(t, err) - e = &Experiment{} - err = yaml.Unmarshal(b, e) - assert.NoError(t, err) - assert.Equal(t, 1, len(e.Spec)) } func TestRunningTasks(t *testing.T) { diff --git a/charts/iter8/templates/_experiment.tpl b/charts/iter8/templates/_experiment.tpl index 5f09eface..05b8ca7a0 100644 --- a/charts/iter8/templates/_experiment.tpl +++ b/charts/iter8/templates/_experiment.tpl @@ -7,9 +7,7 @@ metadata: namespace: {{ .Release.Namespace }} spec: {{- range .Values.tasks }} - {{- if eq 
"custommetrics" . }} - {{- include "task.custommetrics" $.Values.custommetrics -}} - {{- else if eq "grpc" . }} + {{- if eq "grpc" . }} {{- include "task.grpc" $.Values.grpc -}} {{- else if eq "http" . }} {{- include "task.http" $.Values.http -}} @@ -20,7 +18,7 @@ spec: {{- else if eq "github" . }} {{- include "task.github" $.Values.github -}} {{- else }} - {{- fail "task name must be one of custommetrics, grpc, http, ready, github, or slack" -}} + {{- fail "task name must be one of grpc, http, ready, github, or slack" -}} {{- end }} {{- end }} result: diff --git a/charts/iter8/templates/_task-custommetrics.tpl b/charts/iter8/templates/_task-custommetrics.tpl deleted file mode 100644 index 189015b16..000000000 --- a/charts/iter8/templates/_task-custommetrics.tpl +++ /dev/null @@ -1,6 +0,0 @@ -{{- define "task.custommetrics" }} -# task: collect custom metrics from providers (databases) -- task: custommetrics - with: -{{ . | toYaml | indent 4 }} -{{- end }} \ No newline at end of file diff --git a/testdata/experiment_db.yaml b/testdata/experiment_db.yaml deleted file mode 100644 index 23a524be6..000000000 --- a/testdata/experiment_db.yaml +++ /dev/null @@ -1,9 +0,0 @@ -spec: -# task 1: collect custom metrics -- task: custommetrics - with: - templates: - kfserving: https://raw.githubusercontent.com/iter8-tools/iter8/master/testdata/metrics/kfserving.metrics.yaml - values: - namespace_name: ns-candidate - startingTime: Jan 2, 2006 at 3:04pm (MST) From 8b65f59244a6b2f1deb0192c309b56a707257ddb Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Tue, 1 Aug 2023 14:37:08 -0400 Subject: [PATCH 029/121] Remove testdata for autoX and custommetrics Signed-off-by: Alan Cha --- testdata/autox_inputs/config.empty.yaml | 0 testdata/autox_inputs/config.example.yaml | 28 ----- testdata/autox_inputs/config.garbage.yaml | 1 - testdata/autox_inputs/config.invalid.yaml | 1 - testdata/custommetrics/istio-prom.tpl | 118 ------------------- testdata/custommetrics/nan.tpl | 20 ---- testdata/custommetrics/test-ce.tpl | 82 ------------- testdata/custommetrics/test-request-body.tpl | 16 --- 8 files changed, 266 deletions(-) delete mode 100644 testdata/autox_inputs/config.empty.yaml delete mode 100644 testdata/autox_inputs/config.example.yaml delete mode 100644 testdata/autox_inputs/config.garbage.yaml delete mode 100644 testdata/autox_inputs/config.invalid.yaml delete mode 100644 testdata/custommetrics/istio-prom.tpl delete mode 100644 testdata/custommetrics/nan.tpl delete mode 100644 testdata/custommetrics/test-ce.tpl delete mode 100644 testdata/custommetrics/test-request-body.tpl diff --git a/testdata/autox_inputs/config.empty.yaml b/testdata/autox_inputs/config.empty.yaml deleted file mode 100644 index e69de29bb..000000000 diff --git a/testdata/autox_inputs/config.example.yaml b/testdata/autox_inputs/config.example.yaml deleted file mode 100644 index e4b62861e..000000000 --- a/testdata/autox_inputs/config.example.yaml +++ /dev/null @@ -1,28 +0,0 @@ -specs: - myApp: - trigger: - name: myApp - namespace: default - group: "apps" - version: "v1" - resource: "deployments" - releaseSpecs: - name1: - name: abc - version: 1.0.0 - name2: - name: def - values: - hello: world - version: 1.0.0 - myApp2: - trigger: - name: myApp2 - namespace: test - group: "apps" - version: "v1" - resource: "deployments" - releaseSpecs: - name3: - name: ghi - version: 1.0.0 diff --git a/testdata/autox_inputs/config.garbage.yaml b/testdata/autox_inputs/config.garbage.yaml deleted file mode 100644 index 8b0ce0c41..000000000 --- 
a/testdata/autox_inputs/config.garbage.yaml +++ /dev/null @@ -1 +0,0 @@ -invalid yaml file \ No newline at end of file diff --git a/testdata/autox_inputs/config.invalid.yaml b/testdata/autox_inputs/config.invalid.yaml deleted file mode 100644 index 7daacd5db..000000000 --- a/testdata/autox_inputs/config.invalid.yaml +++ /dev/null @@ -1 +0,0 @@ -foo: bar \ No newline at end of file diff --git a/testdata/custommetrics/istio-prom.tpl b/testdata/custommetrics/istio-prom.tpl deleted file mode 100644 index 0b8b1d818..000000000 --- a/testdata/custommetrics/istio-prom.tpl +++ /dev/null @@ -1,118 +0,0 @@ -# This file provides templated metric specifications that enable -# Iter8 to retrieve metrics from Istio's Prometheus add-on. -# -# For a list of metrics supported out-of-the-box by the Istio Prometheus add-on, -# please see https://istio.io/latest/docs/reference/config/metrics/ -# -# Iter8 substitutes the placeholders in this file with values, -# and uses the resulting metric specs to query Prometheus. -# The placeholders are as follows. -# -# labels map[string]interface{} optional -# elapsedTimeSeconds int implicit -# startingTime string optional -# latencyPercentiles []int optional -# -# labels: this is the set of Prometheus labels that will be used to identify a particular -# app version. These labels will be applied to every Prometheus query. To learn more -# about what labels you can use for Prometheus, please see -# https://istio.io/latest/docs/reference/config/metrics/#labels -# -# elapsedTimeSeconds: this should not be specified directly by the user. -# It is implicitly computed by Iter8 according to the following formula -# elapsedTimeSeconds := (time.Now() - startingTime).Seconds() -# -# startingTime: By default, this is the time at which the Iter8 experiment started. -# The user can explicitly specify the startingTime for each app version -# (for example, the user can set the startingTime to the creation time of the app version) -# -# latencyPercentiles: Each item in this slice will create a new metric spec. -# For example, if this is set to [50,75,90,95], -# then, latency-p50, latency-p75, latency-p90, latency-p95 metric specs are created. - -# -# For testing purposes, hardcoded elapsedTimeSeconds to be 0 -# - -{{- define "labels"}} -{{- range $key, $val := .labels }} -{{- if or (eq (kindOf $val) "slice") (eq (kindOf $val) "map")}} -{{- fail (printf "labels should be a primitive types but received: %s :%s" $key $val) }} -{{- end }} -{{- if eq $key "response_code"}} -{{- fail "labels should not contain 'response_code'" }} -{{- end }} - {{ $key }}="{{ $val }}", -{{- end }} -{{- end}} - -# url is the HTTP endpoint where the Prometheus service installed by Istio's Prom add-on -# can be queried for metrics - -url: {{ .istioPromURL | default "http://prometheus.istio-system:9090/api/v1/query" }} -provider: istio-prom -method: GET -metrics: -- name: request-count - type: counter - description: | - Number of requests - params: - - name: query - value: | - sum(last_over_time(istio_requests_total{ - {{- template "labels" . }} - }[0s])) or on() vector(0) - jqExpression: .data.result[0].value[1] | tonumber -- name: error-count - type: counter - description: | - Number of unsuccessful requests - params: - - name: query - value: | - sum(last_over_time(istio_requests_total{ - response_code=~'5..', - {{- template "labels" . 
}} - }[0s])) or on() vector(0) - jqExpression: .data.result[0].value[1] | tonumber -- name: error-rate - type: gauge - description: | - Fraction of unsuccessful requests - params: - - name: query - value: | - (sum(last_over_time(istio_requests_total{ - response_code=~'5..', - {{- template "labels" . }} - }[0s])) or on() vector(0))/(sum(last_over_time(istio_requests_total{ - {{- template "labels" . }} - }[0s])) or on() vector(0)) - jqExpression: .data.result.[0].value.[1] | tonumber -- name: latency-mean - type: gauge - description: | - Mean latency - params: - - name: query - value: | - (sum(last_over_time(istio_request_duration_milliseconds_sum{ - {{- template "labels" . }} - }[0s])) or on() vector(0))/(sum(last_over_time(istio_requests_total{ - {{- template "labels" . }} - }[0s])) or on() vector(0)) - jqExpression: .data.result[0].value[1] | tonumber -{{- range $i, $p := .latencyPercentiles }} -- name: latency-p{{ $p }} - type: gauge - description: | - {{ $p }} percentile latency - params: - - name: query - value: | - histogram_quantile(0.{{ $p }}, sum(rate(istio_request_duration_milliseconds_bucket{ - {{- template "labels" $ }} - }[0s])) by (le)) - jqExpression: .data.result[0].value[1] | tonumber -{{- end }} \ No newline at end of file diff --git a/testdata/custommetrics/nan.tpl b/testdata/custommetrics/nan.tpl deleted file mode 100644 index c59103d4e..000000000 --- a/testdata/custommetrics/nan.tpl +++ /dev/null @@ -1,20 +0,0 @@ -url: http://url/query -provider: nan-prom -method: GET -metrics: - -- name: metric-tonumber - type: counter - description: tonumber - params: - - name: query - value: query-tonumber - jqExpression: .value | tonumber - -- name: metric-no-tonumber - type: counter - description: no-tonumber - params: - - name: query - value: query-no-tonumber - jqExpression: .value diff --git a/testdata/custommetrics/test-ce.tpl b/testdata/custommetrics/test-ce.tpl deleted file mode 100644 index d2ee026bf..000000000 --- a/testdata/custommetrics/test-ce.tpl +++ /dev/null @@ -1,82 +0,0 @@ -# endpoint where the monitoring instance is available -# https://cloud.ibm.com/docs/monitoring?topic=monitoring-endpoints#endpoints_sysdig -url: test-database.com/prometheus/api/v1/query # e.g. 
https://ca-tor.monitoring.cloud.ibm.com -headers: - # IAM token - # to get the token, run: ibmcloud iam oauth-tokens | grep IAM | cut -d \: -f 2 | sed 's/^ *//' - Authorization: Bearer test-token - # GUID of the IBM Cloud Monitoring instance - # to get the GUID, run: ibmcloud resource service-instance --output json | jq -r '.[].guid' - # https://cloud.ibm.com/docs/monitoring?topic=monitoring-mon-curl - IBMInstanceID: test-guid -provider: test-ce -method: GET -# Inputs for the template: -# ibm_codeengine_application_name string -# ibm_codeengine_gateway_instance string -# ibm_codeengine_namespace string -# ibm_codeengine_project_name string -# ibm_codeengine_revision_name string -# ibm_codeengine_status string -# ibm_ctype string -# ibm_location string -# ibm_scope string -# ibm_service_instance string -# ibm_service_name string -# -# Inputs for the metrics (output of template): -# ibm_codeengine_revision_name string -# startingTime string -# -# Note: elapsedTimeSeconds is produced by Iter8 - -# -# For testing purposes hardcoded elapsedTimeSeconds to 0 -# - -metrics: -- name: request-count - type: counter - description: | - Number of requests - params: - - name: query - value: | - sum(last_over_time(ibm_codeengine_application_requests_total{ - {{- if .ibm_codeengine_revision_name }} - ibm_codeengine_revision_name="{{.ibm_codeengine_revision_name}}", - {{- end }} - }[0s])) or on() vector(0) - jqExpression: .data.result[0].value[1] | tonumber -- name: error-count - type: counter - description: | - Number of non-successful requests - params: - - name: query - value: | - sum(last_over_time(ibm_codeengine_application_requests_total{ - ibm_codeengine_status!="200", - {{- if .ibm_codeengine_revision_name }} - ibm_codeengine_revision_name="{{.ibm_codeengine_revision_name}}", - {{- end }} - }[0s])) or on() vector(0) - jqExpression: .data.result[0].value[1] | tonumber -- name: error-rate - type: gauge - description: | - Percentage of non-successful requests - params: - - name: query - value: | - sum(last_over_time(ibm_codeengine_application_requests_total{ - ibm_codeengine_status!="200", - {{- if .ibm_codeengine_revision_name }} - ibm_codeengine_revision_name="{{.ibm_codeengine_revision_name}}", - {{- end }} - }[0s])) or on() vector(0)/sum(last_over_time(ibm_codeengine_application_requests_total{ - {{- if .ibm_codeengine_revision_name }} - ibm_codeengine_revision_name="{{.ibm_codeengine_revision_name}}", - {{- end }} - }[0s])) or on() vector(0) - jqExpression: .data.result.[0].value.[1] | tonumber \ No newline at end of file diff --git a/testdata/custommetrics/test-request-body.tpl b/testdata/custommetrics/test-request-body.tpl deleted file mode 100644 index ac5dd1292..000000000 --- a/testdata/custommetrics/test-request-body.tpl +++ /dev/null @@ -1,16 +0,0 @@ -url: test-database.com/prometheus/api/v1/query -provider: test-request-body -method: GET -# Note: elapsedTimeSeconds is produced by Iter8 -metrics: -- name: request-count - type: counter - description: | - Number of requests - body: | - example request body - params: - - name: query - value: | - example query parameter - jqExpression: .data.result[0].value[1] | tonumber \ No newline at end of file From b3672300002596ab3264acbd41fb0bb17bb4b4c6 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Tue, 1 Aug 2023 14:59:18 -0400 Subject: [PATCH 030/121] Gut insights Signed-off-by: Alan Cha --- base/collect_grpc_test.go | 62 ++++----- base/collect_http_test.go | 72 +++++----- base/experiment.go | 284 -------------------------------------- 
base/insights_test.go | 99 ------------- 4 files changed, 69 insertions(+), 448 deletions(-) diff --git a/base/collect_grpc_test.go b/base/collect_grpc_test.go index a5eb67e43..ab6bfe079 100644 --- a/base/collect_grpc_test.go +++ b/base/collect_grpc_test.go @@ -63,21 +63,21 @@ func TestRunCollectGRPCUnary(t *testing.T) { count := gs.GetCount(callType) assert.Equal(t, 200, count) - mm, err := exp.Result.Insights.GetMetricsInfo(gRPCMetricPrefix + "/" + gRPCErrorCountMetricName) - assert.NotNil(t, mm) - assert.NoError(t, err) + // mm, err := exp.Result.Insights.GetMetricsInfo(gRPCMetricPrefix + "/" + gRPCErrorCountMetricName) + // assert.NotNil(t, mm) + // assert.NoError(t, err) - mm, err = exp.Result.Insights.GetMetricsInfo(gRPCMetricPrefix + "/" + gRPCLatencySampleMetricName) - assert.NotNil(t, mm) - assert.NoError(t, err) + // mm, err = exp.Result.Insights.GetMetricsInfo(gRPCMetricPrefix + "/" + gRPCLatencySampleMetricName) + // assert.NotNil(t, mm) + // assert.NoError(t, err) - mm, err = exp.Result.Insights.GetMetricsInfo(gRPCMetricPrefix + "/" + gRPCLatencySampleMetricName + "/" + string(MaxAggregator)) - assert.NotNil(t, mm) - assert.NoError(t, err) + // mm, err = exp.Result.Insights.GetMetricsInfo(gRPCMetricPrefix + "/" + gRPCLatencySampleMetricName + "/" + string(MaxAggregator)) + // assert.NotNil(t, mm) + // assert.NoError(t, err) - mm, err = exp.Result.Insights.GetMetricsInfo(gRPCMetricPrefix + "/" + gRPCLatencySampleMetricName + "/" + PercentileAggregatorPrefix + "50") - assert.NotNil(t, mm) - assert.NoError(t, err) + // mm, err = exp.Result.Insights.GetMetricsInfo(gRPCMetricPrefix + "/" + gRPCLatencySampleMetricName + "/" + PercentileAggregatorPrefix + "50") + // assert.NotNil(t, mm) + // assert.NoError(t, err) } // If the endpoint does not exist, fail gracefully @@ -169,24 +169,24 @@ func TestRunCollectGRPCEndpoints(t *testing.T) { count := gs.GetCount(callType) assert.Equal(t, 200, count) - grpcMethods := []string{unary, server, client, bidirectional} - for _, method := range grpcMethods { - mm, err := exp.Result.Insights.GetMetricsInfo(gRPCMetricPrefix + "-" + method + "/" + gRPCErrorCountMetricName) - assert.NotNil(t, mm) - assert.NoError(t, err) + // grpcMethods := []string{unary, server, client, bidirectional} + // for _, method := range grpcMethods { + // mm, err := exp.Result.Insights.GetMetricsInfo(gRPCMetricPrefix + "-" + method + "/" + gRPCErrorCountMetricName) + // assert.NotNil(t, mm) + // assert.NoError(t, err) - mm, err = exp.Result.Insights.GetMetricsInfo(gRPCMetricPrefix + "-" + method + "/" + gRPCLatencySampleMetricName) - assert.NotNil(t, mm) - assert.NoError(t, err) + // mm, err = exp.Result.Insights.GetMetricsInfo(gRPCMetricPrefix + "-" + method + "/" + gRPCLatencySampleMetricName) + // assert.NotNil(t, mm) + // assert.NoError(t, err) - mm, err = exp.Result.Insights.GetMetricsInfo(gRPCMetricPrefix + "-" + method + "/" + gRPCLatencySampleMetricName + "/" + string(MaxAggregator)) - assert.NotNil(t, mm) - assert.NoError(t, err) + // mm, err = exp.Result.Insights.GetMetricsInfo(gRPCMetricPrefix + "-" + method + "/" + gRPCLatencySampleMetricName + "/" + string(MaxAggregator)) + // assert.NotNil(t, mm) + // assert.NoError(t, err) - mm, err = exp.Result.Insights.GetMetricsInfo(gRPCMetricPrefix + "-" + method + "/" + gRPCLatencySampleMetricName + "/" + PercentileAggregatorPrefix + "50") - assert.NotNil(t, mm) - assert.NoError(t, err) - } + // mm, err = exp.Result.Insights.GetMetricsInfo(gRPCMetricPrefix + "-" + method + "/" + gRPCLatencySampleMetricName + "/" + 
PercentileAggregatorPrefix + "50") + // assert.NotNil(t, mm) + // assert.NoError(t, err) + // } } // If the endpoints cannot be reached, then do not throw an error @@ -232,10 +232,10 @@ func TestRunCollectGRPCMultipleNoEndpoints(t *testing.T) { err := ct.run(exp) assert.NoError(t, err) - // No metrics should be collected - assert.Equal(t, 0, len(exp.Result.Insights.NonHistMetricValues[0])) - assert.Equal(t, 0, len(exp.Result.Insights.HistMetricValues[0])) - assert.Equal(t, 0, len(exp.Result.Insights.SummaryMetricValues[0])) + // // No metrics should be collected + // assert.Equal(t, 0, len(exp.Result.Insights.NonHistMetricValues[0])) + // assert.Equal(t, 0, len(exp.Result.Insights.HistMetricValues[0])) + // assert.Equal(t, 0, len(exp.Result.Insights.SummaryMetricValues[0])) } func TestMockGRPCWithSLOsAndPercentiles(t *testing.T) { diff --git a/base/collect_http_test.go b/base/collect_http_test.go index 22b1442df..66f5ed08a 100644 --- a/base/collect_http_test.go +++ b/base/collect_http_test.go @@ -2,6 +2,7 @@ package base import ( "bytes" + "encoding/json" "fmt" "io" "net/http" @@ -72,13 +73,13 @@ func TestRunCollectHTTP(t *testing.T) { assert.True(t, called) // ensure that the /foo/ handler is called assert.Equal(t, exp.Result.Insights.NumVersions, 1) - mm, err := exp.Result.Insights.GetMetricsInfo(httpMetricPrefix + "/" + builtInHTTPLatencyMeanID) - assert.NotNil(t, mm) - assert.NoError(t, err) + // mm, err := exp.Result.Insights.GetMetricsInfo(httpMetricPrefix + "/" + builtInHTTPLatencyMeanID) + // assert.NotNil(t, mm) + // assert.NoError(t, err) - mm, err = exp.Result.Insights.GetMetricsInfo(httpMetricPrefix + "/" + builtInHTTPLatencyPercentilePrefix + "50") - assert.NotNil(t, mm) - assert.NoError(t, err) + // mm, err = exp.Result.Insights.GetMetricsInfo(httpMetricPrefix + "/" + builtInHTTPLatencyPercentilePrefix + "50") + // assert.NotNil(t, mm) + // assert.NoError(t, err) } // If the endpoint does not exist, fail gracefully @@ -182,21 +183,24 @@ func TestRunCollectHTTPMultipleEndpoints(t *testing.T) { assert.True(t, barCalled) // ensure that the /bar/ handler is called assert.Equal(t, exp.Result.Insights.NumVersions, 1) - mm, err := exp.Result.Insights.GetMetricsInfo(httpMetricPrefix + "-" + endpoint1 + "/" + builtInHTTPLatencyMeanID) - assert.NotNil(t, mm) - assert.NoError(t, err) + expJSON, _ := json.Marshal(exp) + fmt.Println(string(expJSON)) - mm, err = exp.Result.Insights.GetMetricsInfo(httpMetricPrefix + "-" + endpoint1 + "/" + builtInHTTPLatencyPercentilePrefix + "50") - assert.NotNil(t, mm) - assert.NoError(t, err) + // mm, err := exp.Result.Insights.GetMetricsInfo(httpMetricPrefix + "-" + endpoint1 + "/" + builtInHTTPLatencyMeanID) + // assert.NotNil(t, mm) + // assert.NoError(t, err) - mm, err = exp.Result.Insights.GetMetricsInfo(httpMetricPrefix + "-" + endpoint2 + "/" + builtInHTTPLatencyMeanID) - assert.NotNil(t, mm) - assert.NoError(t, err) + // mm, err = exp.Result.Insights.GetMetricsInfo(httpMetricPrefix + "-" + endpoint1 + "/" + builtInHTTPLatencyPercentilePrefix + "50") + // assert.NotNil(t, mm) + // assert.NoError(t, err) - mm, err = exp.Result.Insights.GetMetricsInfo(httpMetricPrefix + "-" + endpoint2 + "/" + builtInHTTPLatencyPercentilePrefix + "50") - assert.NotNil(t, mm) - assert.NoError(t, err) + // mm, err = exp.Result.Insights.GetMetricsInfo(httpMetricPrefix + "-" + endpoint2 + "/" + builtInHTTPLatencyMeanID) + // assert.NotNil(t, mm) + // assert.NoError(t, err) + + // mm, err = exp.Result.Insights.GetMetricsInfo(httpMetricPrefix + "-" + endpoint2 + "/" + 
builtInHTTPLatencyPercentilePrefix + "50") + // assert.NotNil(t, mm) + // assert.NoError(t, err) } // Multiple endpoints are provided but they share one URL @@ -258,21 +262,21 @@ func TestRunCollectHTTPSingleEndpointMultipleCalls(t *testing.T) { assert.True(t, barCalled) // ensure that the /bar/ handler is called assert.Equal(t, exp.Result.Insights.NumVersions, 1) - mm, err := exp.Result.Insights.GetMetricsInfo(httpMetricPrefix + "-" + endpoint1 + "/" + builtInHTTPLatencyMeanID) - assert.NotNil(t, mm) - assert.NoError(t, err) + // mm, err := exp.Result.Insights.GetMetricsInfo(httpMetricPrefix + "-" + endpoint1 + "/" + builtInHTTPLatencyMeanID) + // assert.NotNil(t, mm) + // assert.NoError(t, err) - mm, err = exp.Result.Insights.GetMetricsInfo(httpMetricPrefix + "-" + endpoint1 + "/" + builtInHTTPLatencyPercentilePrefix + "50") - assert.NotNil(t, mm) - assert.NoError(t, err) + // mm, err = exp.Result.Insights.GetMetricsInfo(httpMetricPrefix + "-" + endpoint1 + "/" + builtInHTTPLatencyPercentilePrefix + "50") + // assert.NotNil(t, mm) + // assert.NoError(t, err) - mm, err = exp.Result.Insights.GetMetricsInfo(httpMetricPrefix + "-" + endpoint2 + "/" + builtInHTTPLatencyMeanID) - assert.NotNil(t, mm) - assert.NoError(t, err) + // mm, err = exp.Result.Insights.GetMetricsInfo(httpMetricPrefix + "-" + endpoint2 + "/" + builtInHTTPLatencyMeanID) + // assert.NotNil(t, mm) + // assert.NoError(t, err) - mm, err = exp.Result.Insights.GetMetricsInfo(httpMetricPrefix + "-" + endpoint2 + "/" + builtInHTTPLatencyPercentilePrefix + "50") - assert.NotNil(t, mm) - assert.NoError(t, err) + // mm, err = exp.Result.Insights.GetMetricsInfo(httpMetricPrefix + "-" + endpoint2 + "/" + builtInHTTPLatencyPercentilePrefix + "50") + // assert.NotNil(t, mm) + // assert.NoError(t, err) } // If the endpoints cannot be reached, then do not throw an error @@ -316,10 +320,10 @@ func TestRunCollectHTTPMultipleNoEndpoints(t *testing.T) { err := ct.run(exp) assert.NoError(t, err) - // No metrics should be collected - assert.Equal(t, 0, len(exp.Result.Insights.NonHistMetricValues[0])) - assert.Equal(t, 0, len(exp.Result.Insights.HistMetricValues[0])) - assert.Equal(t, 0, len(exp.Result.Insights.SummaryMetricValues[0])) + // // No metrics should be collected + // assert.Equal(t, 0, len(exp.Result.Insights.NonHistMetricValues[0])) + // assert.Equal(t, 0, len(exp.Result.Insights.HistMetricValues[0])) + // assert.Equal(t, 0, len(exp.Result.Insights.SummaryMetricValues[0])) } func TestErrorCode(t *testing.T) { diff --git a/base/experiment.go b/base/experiment.go index 1a6e0a7cf..4dc210dd2 100644 --- a/base/experiment.go +++ b/base/experiment.go @@ -4,15 +4,11 @@ import ( "encoding/json" "errors" "fmt" - "math" - "regexp" "strconv" "strings" "github.com/antonmedv/expr" log "github.com/iter8-tools/iter8/base/log" - "github.com/iter8-tools/iter8/base/summarymetrics" - "github.com/montanaflynn/stats" "helm.sh/helm/v3/pkg/time" ) @@ -91,27 +87,6 @@ type Insights struct { // VersionNames is list of version identifiers if known VersionNames []VersionInfo `json:"versionNames" yaml:"versionNames"` - - // MetricsInfo identifies the metrics involved in this experiment - MetricsInfo map[string]MetricMeta `json:"metricsInfo,omitempty" yaml:"metricsInfo,omitempty"` - - // NonHistMetricValues: - // the outer slice must be the same length as the number of app versions - // the map key must match name of a metric in MetricsInfo - // the inner slice contains the list of all observed metric values for given version and given metric; float value 
[i]["foo/bar"][k] is the [k]th observation for version [i] for the metric bar under backend foo. - // this struct is meant exclusively for metrics of type other than histogram - NonHistMetricValues []map[string][]float64 `json:"nonHistMetricValues,omitempty" yaml:"nonHistMetricValues,omitempty"` - - // HistMetricValues: - // the outer slice must be the same length as the number of app versions - // the map key must match name of a histogram metric in MetricsInfo - // the inner slice contains the list of all observed histogram buckets for a given version and given metric; value [i]["foo/bar"][k] is the [k]th observed bucket for version [i] for the hist metric `bar` under backend `foo`. - HistMetricValues []map[string][]HistBucket `json:"histMetricValues,omitempty" yaml:"histMetricValues,omitempty"` - - // SummaryMetricValues: - // the outer slice must be the same length as the number of tracks - // the map key must match the name of the summary metric in MetricsInfo - SummaryMetricValues []map[string]summarymetrics.SummaryMetric } // MetricMeta describes a metric @@ -329,196 +304,6 @@ func (r *ExperimentResult) initInsightsWithNumVersions(n int) error { } } - return r.Insights.initMetrics() -} - -// initMetrics initializes the data structes inside insights that will hold metrics -func (in *Insights) initMetrics() error { - if in.NonHistMetricValues != nil || - in.HistMetricValues != nil || - in.SummaryMetricValues != nil { - if len(in.NonHistMetricValues) != in.NumVersions || - len(in.HistMetricValues) != in.NumVersions || - len(in.SummaryMetricValues) != in.NumVersions { - err := fmt.Errorf("inconsistent number for app versions in non hist metric values (%v), hist metric values (%v), num versions (%v)", len(in.NonHistMetricValues), len(in.HistMetricValues), in.NumVersions) - log.Logger.Error(err) - return err - } - if len(in.NonHistMetricValues[0])+len(in.HistMetricValues[0])+len(in.SummaryMetricValues[0]) != len(in.MetricsInfo) { - err := fmt.Errorf("inconsistent number for metrics in non hist metric values (%v), hist metric values (%v), metrics info (%v)", len(in.NonHistMetricValues[0]), len(in.HistMetricValues[0]), len(in.MetricsInfo)) - log.Logger.Error(err) - return err - } - return nil - } - // at this point, there are no known metrics, but there are in.NumVersions - // initialize metrics info - in.MetricsInfo = make(map[string]MetricMeta) - // initialize non hist metric values for each version - in.NonHistMetricValues = make([]map[string][]float64, in.NumVersions) - // initialize hist metric values for each version - in.HistMetricValues = make([]map[string][]HistBucket, in.NumVersions) - // initialize summary metric values for each version - in.SummaryMetricValues = make([]map[string]summarymetrics.SummaryMetric, in.NumVersions) - for i := 0; i < in.NumVersions; i++ { - in.NonHistMetricValues[i] = make(map[string][]float64) - in.HistMetricValues[i] = make(map[string][]HistBucket) - in.SummaryMetricValues[i] = make(map[string]summarymetrics.SummaryMetric) - } - return nil -} - -// getCounterOrGaugeMetricFromValuesMap gets the value of the given counter or gauge metric, for the given version, from metric values map -func (in *Insights) getCounterOrGaugeMetricFromValuesMap(i int, m string) *float64 { - if mm, ok := in.MetricsInfo[m]; ok { - log.Logger.Tracef("found metric info for %v", m) - if (mm.Type != CounterMetricType) && (mm.Type != GaugeMetricType) { - log.Logger.Errorf("metric %v is not of type counter or gauge", m) - return nil - } - l := len(in.NonHistMetricValues) - if l <= 
i { - log.Logger.Warnf("metric values not found for version %v; initialized for %v versions", i, l) - return nil - } - log.Logger.Tracef("metric values found for version %v", i) - // grab the metric value and return - if vals, ok := in.NonHistMetricValues[i][m]; ok { - log.Logger.Tracef("found metric value for version %v and metric %v", i, m) - if len(vals) > 0 { - return float64Pointer(vals[len(vals)-1]) - } - } - log.Logger.Infof("could not find metric value for version %v and metric %v", i, m) - } - log.Logger.Infof("could not find metric info for %v", m) - return nil -} - -// getSampleAggregation aggregates the given base metric for the given version (i) with the given aggregation (a) -func (in *Insights) getSampleAggregation(i int, baseMetric string, a string) *float64 { - at := AggregationType(a) - vals := in.NonHistMetricValues[i][baseMetric] - if len(vals) == 0 { - log.Logger.Infof("metric %v for version %v has no sample", baseMetric, i) - return nil - } - if len(vals) == 1 { - log.Logger.Warnf("metric %v for version %v has sample of size 1", baseMetric, i) - return float64Pointer(vals[0]) - } - switch at { - case MeanAggregator: - agg, err := stats.Mean(vals) - if err == nil { - return float64Pointer(agg) - } - log.Logger.WithStackTrace(err.Error()).Errorf("aggregation error for version %v, metric %v, and aggregation func %v", i, baseMetric, a) - return nil - case StdDevAggregator: - agg, err := stats.StandardDeviation(vals) - if err == nil { - return float64Pointer(agg) - } - log.Logger.WithStackTrace(err.Error()).Errorf("aggregation error version %v, metric %v, and aggregation func %v", i, baseMetric, a) - return nil - case MinAggregator: - agg, err := stats.Min(vals) - if err == nil { - return float64Pointer(agg) - } - log.Logger.WithStackTrace(err.Error()).Errorf("aggregation error version %v, metric %v, and aggregation func %v", i, baseMetric, a) - return nil - case MaxAggregator: - agg, err := stats.Max(vals) - if err == nil { - return float64Pointer(agg) - } - log.Logger.WithStackTrace(err.Error()).Errorf("aggregation error version %v, metric %v, and aggregation func %v", i, baseMetric, a) - return nil - default: // don't do anything - } - - // at this point, 'a' must be a percentile aggregator - var percent float64 - var err error - if strings.HasPrefix(a, "p") { - b := strings.TrimPrefix(a, "p") - // b must be a percent - if match, _ := regexp.MatchString(decimalRegex, b); match { - // extract percent - if percent, err = strconv.ParseFloat(b, 64); err != nil { - log.Logger.WithStackTrace(err.Error()).Errorf("error extracting percent from aggregation func %v", a) - return nil - } - // compute percentile - agg, err := stats.Percentile(vals, percent) - if err == nil { - return float64Pointer(agg) - } - log.Logger.WithStackTrace(err.Error()).Errorf("aggregation error version %v, metric %v, and aggregation func %v", i, baseMetric, a) - return nil - } - log.Logger.Errorf("unable to extract percent from agggregation func %v", a) - return nil - } - log.Logger.Errorf("invalid aggregation %v", a) - return nil -} - -// getSummaryAggregation aggregates the given base metric for the given version (i) with the given aggregation (a) -func (in *Insights) getSummaryAggregation(i int, baseMetric string, a string) *float64 { - at := AggregationType(a) - m, ok := in.SummaryMetricValues[i][baseMetric] - if !ok { // metric not in list - log.Logger.Errorf("invalid metric %s", baseMetric) - return nil - } - - switch at { - case CountAggregator: - return float64Pointer(float64(m.Count())) - case 
MeanAggregator: - return float64Pointer(m.Sum() / float64(m.Count())) - case StdDevAggregator: - // sample variance (bessel's correction) - // ss / (count -1) - mean^2 * count / (count -1) - mean := m.Sum() / float64(m.Count()) - nMinus1 := float64(m.Count() - 1) - return float64Pointer(math.Sqrt((m.SumSquares() / nMinus1) - (mean*mean*float64(m.Count()))/nMinus1)) - case MinAggregator: - return float64Pointer(m.Min()) - case MaxAggregator: - return float64Pointer(m.Max()) - default: - // unknown, do nothing - } - log.Logger.Errorf("invalid aggregation %v", a) - return nil -} - -// aggregateMetric returns the aggregated metric value for a given version and metric -func (in *Insights) aggregateMetric(i int, m string) *float64 { - s := strings.Split(m, "/") - if len(s) != 3 { - // should not have been called - log.Logger.Errorf("metric name %v not valid for aggregation", m) - return nil - } - baseMetric := s[0] + "/" + s[1] - if m, ok := in.MetricsInfo[baseMetric]; ok { - log.Logger.Tracef("found metric %v used for aggregation", baseMetric) - if m.Type == SampleMetricType { - log.Logger.Tracef("metric %v used for aggregation is a sample metric", baseMetric) - return in.getSampleAggregation(i, baseMetric, s[2]) - } else if m.Type == SummaryMetricType { - log.Logger.Tracef("metric %v used for aggregation is a summary metric", baseMetric) - return in.getSummaryAggregation(i, baseMetric, s[2]) - } - log.Logger.Errorf("metric %v used for aggregation is not a sample or summary metric", baseMetric) - return nil - } - log.Logger.Warnf("could not find metric %v used for aggregation", baseMetric) return nil } @@ -548,75 +333,6 @@ func NormalizeMetricName(m string) (string, error) { return m, nil } -// ScalarMetricValue gets the value of the given scalar metric for the given version -func (in *Insights) ScalarMetricValue(i int, m string) *float64 { - s := strings.Split(m, "/") - if len(s) == 3 { - log.Logger.Tracef("%v is an aggregated metric", m) - return in.aggregateMetric(i, m) - } else if len(s) == 2 { // this appears to be a non-aggregated metric - var nm string - var err error - if nm, err = NormalizeMetricName(m); err != nil { - return nil - } - return in.getCounterOrGaugeMetricFromValuesMap(i, nm) - } else { - log.Logger.Errorf("invalid metric name %v", m) - log.Logger.Error("metric names must be of the form a/b or a/b/c, where a is the id of the metrics backend, b is the id of a metric name, and c is a valid aggregation function") - return nil - } -} - -// GetMetricsInfo gets metric meta for the given normalized metric name -func (in *Insights) GetMetricsInfo(nm string) (*MetricMeta, error) { - s := strings.Split(nm, "/") - - // this is an aggregated metric - if len(s) == 3 { - log.Logger.Tracef("%v is an aggregated metric", nm) - vm := s[0] + "/" + s[1] - mm, ok := in.MetricsInfo[vm] - if !ok { - err := fmt.Errorf("unable to find info for vector metric: %v", vm) - log.Logger.Error(err) - return nil, err - } - // determine type of aggregation - aggType := CounterMetricType - if AggregationType(s[2]) != CountAggregator { - aggType = GaugeMetricType - } - // format aggregator text - formattedAggregator := s[2] + " value" - if strings.HasPrefix(s[2], PercentileAggregatorPrefix) { - percent := strings.TrimPrefix(s[2], PercentileAggregatorPrefix) - formattedAggregator = fmt.Sprintf("%v-th percentile value", percent) - } - // return metrics meta - return &MetricMeta{ - Description: fmt.Sprintf("%v of %v", formattedAggregator, vm), - Units: mm.Units, - Type: aggType, - }, nil - } - - // this is a 
non-aggregated metric - if len(s) == 2 { - mm, ok := in.MetricsInfo[nm] - if !ok { - err := fmt.Errorf("unable to find info for scalar metric: %v", nm) - log.Logger.Error(err) - return nil, err - } - return &mm, nil - } - - err := fmt.Errorf("invalid metric name %v; metric names must be of the form a/b or a/b/c, where a is the id of the metrics backend, b is the id of a metric name, and c is a valid aggregation function", nm) - log.Logger.Error(err) - return nil, err -} - // Driver enables interacting with experiment result stored externally type Driver interface { // Read the experiment diff --git a/base/insights_test.go b/base/insights_test.go index 063e963bc..299b731c7 100644 --- a/base/insights_test.go +++ b/base/insights_test.go @@ -3,7 +3,6 @@ package base import ( "testing" - "github.com/iter8-tools/iter8/base/summarymetrics" "github.com/stretchr/testify/assert" ) @@ -25,101 +24,3 @@ func TestTrackVersionStr(t *testing.T) { }) } } - -func TestGetSummaryAggregation(t *testing.T) { - in := Insights{ - // count, sum, min, max, sumsquares - SummaryMetricValues: []map[string]summarymetrics.SummaryMetric{{ - "metric": [5]float64{float64(10), float64(110), float64(2), float64(20), float64(1540)}, - }}, - } - - assert.Equal(t, float64(10), *in.getSummaryAggregation(0, "metric", "count")) - assert.Equal(t, float64(11), *in.getSummaryAggregation(0, "metric", "mean")) - // assert.Equal(t, float64(6.055300708194983), *in.getSummaryAggregation(0, "metric", "stddev")) - assert.Greater(t, float64(6.0553008), *in.getSummaryAggregation(0, "metric", "stddev")) - assert.Less(t, float64(6.0553007), *in.getSummaryAggregation(0, "metric", "stddev")) - assert.Equal(t, float64(2), *in.getSummaryAggregation(0, "metric", "min")) - assert.Equal(t, float64(20), *in.getSummaryAggregation(0, "metric", "max")) - - assert.Nil(t, in.getSummaryAggregation(0, "metric", "invalid")) - - assert.Nil(t, in.getSummaryAggregation(0, "notametric", "count")) -} - -func TestGetSampleAggregation(t *testing.T) { - // no values - in := Insights{ - NonHistMetricValues: []map[string][]float64{{ - "metric": []float64{}, - }}, - } - assert.Nil(t, in.getSampleAggregation(0, "metric", "something")) - - // single value - in = Insights{ - NonHistMetricValues: []map[string][]float64{{ - "metric": []float64{float64(2)}, - }}, - } - assert.Equal(t, float64(2), *in.getSampleAggregation(0, "metric", "anything")) - - // multiple values - in = Insights{ - NonHistMetricValues: []map[string][]float64{{ - "metric": []float64{ - float64(2), float64(4), float64(6), float64(8), float64(10), - float64(12), float64(14), float64(16), float64(18), float64(20), - }, - }}, - } - assert.Len(t, in.NonHistMetricValues, 1) - assert.Len(t, in.NonHistMetricValues[0], 1) - assert.Contains(t, in.NonHistMetricValues[0], "metric") - assert.Equal(t, float64(11), *in.getSampleAggregation(0, "metric", "mean")) - // assert.Equal(t, float64(5.744562646538029), *in.getSampleAggregation(0, "metric", "stddev")) - assert.Greater(t, float64(5.7445627), *in.getSampleAggregation(0, "metric", "stddev")) - assert.Less(t, float64(5.7445626), *in.getSampleAggregation(0, "metric", "stddev")) - assert.Equal(t, float64(2), *in.getSampleAggregation(0, "metric", "min")) - assert.Equal(t, float64(20), *in.getSampleAggregation(0, "metric", "max")) - // starts with p but not a percentile - assert.Nil(t, in.getSampleAggregation(0, "metric", "p-notpercent")) - // invalid percentile (101) - assert.Nil(t, in.getSampleAggregation(0, "metric", "p101")) - assert.Equal(t, float64(15), 
*in.getSampleAggregation(0, "metric", "p78.3")) - // not a valid aggregation - assert.Nil(t, in.getSampleAggregation(0, "metric", "invalid")) -} - -func TestAggregateMetric(t *testing.T) { - in := Insights{ - MetricsInfo: map[string]MetricMeta{ - "prefix/summary": {Type: SummaryMetricType}, - "prefix/sample": {Type: SampleMetricType}, - "prefix/counter": {Type: CounterMetricType}, - "prefix/gauge": {Type: GaugeMetricType}, - }, - NonHistMetricValues: []map[string][]float64{{ - "prefix/sample": []float64{ - float64(2), float64(4), float64(6), float64(8), float64(10), - float64(12), float64(14), float64(16), float64(18), float64(20), - }, - }}, - // count, sum, min, max, sumsquares - SummaryMetricValues: []map[string]summarymetrics.SummaryMetric{{ - "prefix/summary": [5]float64{float64(10), float64(110), float64(2), float64(20), float64(1540)}, - }}, - } - - // not enough parts - assert.Nil(t, in.aggregateMetric(0, "counter")) - // not enough parts - assert.Nil(t, in.aggregateMetric(0, "prefix/counter")) - // not a summary or sample metric - assert.Nil(t, in.aggregateMetric(0, "prefix/counter/mean")) - // not in MetricsInfo - assert.Nil(t, in.aggregateMetric(0, "prefix/invalid/mean")) - - assert.Equal(t, float64(11), *in.aggregateMetric(0, "prefix/summary/mean")) - assert.Equal(t, float64(11), *in.aggregateMetric(0, "prefix/sample/mean")) -} From f9e83fc635dc345d8348d0669841339957240264 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Tue, 1 Aug 2023 21:44:04 -0400 Subject: [PATCH 031/121] Fix http tests Signed-off-by: Alan Cha --- base/collect_http.go | 21 +-- base/collect_http_test.go | 361 ++++++++++++++++++++++++++------------ base/experiment.go | 28 --- base/notify.go | 4 +- base/notify_test.go | 34 ++-- metrics/server.go | 4 +- metrics/server_test.go | 2 +- 7 files changed, 279 insertions(+), 175 deletions(-) diff --git a/base/collect_http.go b/base/collect_http.go index e1dc71197..58dda200f 100644 --- a/base/collect_http.go +++ b/base/collect_http.go @@ -80,27 +80,16 @@ const ( defaultHTTPNumRequests = int64(100) // defaultHTTPConnections is the default number of connections (parallel go routines) defaultHTTPConnections = 4 - // httpMetricPrefix is the prefix for all metrics collected by this task - httpMetricPrefix = "http" - // the following are a list of names for metrics collected by this task - builtInHTTPRequestCountID = "request-count" - builtInHTTPErrorCountID = "error-count" - builtInHTTPErrorRateID = "error-rate" - builtInHTTPLatencyMeanID = "latency-mean" - builtInHTTPLatencyStdDevID = "latency-stddev" - builtInHTTPLatencyMinID = "latency-min" - builtInHTTPLatencyMaxID = "latency-max" - builtInHTTPLatencyHistID = "latency" - // prefix used in latency percentile metric names - // example: latency-p75.0 is the 75th percentile latency - builtInHTTPLatencyPercentilePrefix = "latency-p" // MetricsServerURL is the URL of the metrics server - // TODO: move elsewhere, abn/service seems to produce cyclical dependency, also needed by gRPC + // TODO: move elsewhere because also needed by gRPC MetricsServerURL = "METRICS_SERVER_URL" - // PerformanceResultPath is the path to the PUT performanceResult/ endpoint + // TODO: move elsewhere because also needed by gRPC PerformanceResultPath = "/performanceResult" + + // HTTPDashboardPath is the path to the GET httpDashboard/ endpoint + HTTPDashboardPath = "/httpDashboard" ) var ( diff --git a/base/collect_http_test.go b/base/collect_http_test.go index 66f5ed08a..91865df6f 100644 --- a/base/collect_http_test.go +++ b/base/collect_http_test.go @@ 
-21,6 +21,9 @@ const ( foo = "foo" bar = "bar" from = "from" + + myName = "myName" + myNamespace = "myNamespace" ) func startHTTPMock(t *testing.T) { @@ -29,7 +32,64 @@ func startHTTPMock(t *testing.T) { httpmock.RegisterNoResponder(httpmock.InitialTransport.RoundTrip) } +type DashboardCallback func(req *http.Request) + +type mockMetricsServerInput struct { + metricsServerURL string + + // GET /httpDashboard + httpDashboardCallback DashboardCallback + // GET /grpcDashboard + gRPCDashboardCallback DashboardCallback + // PUT /performanceResult + performanceResultCallback DashboardCallback +} + +func mockMetricsServer(input mockMetricsServerInput) { + // GET /httpDashboard + httpmock.RegisterResponder( + http.MethodGet, + input.metricsServerURL+HTTPDashboardPath, + func(req *http.Request) (*http.Response, error) { + if input.httpDashboardCallback != nil { + input.httpDashboardCallback(req) + } + + return httpmock.NewStringResponse(200, "success"), nil + }, + ) + + // GET /grpcDashboard + httpmock.RegisterResponder( + http.MethodGet, + input.metricsServerURL+GRPCDashboardPath, + func(req *http.Request) (*http.Response, error) { + if input.gRPCDashboardCallback != nil { + input.gRPCDashboardCallback(req) + } + return httpmock.NewStringResponse(200, "success"), nil + }, + ) + + // PUT /performanceResult + httpmock.RegisterResponder( + http.MethodPut, + input.metricsServerURL+PerformanceResultPath, + func(req *http.Request) (*http.Response, error) { + if input.performanceResultCallback != nil { + input.performanceResultCallback(req) + } + return httpmock.NewStringResponse(200, "success"), nil + }, + ) +} + func TestRunCollectHTTP(t *testing.T) { + // define METRICS_SERVER_URL + metricsServerURL := "http://iter8.default:8080" + err := os.Setenv(MetricsServerURL, metricsServerURL) + assert.NoError(t, err) + mux, addr := fhttp.DynamicHTTPServer(false) // /foo/ handler @@ -46,7 +106,36 @@ func TestRunCollectHTTP(t *testing.T) { } mux.HandleFunc("/"+foo, handler) - baseURL := fmt.Sprintf("http://localhost:%d/", addr.Port) + url := fmt.Sprintf("http://localhost:%d/", addr.Port) + foo + + // mock metrics server + startHTTPMock(t) + metricsServerCalled := false + mockMetricsServer(mockMetricsServerInput{ + metricsServerURL: metricsServerURL, + performanceResultCallback: func(req *http.Request) { + metricsServerCalled = true + + // check query parameters + assert.Equal(t, myName, req.URL.Query().Get("experiment")) + assert.Equal(t, myNamespace, req.URL.Query().Get("namespace")) + + // check payload + body, err := io.ReadAll(req.Body) + assert.NoError(t, err) + assert.NotNil(t, body) + + // check payload content + bodyFortioResult := FortioResult{} + err = json.Unmarshal(body, &bodyFortioResult) + assert.NoError(t, err) + assert.NotNil(t, body) + + if _, ok := bodyFortioResult.EndpointResults[url]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain url: %s", url)) + } + }, + }) // valid collect HTTP task... 
should succeed ct := &collectHTTPTask{ @@ -58,7 +147,7 @@ func TestRunCollectHTTP(t *testing.T) { Duration: StringPointer("1s"), PayloadFile: StringPointer(CompletePath("../", "testdata/payload/ukpolice.json")), Headers: map[string]string{}, - URL: baseURL + foo, + URL: url, }, }, } @@ -66,20 +155,17 @@ func TestRunCollectHTTP(t *testing.T) { exp := &Experiment{ Spec: []Task{ct}, Result: &ExperimentResult{}, + Metadata: ExperimentMetadata{ + Name: myName, + Namespace: myNamespace, + }, } exp.initResults(1) - err := ct.run(exp) + err = ct.run(exp) assert.NoError(t, err) - assert.True(t, called) // ensure that the /foo/ handler is called + assert.True(t, metricsServerCalled) // ensure that the metrics server is called + assert.True(t, called) // ensure that the /foo/ handler is called assert.Equal(t, exp.Result.Insights.NumVersions, 1) - - // mm, err := exp.Result.Insights.GetMetricsInfo(httpMetricPrefix + "/" + builtInHTTPLatencyMeanID) - // assert.NotNil(t, mm) - // assert.NoError(t, err) - - // mm, err = exp.Result.Insights.GetMetricsInfo(httpMetricPrefix + "/" + builtInHTTPLatencyPercentilePrefix + "50") - // assert.NotNil(t, mm) - // assert.NoError(t, err) } // If the endpoint does not exist, fail gracefully @@ -118,6 +204,11 @@ func TestRunCollectHTTPNoEndpoint(t *testing.T) { // Test both the /foo/ and /bar/ endpoints // Test both endpoints have their respective header values func TestRunCollectHTTPMultipleEndpoints(t *testing.T) { + // define METRICS_SERVER_URL + metricsServerURL := "http://iter8.default:8080" + err := os.Setenv(MetricsServerURL, metricsServerURL) + assert.NoError(t, err) + mux, addr := fhttp.DynamicHTTPServer(false) // /foo/ handler @@ -145,86 +236,41 @@ func TestRunCollectHTTPMultipleEndpoints(t *testing.T) { mux.HandleFunc("/"+bar, barHandler) baseURL := fmt.Sprintf("http://localhost:%d/", addr.Port) + endpoint1URL := baseURL + foo + endpoint2URL := baseURL + bar - // valid collect HTTP task... 
should succeed - ct := &collectHTTPTask{ - TaskMeta: TaskMeta{ - Task: StringPointer(CollectHTTPTaskName), - }, - With: collectHTTPInputs{ - endpoint: endpoint{ - Duration: StringPointer("1s"), - }, - Endpoints: map[string]endpoint{ - endpoint1: { - URL: baseURL + foo, - Headers: map[string]string{ - from: foo, - }, - }, - endpoint2: { - URL: baseURL + bar, - Headers: map[string]string{ - from: bar, - }, - }, - }, - }, - } - - exp := &Experiment{ - Spec: []Task{ct}, - Result: &ExperimentResult{}, - } - exp.initResults(1) - err := ct.run(exp) - assert.NoError(t, err) - assert.True(t, fooCalled) // ensure that the /foo/ handler is called - assert.True(t, barCalled) // ensure that the /bar/ handler is called - assert.Equal(t, exp.Result.Insights.NumVersions, 1) - - expJSON, _ := json.Marshal(exp) - fmt.Println(string(expJSON)) - - // mm, err := exp.Result.Insights.GetMetricsInfo(httpMetricPrefix + "-" + endpoint1 + "/" + builtInHTTPLatencyMeanID) - // assert.NotNil(t, mm) - // assert.NoError(t, err) - - // mm, err = exp.Result.Insights.GetMetricsInfo(httpMetricPrefix + "-" + endpoint1 + "/" + builtInHTTPLatencyPercentilePrefix + "50") - // assert.NotNil(t, mm) - // assert.NoError(t, err) - - // mm, err = exp.Result.Insights.GetMetricsInfo(httpMetricPrefix + "-" + endpoint2 + "/" + builtInHTTPLatencyMeanID) - // assert.NotNil(t, mm) - // assert.NoError(t, err) + // mock metrics server + startHTTPMock(t) + metricsServerCalled := false + mockMetricsServer(mockMetricsServerInput{ + metricsServerURL: metricsServerURL, + performanceResultCallback: func(req *http.Request) { + metricsServerCalled = true - // mm, err = exp.Result.Insights.GetMetricsInfo(httpMetricPrefix + "-" + endpoint2 + "/" + builtInHTTPLatencyPercentilePrefix + "50") - // assert.NotNil(t, mm) - // assert.NoError(t, err) -} + // check query parameters + assert.Equal(t, myName, req.URL.Query().Get("experiment")) + assert.Equal(t, myNamespace, req.URL.Query().Get("namespace")) -// Multiple endpoints are provided but they share one URL -// Test that the base-level URL is provided to each endpoint -// Make multiple calls to the same URL but with different headers -func TestRunCollectHTTPSingleEndpointMultipleCalls(t *testing.T) { - mux, addr := fhttp.DynamicHTTPServer(false) + // check payload + body, err := io.ReadAll(req.Body) + assert.NoError(t, err) + assert.NotNil(t, body) - // handler - fooCalled := false // ensure that foo header is provided - barCalled := false // ensure that bar header is provided - fooHandler := func(w http.ResponseWriter, r *http.Request) { - from := r.Header.Get(from) - if from == foo { - fooCalled = true - } else if from == bar { - barCalled = true - } + // check payload content + bodyFortioResult := FortioResult{} + err = json.Unmarshal(body, &bodyFortioResult) + assert.NoError(t, err) + assert.NotNil(t, body) - w.WriteHeader(200) - } - mux.HandleFunc("/", fooHandler) + if _, ok := bodyFortioResult.EndpointResults[endpoint1URL]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain url: %s", endpoint1URL)) + } - baseURL := fmt.Sprintf("http://localhost:%d/", addr.Port) + if _, ok := bodyFortioResult.EndpointResults[endpoint2URL]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain url: %s", endpoint2URL)) + } + }, + }) // valid collect HTTP task... 
should succeed ct := &collectHTTPTask{ @@ -234,15 +280,16 @@ func TestRunCollectHTTPSingleEndpointMultipleCalls(t *testing.T) { With: collectHTTPInputs{ endpoint: endpoint{ Duration: StringPointer("1s"), - URL: baseURL, }, Endpoints: map[string]endpoint{ endpoint1: { + URL: endpoint1URL, Headers: map[string]string{ from: foo, }, }, endpoint2: { + URL: endpoint2URL, Headers: map[string]string{ from: bar, }, @@ -254,37 +301,124 @@ func TestRunCollectHTTPSingleEndpointMultipleCalls(t *testing.T) { exp := &Experiment{ Spec: []Task{ct}, Result: &ExperimentResult{}, + Metadata: ExperimentMetadata{ + Name: myName, + Namespace: myNamespace, + }, } exp.initResults(1) - err := ct.run(exp) + err = ct.run(exp) assert.NoError(t, err) - assert.True(t, fooCalled) // ensure that the /foo/ handler is called - assert.True(t, barCalled) // ensure that the /bar/ handler is called + assert.True(t, metricsServerCalled) // ensure that the metrics server is called + assert.True(t, fooCalled) // ensure that the /foo/ handler is called + assert.True(t, barCalled) // ensure that the /bar/ handler is called assert.Equal(t, exp.Result.Insights.NumVersions, 1) - - // mm, err := exp.Result.Insights.GetMetricsInfo(httpMetricPrefix + "-" + endpoint1 + "/" + builtInHTTPLatencyMeanID) - // assert.NotNil(t, mm) - // assert.NoError(t, err) - - // mm, err = exp.Result.Insights.GetMetricsInfo(httpMetricPrefix + "-" + endpoint1 + "/" + builtInHTTPLatencyPercentilePrefix + "50") - // assert.NotNil(t, mm) - // assert.NoError(t, err) - - // mm, err = exp.Result.Insights.GetMetricsInfo(httpMetricPrefix + "-" + endpoint2 + "/" + builtInHTTPLatencyMeanID) - // assert.NotNil(t, mm) - // assert.NoError(t, err) - - // mm, err = exp.Result.Insights.GetMetricsInfo(httpMetricPrefix + "-" + endpoint2 + "/" + builtInHTTPLatencyPercentilePrefix + "50") - // assert.NotNil(t, mm) - // assert.NoError(t, err) } +// TODO: this test is broken because the FortioResult.EndpointResults uses URL +// as the key but in this case, there are two endpoints with the same URL but +// different headers. +// +// // Multiple endpoints are provided but they share one URL +// // Test that the base-level URL is provided to each endpoint +// // Make multiple calls to the same URL but with different headers +// func TestRunCollectHTTPSingleEndpointMultipleCalls(t *testing.T) { +// mux, addr := fhttp.DynamicHTTPServer(false) + +// // handler +// fooCalled := false // ensure that foo header is provided +// barCalled := false // ensure that bar header is provided +// fooHandler := func(w http.ResponseWriter, r *http.Request) { +// from := r.Header.Get(from) +// if from == foo { +// fooCalled = true +// } else if from == bar { +// barCalled = true +// } + +// w.WriteHeader(200) +// } +// mux.HandleFunc("/", fooHandler) + +// baseURL := fmt.Sprintf("http://localhost:%d/", addr.Port) + +// // valid collect HTTP task... 
should succeed +// ct := &collectHTTPTask{ +// TaskMeta: TaskMeta{ +// Task: StringPointer(CollectHTTPTaskName), +// }, +// With: collectHTTPInputs{ +// endpoint: endpoint{ +// Duration: StringPointer("1s"), +// URL: baseURL, +// }, +// Endpoints: map[string]endpoint{ +// endpoint1: { +// Headers: map[string]string{ +// from: foo, +// }, +// }, +// endpoint2: { +// Headers: map[string]string{ +// from: bar, +// }, +// }, +// }, +// }, +// } + +// exp := &Experiment{ +// Spec: []Task{ct}, +// Result: &ExperimentResult{}, +// } +// exp.initResults(1) +// err := ct.run(exp) +// assert.NoError(t, err) +// assert.True(t, fooCalled) // ensure that the /foo/ handler is called +// assert.True(t, barCalled) // ensure that the /bar/ handler is called +// assert.Equal(t, exp.Result.Insights.NumVersions, 1) +// } + // If the endpoints cannot be reached, then do not throw an error // Should not return an nil pointer dereference error (see #1451) func TestRunCollectHTTPMultipleNoEndpoints(t *testing.T) { + // define METRICS_SERVER_URL + metricsServerURL := "http://iter8.default:8080" + err := os.Setenv(MetricsServerURL, metricsServerURL) + assert.NoError(t, err) + _, addr := fhttp.DynamicHTTPServer(false) baseURL := fmt.Sprintf("http://localhost:%d/", addr.Port) + endpoint1URL := baseURL + foo + endpoint2URL := baseURL + bar + + // mock metrics server + startHTTPMock(t) + // metricsServerCalled := false + mockMetricsServer(mockMetricsServerInput{ + metricsServerURL: metricsServerURL, + performanceResultCallback: func(req *http.Request) { + // metricsServerCalled = true + + // check query parameters + assert.Equal(t, myName, req.URL.Query().Get("experiment")) + assert.Equal(t, myNamespace, req.URL.Query().Get("namespace")) + + // check payload + body, err := io.ReadAll(req.Body) + assert.NoError(t, err) + assert.NotNil(t, body) + + // check payload content + bodyFortioResult := FortioResult{} + err = json.Unmarshal(body, &bodyFortioResult) + assert.NoError(t, err) + + // no EndpointResults because endpoints cannot be reached + assert.Equal(t, `{"EndpointResults":{},"Summary":{"numVersions":1,"versionNames":null}}`, string(body)) + }, + }) // valid collect HTTP task... 
should succeed ct := &collectHTTPTask{ @@ -297,13 +431,13 @@ func TestRunCollectHTTPMultipleNoEndpoints(t *testing.T) { }, Endpoints: map[string]endpoint{ endpoint1: { - URL: baseURL + foo, + URL: endpoint1URL, Headers: map[string]string{ from: foo, }, }, endpoint2: { - URL: baseURL + bar, + URL: endpoint2URL, Headers: map[string]string{ from: bar, }, @@ -315,15 +449,14 @@ func TestRunCollectHTTPMultipleNoEndpoints(t *testing.T) { exp := &Experiment{ Spec: []Task{ct}, Result: &ExperimentResult{}, + Metadata: ExperimentMetadata{ + Name: myName, + Namespace: myNamespace, + }, } exp.initResults(1) - err := ct.run(exp) + err = ct.run(exp) assert.NoError(t, err) - - // // No metrics should be collected - // assert.Equal(t, 0, len(exp.Result.Insights.NonHistMetricValues[0])) - // assert.Equal(t, 0, len(exp.Result.Insights.HistMetricValues[0])) - // assert.Equal(t, 0, len(exp.Result.Insights.SummaryMetricValues[0])) } func TestErrorCode(t *testing.T) { diff --git a/base/experiment.go b/base/experiment.go index 4dc210dd2..4ba9efa60 100644 --- a/base/experiment.go +++ b/base/experiment.go @@ -4,8 +4,6 @@ import ( "encoding/json" "errors" "fmt" - "strconv" - "strings" "github.com/antonmedv/expr" log "github.com/iter8-tools/iter8/base/log" @@ -307,32 +305,6 @@ func (r *ExperimentResult) initInsightsWithNumVersions(n int) error { return nil } -// NormalizeMetricName normalizes percentile values in metric names -func NormalizeMetricName(m string) (string, error) { - preHTTP := httpMetricPrefix + "/" + builtInHTTPLatencyPercentilePrefix - preGRPC := gRPCMetricPrefix + "/" + gRPCLatencySampleMetricName + "/" + PercentileAggregatorPrefix - pre := "" - if strings.HasPrefix(m, preHTTP) { // built-in http percentile metric - pre = preHTTP - } else if strings.HasPrefix(m, preGRPC) { // built-in gRPC percentile metric - pre = preGRPC - } - if len(pre) > 0 { - var percent float64 - var e error - remainder := strings.TrimPrefix(m, pre) - if percent, e = strconv.ParseFloat(remainder, 64); e != nil { - err := fmt.Errorf("cannot extract percent from metric %v", m) - log.Logger.WithStackTrace(e.Error()).Error(err) - return m, err - } - // return percent normalized metric name - return fmt.Sprintf("%v%v", pre, percent), nil - } - // already normalized - return m, nil -} - // Driver enables interacting with experiment result stored externally type Driver interface { // Read the experiment diff --git a/base/notify.go b/base/notify.go index 234481094..380e24540 100644 --- a/base/notify.go +++ b/base/notify.go @@ -115,9 +115,9 @@ func (t *notifyTask) initializeDefaults() { // set default HTTP method if t.With.Method == "" { if t.With.PayloadTemplateURL != "" { - t.With.Method = "POST" + t.With.Method = http.MethodPost } else { - t.With.Method = "GET" + t.With.Method = http.MethodGet } } } diff --git a/base/notify_test.go b/base/notify_test.go index 62c5392a0..5c5a652b2 100644 --- a/base/notify_test.go +++ b/base/notify_test.go @@ -24,23 +24,21 @@ func getNotifyTask(t *testing.T, n notifyInputs) *notifyTask { }, With: n, } - - httpmock.Activate() - t.Cleanup(httpmock.DeactivateAndReset) - httpmock.RegisterNoResponder(httpmock.InitialTransport.RoundTrip) return nt } // GET method func TestNotify(t *testing.T) { _ = os.Chdir(t.TempDir()) + startHTTPMock(t) + nt := getNotifyTask(t, notifyInputs{ URL: testNotifyURL, SoftFailure: false, }) // notify endpoint - httpmock.RegisterResponder("GET", testNotifyURL, + httpmock.RegisterResponder(http.MethodGet, testNotifyURL, httpmock.NewStringResponder(200, "success")) exp := 
&Experiment{ @@ -65,15 +63,17 @@ type testNotification struct { // POST method and PayloadTemplateURL func TestNotifyWithPayload(t *testing.T) { _ = os.Chdir(t.TempDir()) + startHTTPMock(t) + nt := getNotifyTask(t, notifyInputs{ - Method: "POST", + Method: http.MethodPost, URL: testNotifyURL, PayloadTemplateURL: testNotifyURL + templatePath, SoftFailure: false, }) // payload template endpoint - httpmock.RegisterResponder("GET", testNotifyURL+templatePath, + httpmock.RegisterResponder(http.MethodGet, testNotifyURL+templatePath, httpmock.NewStringResponder(200, `{ "text": "hello world", "textReport": "{{ regexReplaceAll "\"" (regexReplaceAll "\n" (.Report | toPrettyJson) "\\n") "\\\""}}", @@ -82,7 +82,7 @@ func TestNotifyWithPayload(t *testing.T) { // notify endpoint httpmock.RegisterResponder( - "POST", + http.MethodPost, testNotifyURL, func(req *http.Request) (*http.Response, error) { buf := new(bytes.Buffer) @@ -133,6 +133,8 @@ func TestNotifyWithPayload(t *testing.T) { // GET method and headers and query parameters func TestNotifyWithHeadersAndQueryParams(t *testing.T) { _ = os.Chdir(t.TempDir()) + startHTTPMock(t) + nt := getNotifyTask(t, notifyInputs{ URL: testNotifyURL, Headers: map[string]string{ @@ -146,7 +148,7 @@ func TestNotifyWithHeadersAndQueryParams(t *testing.T) { // notify endpoint httpmock.RegisterResponder( - "GET", + http.MethodGet, testNotifyURL, func(req *http.Request) (*http.Response, error) { // check headers @@ -175,6 +177,8 @@ func TestNotifyWithHeadersAndQueryParams(t *testing.T) { // bad method and SoftFailure func TestNotifyBadMethod(t *testing.T) { _ = os.Chdir(t.TempDir()) + startHTTPMock(t) + nt := getNotifyTask(t, notifyInputs{ URL: testNotifyURL, Method: "abc", @@ -193,6 +197,8 @@ func TestNotifyBadMethod(t *testing.T) { // test should fail assert.Error(t, err) + startHTTPMock(t) + nt = getNotifyTask(t, notifyInputs{ URL: testNotifyURL, Method: "abc", @@ -215,6 +221,8 @@ func TestNotifyBadMethod(t *testing.T) { // default to POST method with PayloadTemplateURL func TestNotifyPayloadTemplateURLDefaultMethod(t *testing.T) { _ = os.Chdir(t.TempDir()) + startHTTPMock(t) + nt := getNotifyTask(t, notifyInputs{ URL: testNotifyURL, PayloadTemplateURL: testNotifyURL + templatePath, @@ -222,12 +230,12 @@ func TestNotifyPayloadTemplateURLDefaultMethod(t *testing.T) { }) // payload template endpoint - httpmock.RegisterResponder("GET", testNotifyURL+templatePath, + httpmock.RegisterResponder(http.MethodGet, testNotifyURL+templatePath, httpmock.NewStringResponder(200, `hello world`)) // notify endpoint httpmock.RegisterResponder( - "GET", + http.MethodGet, testNotifyURL, func(req *http.Request) (*http.Response, error) { assert.Fail(t, "notify task did not default to POST method with PayloadTemplateURL") @@ -238,7 +246,7 @@ func TestNotifyPayloadTemplateURLDefaultMethod(t *testing.T) { // notify endpoint httpmock.RegisterResponder( - "POST", + http.MethodPost, testNotifyURL, func(req *http.Request) (*http.Response, error) { return httpmock.NewStringResponse(200, "success"), nil @@ -261,6 +269,8 @@ func TestNotifyPayloadTemplateURLDefaultMethod(t *testing.T) { // No URL func TestNotifyNoURL(t *testing.T) { _ = os.Chdir(t.TempDir()) + startHTTPMock(t) + nt := getNotifyTask(t, notifyInputs{ SoftFailure: false, }) diff --git a/metrics/server.go b/metrics/server.go index e1ad96a30..cc9f58fc7 100644 --- a/metrics/server.go +++ b/metrics/server.go @@ -121,8 +121,8 @@ func Start(stopCh <-chan struct{}) error { // configure endpoints http.HandleFunc("/metrics", getMetrics) 
http.HandleFunc(util.PerformanceResultPath, putResult) - http.HandleFunc("/httpDashboard", getHTTPDashboard) - http.HandleFunc("/grpcDashboard", getGRPCDashboard) + http.HandleFunc(util.HTTPDashboardPath, getHTTPDashboard) + http.HandleFunc(util.GRPCDashboardPath, getGRPCDashboard) // configure HTTP server server := &http.Server{ diff --git a/metrics/server_test.go b/metrics/server_test.go index b50c1bed4..8e43fe6d9 100644 --- a/metrics/server_test.go +++ b/metrics/server_test.go @@ -476,7 +476,7 @@ func TestPutResult(t *testing.T) { func TestGetHTTPDashboardInvalidMethod(t *testing.T) { w := httptest.NewRecorder() - req := httptest.NewRequest(http.MethodPost, "/httpDashboard", nil) + req := httptest.NewRequest(http.MethodPost, util.GRPCDashboardPath, nil) getHTTPDashboard(w, req) res := w.Result() defer func() { From 51e2f4cb8f7ba63f7caf5fc74c7584c7cfee4ea4 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Tue, 1 Aug 2023 23:57:55 -0400 Subject: [PATCH 032/121] Fix gRPC tests Signed-off-by: Alan Cha --- base/collect_grpc.go | 9 +- base/collect_grpc_test.go | 245 +++++++++++++++++++++++--------------- base/collect_http_test.go | 5 +- 3 files changed, 156 insertions(+), 103 deletions(-) diff --git a/base/collect_grpc.go b/base/collect_grpc.go index 068b27d04..bbbc2faf0 100644 --- a/base/collect_grpc.go +++ b/base/collect_grpc.go @@ -14,16 +14,13 @@ import ( const ( // CollectGRPCTaskName is the name of this task which performs load generation and metrics collection for gRPC services. CollectGRPCTaskName = "grpc" - // gRPC metric prefix - gRPCMetricPrefix = "grpc" - // gRPCErrorCountMetricName is name of the gRPC error count metric - gRPCErrorCountMetricName = "error-count" - // gRPCLatencySampleMetricName is name of the gRPC latency sample metric - gRPCLatencySampleMetricName = "latency" // countErrorsDefault is the default value which indicates if errors are counted countErrorsDefault = true // insucureDefault is the default value which indicates that plaintext and insecure connection should be used insecureDefault = true + + // GRPCDashboardPath is the path to the GET grpcDashboard/ endpoint + GRPCDashboardPath = "/grpcDashboard" ) // collectHTTPInputs contain the inputs to the metrics collection task to be executed. 
diff --git a/base/collect_grpc_test.go b/base/collect_grpc_test.go index ab6bfe079..b936c3a06 100644 --- a/base/collect_grpc_test.go +++ b/base/collect_grpc_test.go @@ -1,17 +1,19 @@ package base import ( + "encoding/json" + "fmt" + "io" + "net/http" "os" "strings" "testing" - "time" "github.com/bojand/ghz/runner" "github.com/iter8-tools/iter8/base/internal" "github.com/iter8-tools/iter8/base/internal/helloworld/helloworld" "github.com/iter8-tools/iter8/base/log" "github.com/stretchr/testify/assert" - "sigs.k8s.io/yaml" ) const ( @@ -24,6 +26,42 @@ const ( // Credit: Several of the tests in this file are based on // https://github.com/bojand/ghz/blob/master/runner/run_test.go func TestRunCollectGRPCUnary(t *testing.T) { + // define METRICS_SERVER_URL + metricsServerURL := "http://iter8.default:8080" + err := os.Setenv(MetricsServerURL, metricsServerURL) + assert.NoError(t, err) + + call := "helloworld.Greeter.SayHello" + + // mock metrics server + startHTTPMock(t) + metricsServerCalled := false + mockMetricsServer(mockMetricsServerInput{ + metricsServerURL: metricsServerURL, + performanceResultCallback: func(req *http.Request) { + metricsServerCalled = true + + // check query parameters + assert.Equal(t, myName, req.URL.Query().Get("experiment")) + assert.Equal(t, myNamespace, req.URL.Query().Get("namespace")) + + // check payload + body, err := io.ReadAll(req.Body) + assert.NoError(t, err) + assert.NotNil(t, body) + + // check payload content + bodyFortioResult := FortioResult{} + err = json.Unmarshal(body, &bodyFortioResult) + assert.NoError(t, err) + assert.NotNil(t, body) + + if _, ok := bodyFortioResult.EndpointResults[call]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain call: %s", call)) + } + }, + }) + _ = os.Chdir(t.TempDir()) callType := helloworld.Unary gs, s, err := internal.StartServer(false) @@ -40,7 +78,7 @@ func TestRunCollectGRPCUnary(t *testing.T) { With: collectGRPCInputs{ Config: runner.Config{ Data: map[string]interface{}{"name": "bob"}, - Call: "helloworld.Greeter.SayHello", + Call: call, Host: internal.LocalHostPort, }, }, @@ -51,6 +89,10 @@ func TestRunCollectGRPCUnary(t *testing.T) { exp := &Experiment{ Spec: []Task{ct}, Result: &ExperimentResult{}, + Metadata: ExperimentMetadata{ + Name: myName, + Namespace: myNamespace, + }, } exp.initResults(1) err = ct.run(exp) @@ -59,25 +101,10 @@ func TestRunCollectGRPCUnary(t *testing.T) { assert.NoError(t, err) assert.Equal(t, exp.Result.Insights.NumVersions, 1) + assert.True(t, metricsServerCalled) count := gs.GetCount(callType) assert.Equal(t, 200, count) - - // mm, err := exp.Result.Insights.GetMetricsInfo(gRPCMetricPrefix + "/" + gRPCErrorCountMetricName) - // assert.NotNil(t, mm) - // assert.NoError(t, err) - - // mm, err = exp.Result.Insights.GetMetricsInfo(gRPCMetricPrefix + "/" + gRPCLatencySampleMetricName) - // assert.NotNil(t, mm) - // assert.NoError(t, err) - - // mm, err = exp.Result.Insights.GetMetricsInfo(gRPCMetricPrefix + "/" + gRPCLatencySampleMetricName + "/" + string(MaxAggregator)) - // assert.NotNil(t, mm) - // assert.NoError(t, err) - - // mm, err = exp.Result.Insights.GetMetricsInfo(gRPCMetricPrefix + "/" + gRPCLatencySampleMetricName + "/" + PercentileAggregatorPrefix + "50") - // assert.NotNil(t, mm) - // assert.NoError(t, err) } // If the endpoint does not exist, fail gracefully @@ -114,6 +141,57 @@ func TestRunCollectGRPCUnaryNoEndpoint(t *testing.T) { // Credit: Several of the tests in this file are based on // 
https://github.com/bojand/ghz/blob/master/runner/run_test.go func TestRunCollectGRPCEndpoints(t *testing.T) { + // define METRICS_SERVER_URL + metricsServerURL := "http://iter8.default:8080" + err := os.Setenv(MetricsServerURL, metricsServerURL) + assert.NoError(t, err) + + unaryCall := "helloworld.Greeter.SayHello" + serverCall := "helloworld.Greeter.SayHelloCS" + clientCall := "helloworld.Greeter.SayHellos" + bidirectionalCall := "helloworld.Greeter.SayHelloBidi" + + // mock metrics server + startHTTPMock(t) + metricsServerCalled := false + mockMetricsServer(mockMetricsServerInput{ + metricsServerURL: metricsServerURL, + performanceResultCallback: func(req *http.Request) { + metricsServerCalled = true + + // check query parameters + assert.Equal(t, myName, req.URL.Query().Get("experiment")) + assert.Equal(t, myNamespace, req.URL.Query().Get("namespace")) + + // check payload + body, err := io.ReadAll(req.Body) + assert.NoError(t, err) + assert.NotNil(t, body) + + // check payload content + bodyFortioResult := FortioResult{} + err = json.Unmarshal(body, &bodyFortioResult) + assert.NoError(t, err) + assert.NotNil(t, body) + + if _, ok := bodyFortioResult.EndpointResults[unaryCall]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain call: %s", unaryCall)) + } + + if _, ok := bodyFortioResult.EndpointResults[serverCall]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain call: %s", serverCall)) + } + + if _, ok := bodyFortioResult.EndpointResults[clientCall]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain call: %s", clientCall)) + } + + if _, ok := bodyFortioResult.EndpointResults[bidirectionalCall]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain call: %s", bidirectionalCall)) + } + }, + }) + _ = os.Chdir(t.TempDir()) callType := helloworld.Unary gs, s, err := internal.StartServer(false) @@ -134,19 +212,19 @@ func TestRunCollectGRPCEndpoints(t *testing.T) { Endpoints: map[string]runner.Config{ unary: { Data: map[string]interface{}{"name": "bob"}, - Call: "helloworld.Greeter.SayHello", + Call: unaryCall, }, server: { Data: map[string]interface{}{"name": "bob"}, - Call: "helloworld.Greeter.SayHelloCS", + Call: serverCall, }, client: { Data: map[string]interface{}{"name": "bob"}, - Call: "helloworld.Greeter.SayHellos", + Call: clientCall, }, bidirectional: { Data: map[string]interface{}{"name": "bob"}, - Call: "helloworld.Greeter.SayHelloBidi", + Call: bidirectionalCall, }, }, }, @@ -157,6 +235,10 @@ func TestRunCollectGRPCEndpoints(t *testing.T) { exp := &Experiment{ Spec: []Task{ct}, Result: &ExperimentResult{}, + Metadata: ExperimentMetadata{ + Name: myName, + Namespace: myNamespace, + }, } exp.initResults(1) err = ct.run(exp) @@ -165,33 +247,50 @@ func TestRunCollectGRPCEndpoints(t *testing.T) { assert.NoError(t, err) assert.Equal(t, exp.Result.Insights.NumVersions, 1) + assert.True(t, metricsServerCalled) count := gs.GetCount(callType) assert.Equal(t, 200, count) - - // grpcMethods := []string{unary, server, client, bidirectional} - // for _, method := range grpcMethods { - // mm, err := exp.Result.Insights.GetMetricsInfo(gRPCMetricPrefix + "-" + method + "/" + gRPCErrorCountMetricName) - // assert.NotNil(t, mm) - // assert.NoError(t, err) - - // mm, err = exp.Result.Insights.GetMetricsInfo(gRPCMetricPrefix + "-" + method + "/" + gRPCLatencySampleMetricName) - // assert.NotNil(t, mm) - // assert.NoError(t, err) - - // mm, err = 
exp.Result.Insights.GetMetricsInfo(gRPCMetricPrefix + "-" + method + "/" + gRPCLatencySampleMetricName + "/" + string(MaxAggregator)) - // assert.NotNil(t, mm) - // assert.NoError(t, err) - - // mm, err = exp.Result.Insights.GetMetricsInfo(gRPCMetricPrefix + "-" + method + "/" + gRPCLatencySampleMetricName + "/" + PercentileAggregatorPrefix + "50") - // assert.NotNil(t, mm) - // assert.NoError(t, err) - // } } // If the endpoints cannot be reached, then do not throw an error // Should not return an nil pointer dereference error (see #1451) func TestRunCollectGRPCMultipleNoEndpoints(t *testing.T) { + // define METRICS_SERVER_URL + metricsServerURL := "http://iter8.default:8080" + err := os.Setenv(MetricsServerURL, metricsServerURL) + assert.NoError(t, err) + + unaryCall := "helloworld.Greeter.SayHello" + serverCall := "helloworld.Greeter.SayHelloCS" + clientCall := "helloworld.Greeter.SayHellos" + bidirectionalCall := "helloworld.Greeter.SayHelloBidi" + + // mock metrics server + startHTTPMock(t) + metricsServerCalled := false + mockMetricsServer(mockMetricsServerInput{ + metricsServerURL: metricsServerURL, + performanceResultCallback: func(req *http.Request) { + metricsServerCalled = true + + // check query parameters + assert.Equal(t, myName, req.URL.Query().Get("experiment")) + assert.Equal(t, myNamespace, req.URL.Query().Get("namespace")) + + // check payload + body, err := io.ReadAll(req.Body) + assert.NoError(t, err) + assert.NotNil(t, body) + + // check payload content + bodyFortioResult := FortioResult{} + err = json.Unmarshal(body, &bodyFortioResult) + assert.NoError(t, err) + assert.Equal(t, `{"EndpointResults":{},"Summary":{"numVersions":1,"versionNames":null}}`, string(body)) + }, + }) + // valid collect GRPC task... should succeed ct := &collectGRPCTask{ TaskMeta: TaskMeta{ @@ -204,19 +303,19 @@ func TestRunCollectGRPCMultipleNoEndpoints(t *testing.T) { Endpoints: map[string]runner.Config{ unary: { Data: map[string]interface{}{"name": "bob"}, - Call: "helloworld.Greeter.SayHello", + Call: unaryCall, }, server: { Data: map[string]interface{}{"name": "bob"}, - Call: "helloworld.Greeter.SayHelloCS", + Call: serverCall, }, client: { Data: map[string]interface{}{"name": "bob"}, - Call: "helloworld.Greeter.SayHellos", + Call: clientCall, }, bidirectional: { Data: map[string]interface{}{"name": "bob"}, - Call: "helloworld.Greeter.SayHelloBidi", + Call: bidirectionalCall, }, }, }, @@ -227,57 +326,13 @@ func TestRunCollectGRPCMultipleNoEndpoints(t *testing.T) { exp := &Experiment{ Spec: []Task{ct}, Result: &ExperimentResult{}, - } - exp.initResults(1) - err := ct.run(exp) - assert.NoError(t, err) - - // // No metrics should be collected - // assert.Equal(t, 0, len(exp.Result.Insights.NonHistMetricValues[0])) - // assert.Equal(t, 0, len(exp.Result.Insights.HistMetricValues[0])) - // assert.Equal(t, 0, len(exp.Result.Insights.SummaryMetricValues[0])) -} - -func TestMockGRPCWithSLOsAndPercentiles(t *testing.T) { - _ = os.Chdir(t.TempDir()) - callType := helloworld.Unary - gs, s, err := internal.StartServer(false) - if err != nil { - assert.FailNow(t, err.Error()) - } - t.Cleanup(s.Stop) - - // valid collect GRPC task... 
should succeed - ct := &collectGRPCTask{ - TaskMeta: TaskMeta{ - Task: StringPointer(CollectGRPCTaskName), - }, - With: collectGRPCInputs{ - Config: runner.Config{ - N: 100, - RPS: 20, - C: 1, - Timeout: runner.Duration(20 * time.Second), - Data: map[string]interface{}{"name": "bob"}, - DialTimeout: runner.Duration(20 * time.Second), - Call: "helloworld.Greeter.SayHello", - Host: internal.LocalHostPort, - }, + Metadata: ExperimentMetadata{ + Name: myName, + Namespace: myNamespace, }, } - - exp := &Experiment{ - Spec: []Task{ct}, - } - exp.initResults(1) - _ = exp.Result.initInsightsWithNumVersions(1) - err = exp.Spec[0].run(exp) + err = ct.run(exp) assert.NoError(t, err) - - expBytes, _ := yaml.Marshal(exp) - log.Logger.Debug("\n" + string(expBytes)) - - count := gs.GetCount(callType) - assert.Equal(t, int(ct.With.N), count) + assert.True(t, metricsServerCalled) } diff --git a/base/collect_http_test.go b/base/collect_http_test.go index 91865df6f..16d286661 100644 --- a/base/collect_http_test.go +++ b/base/collect_http_test.go @@ -395,11 +395,11 @@ func TestRunCollectHTTPMultipleNoEndpoints(t *testing.T) { // mock metrics server startHTTPMock(t) - // metricsServerCalled := false + metricsServerCalled := false mockMetricsServer(mockMetricsServerInput{ metricsServerURL: metricsServerURL, performanceResultCallback: func(req *http.Request) { - // metricsServerCalled = true + metricsServerCalled = true // check query parameters assert.Equal(t, myName, req.URL.Query().Get("experiment")) @@ -457,6 +457,7 @@ func TestRunCollectHTTPMultipleNoEndpoints(t *testing.T) { exp.initResults(1) err = ct.run(exp) assert.NoError(t, err) + assert.True(t, metricsServerCalled) } func TestErrorCode(t *testing.T) { From eb8a42b6306a9c599673dd944950515ee5917f1d Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Wed, 2 Aug 2023 07:58:54 -0400 Subject: [PATCH 033/121] Fix experiment tests Signed-off-by: Alan Cha --- base/experiment.go | 4 +- base/experiment_test.go | 83 +++++++++++++++++++++++++++++++++++++++-- testdata/experiment.tpl | 3 ++ 3 files changed, 85 insertions(+), 5 deletions(-) diff --git a/base/experiment.go b/base/experiment.go index 4ba9efa60..9dba38412 100644 --- a/base/experiment.go +++ b/base/experiment.go @@ -30,10 +30,10 @@ type ExperimentSpec []Task // Used in http and grpc tasks to send the name and namespace to the metrics server type ExperimentMetadata struct { // Name is the name of the experiment - Name string + Name string `json:"name" yaml:"name"` // Namespace is the namespace the experiment was deployed in - Namespace string + Namespace string `json:"namespace" yaml:"namespace"` } // Experiment struct containing spec and result diff --git a/base/experiment_test.go b/base/experiment_test.go index dd405308f..f1c10881e 100644 --- a/base/experiment_test.go +++ b/base/experiment_test.go @@ -1,7 +1,10 @@ package base import ( + "encoding/json" "fmt" + "io" + "net/http" "os" "testing" @@ -29,7 +32,10 @@ func TestReadExperiment(t *testing.T) { } func TestRunningTasks(t *testing.T) { - _ = os.Chdir(t.TempDir()) + // define METRICS_SERVER_URL + metricsServerURL := "http://iter8.default:8080" + err := os.Setenv(MetricsServerURL, metricsServerURL) + assert.NoError(t, err) // create and configure HTTP endpoint for testing mux, addr := fhttp.DynamicHTTPServer(false) @@ -51,20 +57,59 @@ func TestRunningTasks(t *testing.T) { }, } + // mock metrics server + startHTTPMock(t) + metricsServerCalled := false + mockMetricsServer(mockMetricsServerInput{ + metricsServerURL: metricsServerURL, + 
performanceResultCallback: func(req *http.Request) { + metricsServerCalled = true + + // check query parameters + assert.Equal(t, myName, req.URL.Query().Get("experiment")) + assert.Equal(t, myNamespace, req.URL.Query().Get("namespace")) + + // check payload + body, err := io.ReadAll(req.Body) + assert.NoError(t, err) + assert.NotNil(t, body) + + // check payload content + bodyFortioResult := FortioResult{} + err = json.Unmarshal(body, &bodyFortioResult) + assert.NoError(t, err) + assert.NotNil(t, body) + + if _, ok := bodyFortioResult.EndpointResults[url]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain url: %s", url)) + } + }, + }) + + _ = os.Chdir(t.TempDir()) + exp := &Experiment{ Spec: []Task{ct}, Result: &ExperimentResult{}, + Metadata: ExperimentMetadata{ + Name: myName, + Namespace: myNamespace, + }, } exp.initResults(1) - err := ct.run(exp) + err = ct.run(exp) assert.NoError(t, err) assert.Equal(t, exp.Result.Insights.NumVersions, 1) + assert.True(t, metricsServerCalled) // sanity check -- handler was called assert.True(t, verifyHandlerCalled) } func TestRunExperiment(t *testing.T) { - _ = os.Chdir(t.TempDir()) + // define METRICS_SERVER_URL + metricsServerURL := "http://iter8.default:8080" + err := os.Setenv(MetricsServerURL, metricsServerURL) + assert.NoError(t, err) // create and configure HTTP endpoint for testing mux, addr := fhttp.DynamicHTTPServer(false) @@ -72,6 +117,37 @@ func TestRunExperiment(t *testing.T) { var verifyHandlerCalled bool mux.HandleFunc("/get", GetTrackingHandler(&verifyHandlerCalled)) + // mock metrics server + startHTTPMock(t) + metricsServerCalled := false + mockMetricsServer(mockMetricsServerInput{ + metricsServerURL: metricsServerURL, + performanceResultCallback: func(req *http.Request) { + metricsServerCalled = true + + // check query parameters + assert.Equal(t, myName, req.URL.Query().Get("experiment")) + assert.Equal(t, myNamespace, req.URL.Query().Get("namespace")) + + // check payload + body, err := io.ReadAll(req.Body) + assert.NoError(t, err) + assert.NotNil(t, body) + + // check payload content + bodyFortioResult := FortioResult{} + err = json.Unmarshal(body, &bodyFortioResult) + assert.NoError(t, err) + assert.NotNil(t, body) + + if _, ok := bodyFortioResult.EndpointResults[url]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain url: %s", url)) + } + }, + }) + + _ = os.Chdir(t.TempDir()) + // create experiment.yaml CreateExperimentYaml(t, CompletePath("../testdata", "experiment.tpl"), url, "experiment.yaml") b, err := os.ReadFile("experiment.yaml") @@ -84,6 +160,7 @@ func TestRunExperiment(t *testing.T) { err = RunExperiment(false, &mockDriver{e}) assert.NoError(t, err) + assert.True(t, metricsServerCalled) // sanity check -- handler was called assert.True(t, verifyHandlerCalled) diff --git a/testdata/experiment.tpl b/testdata/experiment.tpl index 986adb1de..23afa3a53 100644 --- a/testdata/experiment.tpl +++ b/testdata/experiment.tpl @@ -1,3 +1,6 @@ +metadata: + name: myName + namespace: myNamespace spec: # task 1: generate HTTP requests for application URL # collect Iter8's built-in HTTP latency and error-related metrics From ca6eeb8e163a2de92d0dff5bdf9d66341832fd8e Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Wed, 2 Aug 2023 07:59:42 -0400 Subject: [PATCH 034/121] Fix server tests Delete fields in the test data after fields were deleted from type structs Signed-off-by: Alan Cha --- metrics/server_test.go | 210 +---------------------------------------- 1 file
changed, 2 insertions(+), 208 deletions(-) diff --git a/metrics/server_test.go b/metrics/server_test.go index 8e43fe6d9..6348d730d 100644 --- a/metrics/server_test.go +++ b/metrics/server_test.go @@ -816,217 +816,11 @@ const fortioResultJSON = `{ }, "Summary": { "numVersions": 1, - "versionNames": null, - "metricsInfo": { - "http/latency": { - "description": "Latency Histogram", - "units": "msec", - "type": "Histogram" - }, - "http://httpbin.default/get/error-count": { - "description": "number of responses that were errors", - "type": "Counter" - }, - "http://httpbin.default/get/error-rate": { - "description": "fraction of responses that were errors", - "type": "Gauge" - }, - "http://httpbin.default/get/latency-max": { - "description": "maximum of observed latency values", - "units": "msec", - "type": "Gauge" - }, - "http://httpbin.default/get/latency-mean": { - "description": "mean of observed latency values", - "units": "msec", - "type": "Gauge" - }, - "http://httpbin.default/get/latency-min": { - "description": "minimum of observed latency values", - "units": "msec", - "type": "Gauge" - }, - "http://httpbin.default/get/latency-p50": { - "description": "50-th percentile of observed latency values", - "units": "msec", - "type": "Gauge" - }, - "http://httpbin.default/get/latency-p75": { - "description": "75-th percentile of observed latency values", - "units": "msec", - "type": "Gauge" - }, - "http://httpbin.default/get/latency-p90": { - "description": "90-th percentile of observed latency values", - "units": "msec", - "type": "Gauge" - }, - "http://httpbin.default/get/latency-p95": { - "description": "95-th percentile of observed latency values", - "units": "msec", - "type": "Gauge" - }, - "http://httpbin.default/get/latency-p99": { - "description": "99-th percentile of observed latency values", - "units": "msec", - "type": "Gauge" - }, - "http://httpbin.default/get/latency-p99.9": { - "description": "99.9-th percentile of observed latency values", - "units": "msec", - "type": "Gauge" - }, - "http://httpbin.default/get/latency-stddev": { - "description": "standard deviation of observed latency values", - "units": "msec", - "type": "Gauge" - }, - "http://httpbin.default/get/request-count": { - "description": "number of requests sent", - "type": "Counter" - } - }, - "nonHistMetricValues": [ - { - "http://httpbin.default/get/error-count": [ - 0 - ], - "http://httpbin.default/get/error-rate": [ - 0 - ], - "http://httpbin.default/get/latency-max": [ - 40.490041999999995 - ], - "http://httpbin.default/get/latency-mean": [ - 15.977100850000001 - ], - "http://httpbin.default/get/latency-min": [ - 4.2238750000000005 - ], - "http://httpbin.default/get/latency-p50": [ - 14.571428571428571 - ], - "http://httpbin.default/get/latency-p75": [ - 20.454545454545453 - ], - "http://httpbin.default/get/latency-p90": [ - 28.125 - ], - "http://httpbin.default/get/latency-p95": [ - 32 - ], - "http://httpbin.default/get/latency-p99": [ - 40 - ], - "http://httpbin.default/get/latency-p99.9": [ - 40.441037800000004 - ], - "http://httpbin.default/get/latency-stddev": [ - 8.340658047253257 - ], - "http://httpbin.default/get/request-count": [ - 100 - ] - } - ], - "histMetricValues": [ - { - "http/latency": [ - { - "lower": 4.2238750000000005, - "upper": 5, - "count": 5 - }, - { - "lower": 5, - "upper": 6, - "count": 5 - }, - { - "lower": 6, - "upper": 7, - "count": 4 - }, - { - "lower": 7, - "upper": 8, - "count": 5 - }, - { - "lower": 8, - "upper": 9.000000000000002, - "count": 5 - }, - { - "lower": 
9.000000000000002, - "upper": 10, - "count": 4 - }, - { - "lower": 10, - "upper": 11, - "count": 5 - }, - { - "lower": 11, - "upper": 12, - "count": 3 - }, - { - "lower": 12, - "upper": 14, - "count": 12 - }, - { - "lower": 14, - "upper": 16, - "count": 7 - }, - { - "lower": 16, - "upper": 18.000000000000004, - "count": 10 - }, - { - "lower": 18.000000000000004, - "upper": 20, - "count": 9 - }, - { - "lower": 20, - "upper": 25, - "count": 11 - }, - { - "lower": 25, - "upper": 30, - "count": 8 - }, - { - "lower": 30, - "upper": 35, - "count": 5 - }, - { - "lower": 35, - "upper": 40, - "count": 1 - }, - { - "lower": 40, - "upper": 40.490041999999995, - "count": 1 - } - ] - } - ], - "SummaryMetricValues": [ - {} - ] + "versionNames": null } }` -const fortioDashboardJSON = `{"Endpoints":{"http://httpbin.default/get":{"Durations":[{"Version":"0","Bucket":"4.2 - 5","Value":5},{"Version":"0","Bucket":"5 - 6","Value":5},{"Version":"0","Bucket":"6 - 7","Value":4},{"Version":"0","Bucket":"7 - 8","Value":5},{"Version":"0","Bucket":"8 - 9","Value":5},{"Version":"0","Bucket":"9 - 10","Value":4},{"Version":"0","Bucket":"10 - 11","Value":5},{"Version":"0","Bucket":"11 - 12","Value":3},{"Version":"0","Bucket":"12 - 14","Value":12},{"Version":"0","Bucket":"14 - 16","Value":7},{"Version":"0","Bucket":"16 - 18","Value":10},{"Version":"0","Bucket":"18 - 20","Value":9},{"Version":"0","Bucket":"20 - 25","Value":11},{"Version":"0","Bucket":"25 - 30","Value":8},{"Version":"0","Bucket":"30 - 35","Value":5},{"Version":"0","Bucket":"35 - 40","Value":1},{"Version":"0","Bucket":"40 - 40.4","Value":1}],"Statistics":{"Count":100,"Mean":15.977100850000001,"StdDev":8.340658047253257,"Min":4.2238750000000005,"Max":40.490041999999995},"Error durations":[],"Error statistics":{"Count":0,"Mean":0,"StdDev":0,"Min":0,"Max":0},"Return codes":{"200":100}}},"Summary":{"numVersions":1,"versionNames":null,"metricsInfo":{"http/latency":{"description":"Latency Histogram","units":"msec","type":"Histogram"},"http://httpbin.default/get/error-count":{"description":"number of responses that were errors","type":"Counter"},"http://httpbin.default/get/error-rate":{"description":"fraction of responses that were errors","type":"Gauge"},"http://httpbin.default/get/latency-max":{"description":"maximum of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/latency-mean":{"description":"mean of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/latency-min":{"description":"minimum of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/latency-p50":{"description":"50-th percentile of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/latency-p75":{"description":"75-th percentile of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/latency-p90":{"description":"90-th percentile of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/latency-p95":{"description":"95-th percentile of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/latency-p99":{"description":"99-th percentile of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/latency-p99.9":{"description":"99.9-th percentile of observed latency values","units":"msec","type":"Gauge"},"http://httpbin.default/get/latency-stddev":{"description":"standard deviation of observed latency 
values","units":"msec","type":"Gauge"},"http://httpbin.default/get/request-count":{"description":"number of requests sent","type":"Counter"}},"nonHistMetricValues":[{"http://httpbin.default/get/error-count":[0],"http://httpbin.default/get/error-rate":[0],"http://httpbin.default/get/latency-max":[40.490041999999995],"http://httpbin.default/get/latency-mean":[15.977100850000001],"http://httpbin.default/get/latency-min":[4.2238750000000005],"http://httpbin.default/get/latency-p50":[14.571428571428571],"http://httpbin.default/get/latency-p75":[20.454545454545453],"http://httpbin.default/get/latency-p90":[28.125],"http://httpbin.default/get/latency-p95":[32],"http://httpbin.default/get/latency-p99":[40],"http://httpbin.default/get/latency-p99.9":[40.441037800000004],"http://httpbin.default/get/latency-stddev":[8.340658047253257],"http://httpbin.default/get/request-count":[100]}],"histMetricValues":[{"http/latency":[{"lower":4.2238750000000005,"upper":5,"count":5},{"lower":5,"upper":6,"count":5},{"lower":6,"upper":7,"count":4},{"lower":7,"upper":8,"count":5},{"lower":8,"upper":9.000000000000002,"count":5},{"lower":9.000000000000002,"upper":10,"count":4},{"lower":10,"upper":11,"count":5},{"lower":11,"upper":12,"count":3},{"lower":12,"upper":14,"count":12},{"lower":14,"upper":16,"count":7},{"lower":16,"upper":18.000000000000004,"count":10},{"lower":18.000000000000004,"upper":20,"count":9},{"lower":20,"upper":25,"count":11},{"lower":25,"upper":30,"count":8},{"lower":30,"upper":35,"count":5},{"lower":35,"upper":40,"count":1},{"lower":40,"upper":40.490041999999995,"count":1}]}],"SummaryMetricValues":[{}]}}` +const fortioDashboardJSON = `{"Endpoints":{"http://httpbin.default/get":{"Durations":[{"Version":"0","Bucket":"4.2 - 5","Value":5},{"Version":"0","Bucket":"5 - 6","Value":5},{"Version":"0","Bucket":"6 - 7","Value":4},{"Version":"0","Bucket":"7 - 8","Value":5},{"Version":"0","Bucket":"8 - 9","Value":5},{"Version":"0","Bucket":"9 - 10","Value":4},{"Version":"0","Bucket":"10 - 11","Value":5},{"Version":"0","Bucket":"11 - 12","Value":3},{"Version":"0","Bucket":"12 - 14","Value":12},{"Version":"0","Bucket":"14 - 16","Value":7},{"Version":"0","Bucket":"16 - 18","Value":10},{"Version":"0","Bucket":"18 - 20","Value":9},{"Version":"0","Bucket":"20 - 25","Value":11},{"Version":"0","Bucket":"25 - 30","Value":8},{"Version":"0","Bucket":"30 - 35","Value":5},{"Version":"0","Bucket":"35 - 40","Value":1},{"Version":"0","Bucket":"40 - 40.4","Value":1}],"Statistics":{"Count":100,"Mean":15.977100850000001,"StdDev":8.340658047253257,"Min":4.2238750000000005,"Max":40.490041999999995},"Error durations":[],"Error statistics":{"Count":0,"Mean":0,"StdDev":0,"Min":0,"Max":0},"Return codes":{"200":100}}},"Summary":{"numVersions":1,"versionNames":null}}` func TestGetHTTPDashboard(t *testing.T) { // instantiate metrics client From 4bbf90479369693548c8c5345362fd4c552774c5 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Wed, 2 Aug 2023 08:39:47 -0400 Subject: [PATCH 035/121] Fix file driver tests Signed-off-by: Alan Cha --- base/experiment_test.go | 28 ++++---- driver/filedriver_test.go | 107 +++++++++++++++++++++++++++- testdata/drivertests/experiment.tpl | 3 + 3 files changed, 122 insertions(+), 16 deletions(-) diff --git a/base/experiment_test.go b/base/experiment_test.go index f1c10881e..98be0bafc 100644 --- a/base/experiment_test.go +++ b/base/experiment_test.go @@ -43,20 +43,6 @@ func TestRunningTasks(t *testing.T) { var verifyHandlerCalled bool mux.HandleFunc("/get", GetTrackingHandler(&verifyHandlerCalled)) - // valid 
collect task... should succeed - ct := &collectHTTPTask{ - TaskMeta: TaskMeta{ - Task: StringPointer(CollectHTTPTaskName), - }, - With: collectHTTPInputs{ - endpoint: endpoint{ - Duration: StringPointer("1s"), - Headers: map[string]string{}, - URL: url, - }, - }, - } - // mock metrics server startHTTPMock(t) metricsServerCalled := false @@ -88,6 +74,20 @@ func TestRunningTasks(t *testing.T) { _ = os.Chdir(t.TempDir()) + // valid collect task... should succeed + ct := &collectHTTPTask{ + TaskMeta: TaskMeta{ + Task: StringPointer(CollectHTTPTaskName), + }, + With: collectHTTPInputs{ + endpoint: endpoint{ + Duration: StringPointer("1s"), + Headers: map[string]string{}, + URL: url, + }, + }, + } + exp := &Experiment{ Spec: []Task{ct}, Result: &ExperimentResult{}, diff --git a/driver/filedriver_test.go b/driver/filedriver_test.go index 73ff99f84..65528f308 100644 --- a/driver/filedriver_test.go +++ b/driver/filedriver_test.go @@ -1,17 +1,88 @@ package driver import ( + "encoding/json" "fmt" + "io" + "net/http" "os" "testing" "fortio.org/fortio/fhttp" "github.com/iter8-tools/iter8/base" + "github.com/jarcoal/httpmock" "github.com/stretchr/testify/assert" ) +const ( + myName = "myName" + myNamespace = "myNamespace" +) + +func startHTTPMock(t *testing.T) { + httpmock.Activate() + t.Cleanup(httpmock.DeactivateAndReset) + httpmock.RegisterNoResponder(httpmock.InitialTransport.RoundTrip) +} + +// TODO: duplicated from collect_http_test.go +type DashboardCallback func(req *http.Request) + +type mockMetricsServerInput struct { + metricsServerURL string + + // GET /httpDashboard + httpDashboardCallback DashboardCallback + // GET /grpcDashboard + gRPCDashboardCallback DashboardCallback + // PUT /performanceResult + performanceResultCallback DashboardCallback +} + +func mockMetricsServer(input mockMetricsServerInput) { + // GET /httpDashboard + httpmock.RegisterResponder( + http.MethodGet, + input.metricsServerURL+base.HTTPDashboardPath, + func(req *http.Request) (*http.Response, error) { + if input.httpDashboardCallback != nil { + input.httpDashboardCallback(req) + } + + return httpmock.NewStringResponse(200, "success"), nil + }, + ) + + // GET /grpcDashboard + httpmock.RegisterResponder( + http.MethodGet, + input.metricsServerURL+base.GRPCDashboardPath, + func(req *http.Request) (*http.Response, error) { + if input.gRPCDashboardCallback != nil { + input.gRPCDashboardCallback(req) + } + return httpmock.NewStringResponse(200, "success"), nil + }, + ) + + // PUT /performanceResult + httpmock.RegisterResponder( + http.MethodPut, + input.metricsServerURL+base.PerformanceResultPath, + func(req *http.Request) (*http.Response, error) { + if input.performanceResultCallback != nil { + input.performanceResultCallback(req) + } + return httpmock.NewStringResponse(200, "success"), nil + }, + ) +} + func TestLocalRun(t *testing.T) { - _ = os.Chdir(t.TempDir()) + // define METRICS_SERVER_URL + metricsServerURL := "http://iter8.default:8080" + err := os.Setenv(base.MetricsServerURL, metricsServerURL) + assert.NoError(t, err) // create and configure HTTP endpoint for testing mux, addr := fhttp.DynamicHTTPServer(false) @@ -19,13 +90,44 @@ func TestLocalRun(t *testing.T) { var verifyHandlerCalled bool mux.HandleFunc("/get", base.GetTrackingHandler(&verifyHandlerCalled)) + // mock metrics server + startHTTPMock(t) + metricsServerCalled := false + mockMetricsServer(mockMetricsServerInput{ + metricsServerURL: metricsServerURL, + performanceResultCallback: func(req *http.Request) { + metricsServerCalled = true + + // check 
query parameters + assert.Equal(t, myName, req.URL.Query().Get("experiment")) + assert.Equal(t, myNamespace, req.URL.Query().Get("namespace")) + + // check payload + body, err := io.ReadAll(req.Body) + assert.NoError(t, err) + assert.NotNil(t, body) + + // check payload content + bodyFortioResult := base.FortioResult{} + err = json.Unmarshal(body, &bodyFortioResult) + assert.NoError(t, err) + assert.NotNil(t, body) + + if _, ok := bodyFortioResult.EndpointResults[url]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain url: %s", url)) + } + }, + }) + + _ = os.Chdir(t.TempDir()) + // create experiment.yaml base.CreateExperimentYaml(t, base.CompletePath("../testdata/drivertests", "experiment.tpl"), url, ExperimentPath) fd := FileDriver{ RunDir: ".", } - err := base.RunExperiment(false, &fd) + err = base.RunExperiment(false, &fd) assert.NoError(t, err) // sanity check -- handler was called assert.True(t, verifyHandlerCalled) @@ -34,6 +136,7 @@ func TestLocalRun(t *testing.T) { exp, err := base.BuildExperiment(&fd) assert.NoError(t, err) assert.True(t, exp.Completed() && exp.NoFailure()) + assert.True(t, metricsServerCalled) } func TestFileDriverReadError(t *testing.T) { diff --git a/testdata/drivertests/experiment.tpl b/testdata/drivertests/experiment.tpl index 986adb1de..23afa3a53 100644 --- a/testdata/drivertests/experiment.tpl +++ b/testdata/drivertests/experiment.tpl @@ -1,3 +1,6 @@ +metadata: + name: myName + namespace: myNamespace spec: # task 1: generate HTTP requests for application URL # collect Iter8's built-in HTTP latency and error-related metrics From ca744aac980a2985424fd8e03860563ade7b8d89 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Wed, 2 Aug 2023 08:42:58 -0400 Subject: [PATCH 036/121] Fix server tests Signed-off-by: Alan Cha --- metrics/server_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/metrics/server_test.go b/metrics/server_test.go index 6348d730d..f7b11508b 100644 --- a/metrics/server_test.go +++ b/metrics/server_test.go @@ -1111,7 +1111,7 @@ const ghzResultJSON = `{ } }` -const ghzDashboardJSON = `{"Endpoints":{"routeguide.RouteGuide.GetFeature":{"Durations":[{"Version":"0","Bucket":"0.032","Value":1},{"Version":"0","Bucket":"19.603","Value":167},{"Version":"0","Bucket":"39.174","Value":0},{"Version":"0","Bucket":"58.744","Value":0},{"Version":"0","Bucket":"78.315","Value":0},{"Version":"0","Bucket":"97.886","Value":3},{"Version":"0","Bucket":"117.457","Value":13},{"Version":"0","Bucket":"137.028","Value":0},{"Version":"0","Bucket":"156.599","Value":0},{"Version":"0","Bucket":"176.17","Value":0},{"Version":"0","Bucket":"195.74","Value":16}],"Statistics":{"Count":200,"ErrorCount":200},"Status codes":{"Unavailable":200}}},"Summary":{"numVersions":0,"versionNames":null,"SummaryMetricValues":null}}` +const ghzDashboardJSON = `{"Endpoints":{"routeguide.RouteGuide.GetFeature":{"Durations":[{"Version":"0","Bucket":"0.032","Value":1},{"Version":"0","Bucket":"19.603","Value":167},{"Version":"0","Bucket":"39.174","Value":0},{"Version":"0","Bucket":"58.744","Value":0},{"Version":"0","Bucket":"78.315","Value":0},{"Version":"0","Bucket":"97.886","Value":3},{"Version":"0","Bucket":"117.457","Value":13},{"Version":"0","Bucket":"137.028","Value":0},{"Version":"0","Bucket":"156.599","Value":0},{"Version":"0","Bucket":"176.17","Value":0},{"Version":"0","Bucket":"195.74","Value":16}],"Statistics":{"Count":200,"ErrorCount":200},"Status codes":{"Unavailable":200}}},"Summary":{"numVersions":0,"versionNames":null}}` func 
TestGetGHZDashboard(t *testing.T) { // instantiate metrics client From 68998096eff16538144e37e824714c96eab1c1e7 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Wed, 2 Aug 2023 09:20:53 -0400 Subject: [PATCH 037/121] Fix assert tests Signed-off-by: Alan Cha --- action/assert.go | 2 -- action/assert_test.go | 2 +- cmd/k.go | 4 +++- testdata/assertinputs/.gitignore | 2 ++ testdata/assertinputs/experiment.yaml | 16 ++++++++++++++++ 5 files changed, 22 insertions(+), 4 deletions(-) create mode 100644 testdata/assertinputs/.gitignore create mode 100644 testdata/assertinputs/experiment.yaml diff --git a/action/assert.go b/action/assert.go index 9500f5ac8..8258cb28f 100644 --- a/action/assert.go +++ b/action/assert.go @@ -15,8 +15,6 @@ const ( Completed = "completed" // NoFailure states that none of the tasks in the experiment have failed NoFailure = "nofailure" - // SLOs states that all app versions participating in the experiment satisfy SLOs - SLOs = "slos" ) // AssertOpts are the options used for asserting experiment results diff --git a/action/assert_test.go b/action/assert_test.go index 54d7cdc66..6a0d44efb 100644 --- a/action/assert_test.go +++ b/action/assert_test.go @@ -17,7 +17,7 @@ func TestKubeAssert(t *testing.T) { _ = os.Chdir(t.TempDir()) // fix aOpts aOpts := NewAssertOpts(driver.NewFakeKubeDriver(cli.New())) - aOpts.Conditions = []string{Completed, NoFailure, SLOs} + aOpts.Conditions = []string{Completed, NoFailure} byteArray, _ := os.ReadFile(base.CompletePath("../testdata/assertinputs", driver.ExperimentPath)) _, _ = aOpts.Clientset.CoreV1().Secrets("default").Create(context.TODO(), &corev1.Secret{ diff --git a/cmd/k.go b/cmd/k.go index 908bdd309..5e704d654 100644 --- a/cmd/k.go +++ b/cmd/k.go @@ -40,6 +40,9 @@ func init() { os.Exit(1) } + // add k assert + kcmd.AddCommand(newKAssertCmd(kd)) + // add k delete kcmd.AddCommand(newKDeleteCmd(kd, os.Stdout)) @@ -51,5 +54,4 @@ func init() { // add k run kcmd.AddCommand(newKRunCmd(kd, os.Stdout)) - } diff --git a/testdata/assertinputs/.gitignore b/testdata/assertinputs/.gitignore new file mode 100644 index 000000000..4e9ba03c6 --- /dev/null +++ b/testdata/assertinputs/.gitignore @@ -0,0 +1,2 @@ +!experiment.yaml +!result.yaml \ No newline at end of file diff --git a/testdata/assertinputs/experiment.yaml b/testdata/assertinputs/experiment.yaml new file mode 100644 index 000000000..357e6a092 --- /dev/null +++ b/testdata/assertinputs/experiment.yaml @@ -0,0 +1,16 @@ +spec: + # task 1: generate HTTP requests for application URL + # collect Iter8's built-in HTTP latency and error-related metrics + - task: http + with: + duration: 2s + errorRanges: + - lower: 500 + url: https://httpbin.org/get +result: + failure: false + insights: + numVersions: 1 + iter8Version: v0.13 + numCompletedTasks: 1 + startTime: "2022-03-16T10:22:58.540897-04:00" \ No newline at end of file From b6e44d42f6f17dd12df02e36f8d65cbb6c88f974 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Wed, 2 Aug 2023 09:45:24 -0400 Subject: [PATCH 038/121] Fix kubedriver tests Signed-off-by: Alan Cha --- driver/kubedriver_test.go | 65 +++++++++++++++++++++------ testdata/assertinputs/experiment.yaml | 3 ++ 2 files changed, 54 insertions(+), 14 deletions(-) diff --git a/driver/kubedriver_test.go b/driver/kubedriver_test.go index a2144cec6..d1da78ef8 100644 --- a/driver/kubedriver_test.go +++ b/driver/kubedriver_test.go @@ -2,7 +2,10 @@ package driver import ( "context" + "encoding/json" "fmt" + "io" + "net/http" "os" "testing" @@ -12,7 +15,6 @@ import ( "helm.sh/helm/v3/pkg/action" 
"helm.sh/helm/v3/pkg/cli" "helm.sh/helm/v3/pkg/cli/values" - batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -66,7 +68,10 @@ func TestKOps(t *testing.T) { } func TestKubeRun(t *testing.T) { - _ = os.Chdir(t.TempDir()) + // define METRICS_SERVER_URL + metricsServerURL := "http://iter8.default:8080" + err := os.Setenv(base.MetricsServerURL, metricsServerURL) + assert.NoError(t, err) // create and configure HTTP endpoint for testing mux, addr := fhttp.DynamicHTTPServer(false) @@ -74,6 +79,37 @@ func TestKubeRun(t *testing.T) { var verifyHandlerCalled bool mux.HandleFunc("/get", base.GetTrackingHandler(&verifyHandlerCalled)) + // mock metrics server + startHTTPMock(t) + metricsServerCalled := false + mockMetricsServer(mockMetricsServerInput{ + metricsServerURL: metricsServerURL, + performanceResultCallback: func(req *http.Request) { + metricsServerCalled = true + + // check query parameters + assert.Equal(t, myName, req.URL.Query().Get("experiment")) + assert.Equal(t, myNamespace, req.URL.Query().Get("namespace")) + + // check payload + body, err := io.ReadAll(req.Body) + assert.NoError(t, err) + assert.NotNil(t, body) + + // check payload content + bodyFortioResult := base.FortioResult{} + err = json.Unmarshal(body, &bodyFortioResult) + assert.NoError(t, err) + assert.NotNil(t, body) + + if _, ok := bodyFortioResult.EndpointResults[url]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain url: %s", url)) + } + }, + }) + + _ = os.Chdir(t.TempDir()) + // create experiment.yaml base.CreateExperimentYaml(t, base.CompletePath("../testdata/drivertests", "experiment.tpl"), url, ExperimentPath) @@ -89,21 +125,22 @@ func TestKubeRun(t *testing.T) { StringData: map[string]string{ExperimentPath: string(byteArray)}, }, metav1.CreateOptions{}) - _, _ = kd.Clientset.BatchV1().Jobs("default").Create(context.TODO(), &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - Name: "default-1-job", - Namespace: "default", - Annotations: map[string]string{ - "iter8.tools/group": "default", - "iter8.tools/revision": "1", - }, - }, - }, metav1.CreateOptions{}) - - err := base.RunExperiment(false, kd) + // _, _ = kd.Clientset.BatchV1().Jobs("default").Create(context.TODO(), &batchv1.Job{ + // ObjectMeta: metav1.ObjectMeta{ + // Name: "default-1-job", + // Namespace: "default", + // Annotations: map[string]string{ + // "iter8.tools/group": "default", + // "iter8.tools/revision": "1", + // }, + // }, + // }, metav1.CreateOptions{}) + + err = base.RunExperiment(false, kd) assert.NoError(t, err) // sanity check -- handler was called assert.True(t, verifyHandlerCalled) + assert.True(t, metricsServerCalled) // check results exp, err := base.BuildExperiment(kd) diff --git a/testdata/assertinputs/experiment.yaml b/testdata/assertinputs/experiment.yaml index 357e6a092..5dc11377a 100644 --- a/testdata/assertinputs/experiment.yaml +++ b/testdata/assertinputs/experiment.yaml @@ -1,3 +1,6 @@ +metadata: + name: myName + namespace: myNamespace spec: # task 1: generate HTTP requests for application URL # collect Iter8's built-in HTTP latency and error-related metrics From e595d1720cb76472c6a4324106da98fbd85d603e Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Thu, 3 Aug 2023 07:47:21 -0400 Subject: [PATCH 039/121] Fix tests Signed-off-by: Alan Cha --- action/run_test.go | 108 ++++++++++++++++++++++- base/mock_qs_test.go | 70 --------------- cmd/kassert.go | 2 +- cmd/kassert_test.go | 125 +++++++++++++++++++++++---- cmd/klog_test.go | 6 ++ 
cmd/krun_test.go | 43 ++++++++- cmd/test_helpers.go | 4 +- testdata/experiment.tpl | 2 +- testdata/output/kassert.txt | 1 - testdata/output/kreport.txt | 43 --------- testdata/output/launch-with-slos.txt | 6 -- testdata/output/report.txt | 42 --------- 12 files changed, 263 insertions(+), 189 deletions(-) delete mode 100644 base/mock_qs_test.go delete mode 100644 testdata/output/kreport.txt delete mode 100644 testdata/output/launch-with-slos.txt delete mode 100644 testdata/output/report.txt diff --git a/action/run_test.go b/action/run_test.go index f7684bbd3..a77d94594 100644 --- a/action/run_test.go +++ b/action/run_test.go @@ -2,21 +2,92 @@ package action import ( "context" + "encoding/json" "fmt" + "io" + "net/http" "os" "testing" "fortio.org/fortio/fhttp" "github.com/iter8-tools/iter8/base" "github.com/iter8-tools/iter8/driver" + "github.com/jarcoal/httpmock" "github.com/stretchr/testify/assert" "helm.sh/helm/v3/pkg/cli" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +const ( + myName = "myName" + myNamespace = "myNamespace" +) + +// TODO: duplicated from collect_http_test.go +func startHTTPMock(t *testing.T) { + httpmock.Activate() + t.Cleanup(httpmock.DeactivateAndReset) + httpmock.RegisterNoResponder(httpmock.InitialTransport.RoundTrip) +} + +type DashboardCallback func(req *http.Request) + +type mockMetricsServerInput struct { + metricsServerURL string + + // GET /httpDashboard + httpDashboardCallback DashboardCallback + // GET /grpcDashboard + gRPCDashboardCallback DashboardCallback + // PUT /performanceResult + performanceResultCallback DashboardCallback +} + +func mockMetricsServer(input mockMetricsServerInput) { + // GET /httpDashboard + httpmock.RegisterResponder( + http.MethodGet, + input.metricsServerURL+base.HTTPDashboardPath, + func(req *http.Request) (*http.Response, error) { + if input.httpDashboardCallback != nil { + input.httpDashboardCallback(req) + } + + return httpmock.NewStringResponse(200, "success"), nil + }, + ) + + // GET /grpcDashboard + httpmock.RegisterResponder( + http.MethodGet, + input.metricsServerURL+base.GRPCDashboardPath, + func(req *http.Request) (*http.Response, error) { + if input.gRPCDashboardCallback != nil { + input.gRPCDashboardCallback(req) + } + return httpmock.NewStringResponse(200, "success"), nil + }, + ) + + // PUT /performanceResult + httpmock.RegisterResponder( + http.MethodPut, + input.metricsServerURL+base.PerformanceResultPath, + func(req *http.Request) (*http.Response, error) { + if input.performanceResultCallback != nil { + input.performanceResultCallback(req) + } + return httpmock.NewStringResponse(200, "success"), nil + }, + ) +} + func TestKubeRun(t *testing.T) { - _ = os.Chdir(t.TempDir()) + // define METRICS_SERVER_URL + metricsServerURL := "http://iter8.default:8080" + err := os.Setenv(base.MetricsServerURL, metricsServerURL) + assert.NoError(t, err) // create and configure HTTP endpoint for testing mux, addr := fhttp.DynamicHTTPServer(false) @@ -24,6 +95,37 @@ func TestKubeRun(t *testing.T) { var verifyHandlerCalled bool mux.HandleFunc("/get", base.GetTrackingHandler(&verifyHandlerCalled)) + // mock metrics server + startHTTPMock(t) + metricsServerCalled := false + mockMetricsServer(mockMetricsServerInput{ + metricsServerURL: metricsServerURL, + performanceResultCallback: func(req *http.Request) { + metricsServerCalled = true + + // check query parameters + assert.Equal(t, myName, req.URL.Query().Get("experiment")) + assert.Equal(t, myNamespace, req.URL.Query().Get("namespace")) + + // check 
payload + body, err := io.ReadAll(req.Body) + assert.NoError(t, err) + assert.NotNil(t, body) + + // check payload content + bodyFortioResult := base.FortioResult{} + err = json.Unmarshal(body, &bodyFortioResult) + assert.NoError(t, err) + assert.NotNil(t, body) + + // if _, ok := bodyFortioResult.EndpointResults[call]; !ok { + // assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain call: %s", call)) + // } + }, + }) + + _ = os.Chdir(t.TempDir()) + // create experiment.yaml base.CreateExperimentYaml(t, base.CompletePath("../testdata", "experiment.tpl"), url, driver.ExperimentPath) @@ -40,10 +142,11 @@ func TestKubeRun(t *testing.T) { StringData: map[string]string{driver.ExperimentPath: string(byteArray)}, }, metav1.CreateOptions{}) - err := rOpts.KubeRun() + err = rOpts.KubeRun() assert.NoError(t, err) // sanity check -- handler was called assert.True(t, verifyHandlerCalled) + assert.True(t, metricsServerCalled) // check results exp, err := base.BuildExperiment(rOpts.KubeDriver) @@ -51,4 +154,5 @@ func TestKubeRun(t *testing.T) { assert.True(t, exp.Completed()) assert.True(t, exp.NoFailure()) assert.Equal(t, 1, exp.Result.NumCompletedTasks) + } diff --git a/base/mock_qs_test.go b/base/mock_qs_test.go deleted file mode 100644 index c52354647..000000000 --- a/base/mock_qs_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package base - -import ( - "fmt" - "os" - "testing" - - "fortio.org/fortio/fhttp" - "github.com/stretchr/testify/assert" -) - -func TestMockQuickStartWithSLOs(t *testing.T) { - _ = os.Chdir(t.TempDir()) - mux, addr := fhttp.DynamicHTTPServer(false) - mux.HandleFunc("/echo1/", fhttp.EchoHandler) - testURL := fmt.Sprintf("http://localhost:%d/echo1/", addr.Port) - - // valid collect HTTP task... should succeed - ct := &collectHTTPTask{ - TaskMeta: TaskMeta{ - Task: StringPointer(CollectHTTPTaskName), - }, - With: collectHTTPInputs{ - endpoint: endpoint{ - Duration: StringPointer("2s"), - Headers: map[string]string{}, - URL: testURL, - }, - }, - } - - exp := &Experiment{ - Spec: []Task{ct}, - } - - exp.initResults(1) - _ = exp.Result.initInsightsWithNumVersions(1) - err := exp.Spec[0].run(exp) - assert.NoError(t, err) -} - -func TestMockQuickStartWithSLOsAndPercentiles(t *testing.T) { - _ = os.Chdir(t.TempDir()) - mux, addr := fhttp.DynamicHTTPServer(false) - mux.HandleFunc("/echo1/", fhttp.EchoHandler) - testURL := fmt.Sprintf("http://localhost:%d/echo1/", addr.Port) - - // valid collect HTTP task... 
should succeed - ct := &collectHTTPTask{ - TaskMeta: TaskMeta{ - Task: StringPointer(CollectHTTPTaskName), - }, - With: collectHTTPInputs{ - endpoint: endpoint{ - Duration: StringPointer("1s"), - Headers: map[string]string{}, - URL: testURL, - }, - }, - } - - exp := &Experiment{ - Spec: []Task{ct}, - } - - exp.initResults(1) - _ = exp.Result.initInsightsWithNumVersions(1) - err := exp.Spec[0].run(exp) - assert.NoError(t, err) -} diff --git a/cmd/kassert.go b/cmd/kassert.go index 1d484ad7e..3dd4a95ff 100644 --- a/cmd/kassert.go +++ b/cmd/kassert.go @@ -61,7 +61,7 @@ func newKAssertCmd(kd *driver.KubeDriver) *cobra.Command { // addConditionFlag adds the condition flag to command func addConditionFlag(cmd *cobra.Command, conditionPtr *[]string) { - cmd.Flags().StringSliceVarP(conditionPtr, "condition", "c", nil, fmt.Sprintf("%v | %v | %v; can specify multiple or separate conditions with commas;", ia.Completed, ia.NoFailure, ia.SLOs)) + cmd.Flags().StringSliceVarP(conditionPtr, "condition", "c", nil, fmt.Sprintf("%v | %v; can specify multiple or separate conditions with commas;", ia.Completed, ia.NoFailure)) _ = cmd.MarkFlagRequired("condition") } diff --git a/cmd/kassert_test.go b/cmd/kassert_test.go index c2dc5e649..a5c9e2c79 100644 --- a/cmd/kassert_test.go +++ b/cmd/kassert_test.go @@ -2,7 +2,10 @@ package cmd import ( "context" + "encoding/json" "fmt" + "io" + "net/http" "os" "path/filepath" "testing" @@ -10,32 +13,82 @@ import ( "fortio.org/fortio/fhttp" "github.com/iter8-tools/iter8/base" id "github.com/iter8-tools/iter8/driver" + "github.com/jarcoal/httpmock" "github.com/stretchr/testify/assert" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -func TestKAssert(t *testing.T) { - _ = os.Chdir(t.TempDir()) +const ( + myName = "myName" + myNamespace = "myNamespace" +) - // create and configure HTTP endpoint for testing - mux, addr := fhttp.DynamicHTTPServer(false) - url := fmt.Sprintf("http://127.0.0.1:%d/get", addr.Port) - var verifyHandlerCalled bool - mux.HandleFunc("/get", base.GetTrackingHandler(&verifyHandlerCalled)) +// TODO: duplicated from collect_http_test.go +func startHTTPMock(t *testing.T) { + httpmock.Activate() + t.Cleanup(httpmock.DeactivateAndReset) + httpmock.RegisterNoResponder(httpmock.InitialTransport.RoundTrip) +} - // create experiment.yaml - base.CreateExperimentYaml(t, base.CompletePath("../testdata", "experiment.tpl"), url, id.ExperimentPath) +type DashboardCallback func(req *http.Request) - // run test - testAssert(t, id.ExperimentPath, url, "output/kassert.txt", false) - // sanity check -- handler was called - assert.True(t, verifyHandlerCalled) +type mockMetricsServerInput struct { + metricsServerURL string + + // GET /httpDashboard + httpDashboardCallback DashboardCallback + // GET /grpcDashboard + gRPCDashboardCallback DashboardCallback + // PUT /performanceResult + performanceResultCallback DashboardCallback } -func TestKAssertFailsSLOs(t *testing.T) { - _ = os.Chdir(t.TempDir()) +func mockMetricsServer(input mockMetricsServerInput) { + // GET /httpDashboard + httpmock.RegisterResponder( + http.MethodGet, + input.metricsServerURL+base.HTTPDashboardPath, + func(req *http.Request) (*http.Response, error) { + if input.httpDashboardCallback != nil { + input.httpDashboardCallback(req) + } + + return httpmock.NewStringResponse(200, "success"), nil + }, + ) + + // GET /grpcDashboard + httpmock.RegisterResponder( + http.MethodGet, + input.metricsServerURL+base.GRPCDashboardPath, + func(req *http.Request) 
(*http.Response, error) { + if input.gRPCDashboardCallback != nil { + input.gRPCDashboardCallback(req) + } + return httpmock.NewStringResponse(200, "success"), nil + }, + ) + + // PUT /performanceResult + httpmock.RegisterResponder( + http.MethodPut, + input.metricsServerURL+base.PerformanceResultPath, + func(req *http.Request) (*http.Response, error) { + if input.performanceResultCallback != nil { + input.performanceResultCallback(req) + } + return httpmock.NewStringResponse(200, "success"), nil + }, + ) +} + +func TestKAssert(t *testing.T) { + // define METRICS_SERVER_URL + metricsServerURL := "http://iter8.default:8080" + err := os.Setenv(base.MetricsServerURL, metricsServerURL) + assert.NoError(t, err) // create and configure HTTP endpoint for testing mux, addr := fhttp.DynamicHTTPServer(false) @@ -43,11 +96,45 @@ func TestKAssertFailsSLOs(t *testing.T) { var verifyHandlerCalled bool mux.HandleFunc("/get", base.GetTrackingHandler(&verifyHandlerCalled)) + // mock metrics server + startHTTPMock(t) + metricsServerCalled := false + mockMetricsServer(mockMetricsServerInput{ + metricsServerURL: metricsServerURL, + performanceResultCallback: func(req *http.Request) { + metricsServerCalled = true + + // check query parameters + assert.Equal(t, myName, req.URL.Query().Get("experiment")) + assert.Equal(t, myNamespace, req.URL.Query().Get("namespace")) + + // check payload + body, err := io.ReadAll(req.Body) + assert.NoError(t, err) + assert.NotNil(t, body) + + // check payload content + bodyFortioResult := base.FortioResult{} + err = json.Unmarshal(body, &bodyFortioResult) + assert.NoError(t, err) + assert.NotNil(t, body) + + fmt.Println(string(body)) + + if _, ok := bodyFortioResult.EndpointResults[url]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain call: %s", url)) + } + }, + }) + + _ = os.Chdir(t.TempDir()) + // create experiment.yaml - base.CreateExperimentYaml(t, base.CompletePath("../testdata", "experiment_fails.tpl"), url, id.ExperimentPath) + base.CreateExperimentYaml(t, base.CompletePath("../testdata", "experiment.tpl"), url, id.ExperimentPath) // run test - testAssert(t, id.ExperimentPath, url, "output/kassertfails.txt", true) + testAssert(t, id.ExperimentPath, url, "output/kassert.txt", false) + assert.True(t, metricsServerCalled) // sanity check -- handler was called assert.True(t, verifyHandlerCalled) } @@ -57,7 +144,7 @@ func testAssert(t *testing.T, experiment string, url string, expectedOutputFile // k launch { name: "k launch", - cmd: fmt.Sprintf("k launch -c %v --localChart --set tasks={http,assess} --set http.url=%s --set http.duration=2s", base.CompletePath("../charts", "iter8"), url), + cmd: fmt.Sprintf("k launch -c %v --localChart --set tasks={http} --set http.url=%s --set http.duration=2s", base.CompletePath("../charts", "iter8"), url), golden: base.CompletePath("../testdata", "output/klaunch.txt"), }, // k run @@ -68,7 +155,7 @@ func testAssert(t *testing.T, experiment string, url string, expectedOutputFile // k assert { name: "k assert", - cmd: "k assert -c completed -c nofailure -c slos", + cmd: "k assert -c completed -c nofailure", golden: base.CompletePath("../testdata", expectedOutputFile), wantError: expectError, }, diff --git a/cmd/klog_test.go b/cmd/klog_test.go index c0b3e4d0b..1af5c8dac 100644 --- a/cmd/klog_test.go +++ b/cmd/klog_test.go @@ -10,11 +10,17 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" id "github.com/iter8-tools/iter8/driver" + "github.com/stretchr/testify/assert" 
"github.com/iter8-tools/iter8/base" ) func TestKLog(t *testing.T) { + // define METRICS_SERVER_URL + metricsServerURL := "http://iter8.default:8080" + err := os.Setenv(base.MetricsServerURL, metricsServerURL) + assert.NoError(t, err) + _ = os.Chdir(t.TempDir()) tests := []cmdTestCase{ // k launch diff --git a/cmd/krun_test.go b/cmd/krun_test.go index 25668ee9c..d9f83149e 100644 --- a/cmd/krun_test.go +++ b/cmd/krun_test.go @@ -2,7 +2,10 @@ package cmd import ( "context" + "encoding/json" "fmt" + "io" + "net/http" "os" "testing" @@ -15,7 +18,10 @@ import ( ) func TestKRun(t *testing.T) { - _ = os.Chdir(t.TempDir()) + // define METRICS_SERVER_URL + metricsServerURL := "http://iter8.default:8080" + err := os.Setenv(base.MetricsServerURL, metricsServerURL) + assert.NoError(t, err) // create and configure HTTP endpoint for testing mux, addr := fhttp.DynamicHTTPServer(false) @@ -23,6 +29,39 @@ func TestKRun(t *testing.T) { var verifyHandlerCalled bool mux.HandleFunc("/get", base.GetTrackingHandler(&verifyHandlerCalled)) + // mock metrics server + startHTTPMock(t) + metricsServerCalled := false + mockMetricsServer(mockMetricsServerInput{ + metricsServerURL: metricsServerURL, + performanceResultCallback: func(req *http.Request) { + metricsServerCalled = true + + // check query parameters + assert.Equal(t, myName, req.URL.Query().Get("experiment")) + assert.Equal(t, myNamespace, req.URL.Query().Get("namespace")) + + // check payload + body, err := io.ReadAll(req.Body) + assert.NoError(t, err) + assert.NotNil(t, body) + + // check payload content + bodyFortioResult := base.FortioResult{} + err = json.Unmarshal(body, &bodyFortioResult) + assert.NoError(t, err) + assert.NotNil(t, body) + + fmt.Println(string(body)) + + if _, ok := bodyFortioResult.EndpointResults[url]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain call: %s", url)) + } + }, + }) + + _ = os.Chdir(t.TempDir()) + // create experiment.yaml base.CreateExperimentYaml(t, base.CompletePath("../testdata", "experiment.tpl"), url, id.ExperimentPath) @@ -51,5 +90,5 @@ func TestKRun(t *testing.T) { runTestActionCmd(t, tests) // sanity check -- handler was called assert.True(t, verifyHandlerCalled) - + assert.True(t, metricsServerCalled) } diff --git a/cmd/test_helpers.go b/cmd/test_helpers.go index 986196164..cb1810dee 100644 --- a/cmd/test_helpers.go +++ b/cmd/test_helpers.go @@ -58,12 +58,12 @@ func runTestActionCmd(t *testing.T, tests []cmdTestCase) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - store := storageFixture() _, out, err := executeActionCommandC(store, tt.cmd) if (err != nil) != tt.wantError { t.Errorf("want error = %v, got '%v'", tt.wantError, err) } + if tt.golden != "" { AssertGoldenString(t, out, tt.golden) } @@ -204,7 +204,7 @@ func compare(actual []byte, filename string) error { } expected = normalize(expected) if !bytes.Equal(expected, actual) { - return errors.Errorf("does not match golden file %s WANT: '%s' GOT: '%s'", filename, expected, actual) + return errors.Errorf("does not match golden file %s WANT: '%s'\nGOT: '%s'", filename, expected, actual) } return nil } diff --git a/testdata/experiment.tpl b/testdata/experiment.tpl index 23afa3a53..4075144bf 100644 --- a/testdata/experiment.tpl +++ b/testdata/experiment.tpl @@ -9,4 +9,4 @@ spec: duration: 2s errorRanges: - lower: 500 - url: {{ .URL }} + url: {{ .URL }} \ No newline at end of file diff --git a/testdata/output/kassert.txt b/testdata/output/kassert.txt index 507879b72..63684dab8 100644 --- 
a/testdata/output/kassert.txt +++ b/testdata/output/kassert.txt @@ -1,4 +1,3 @@ time=1977-09-02 22:04:05 level=info msg=experiment completed time=1977-09-02 22:04:05 level=info msg=experiment has no failure -time=1977-09-02 22:04:05 level=info msg=SLOs are satisfied time=1977-09-02 22:04:05 level=info msg=all conditions were satisfied diff --git a/testdata/output/kreport.txt b/testdata/output/kreport.txt deleted file mode 100644 index f7f0f6603..000000000 --- a/testdata/output/kreport.txt +++ /dev/null @@ -1,43 +0,0 @@ - -Experiment summary: -******************* - - Experiment completed: true - No task failures: true - Total number of tasks: 4 - Number of completed tasks: 4 - Number of completed loops: 0 - -Whether or not service level objectives (SLOs) are satisfied: -************************************************************* - - SLO Conditions | Satisfied - -------------- | --------- - http/error-rate <= 0 | true - http/latency-mean (msec) <= 500 | true - http/latency-p50 (msec) <= 1000 | true - http/latency-p50 (msec) <= 1000 | true - http/latency-p95 (msec) <= 2500 | true - http/latency-p99 (msec) <= 5000 | true - - -Latest observed values for metrics: -*********************************** - - Metric | value - ------- | ----- - http/error-count | 0.00 - http/error-rate | 0.00 - http/latency-max (msec) | 272.84 - http/latency-mean (msec) | 29.62 - http/latency-min (msec) | 11.39 - http/latency-p50 (msec) | 13.43 - http/latency-p75 (msec) | 15.00 - http/latency-p90 (msec) | 16.80 - http/latency-p95 (msec) | 254.57 - http/latency-p99 (msec) | 269.18 - http/latency-p99.9 (msec) | 272.47 - http/latency-stddev (msec) | 62.82 - http/request-count | 16.00 - - diff --git a/testdata/output/launch-with-slos.txt b/testdata/output/launch-with-slos.txt deleted file mode 100644 index e89d48487..000000000 --- a/testdata/output/launch-with-slos.txt +++ /dev/null @@ -1,6 +0,0 @@ -time=1977-09-02 22:04:05 level=info msg=created experiment.yaml file -time=1977-09-02 22:04:05 level=info msg=starting local experiment -time=1977-09-02 22:04:05 level=info msg=task 1: http: started -time=1977-09-02 22:04:05 level=info msg=task 1: http: completed -time=1977-09-02 22:04:05 level=info msg=task 2: assess: started -time=1977-09-02 22:04:05 level=info msg=task 2: assess: completed diff --git a/testdata/output/report.txt b/testdata/output/report.txt deleted file mode 100644 index ef137893e..000000000 --- a/testdata/output/report.txt +++ /dev/null @@ -1,42 +0,0 @@ - -Experiment summary: -******************* - - Experiment completed: true - No task failures: true - Total number of tasks: 4 - Number of completed tasks: 4 - -Whether or not service level objectives (SLOs) are satisfied: -************************************************************* - - SLO Conditions | Satisfied - -------------- | --------- - http/error-rate <= 0 | true - http/latency-mean (msec) <= 500 | true - http/latency-p50 (msec) <= 1000 | true - http/latency-p50 (msec) <= 1000 | true - http/latency-p95 (msec) <= 2500 | true - http/latency-p99 (msec) <= 5000 | true - - -Latest observed values for metrics: -*********************************** - - Metric | value - ------- | ----- - http/error-count | 0.00 - http/error-rate | 0.00 - http/latency-max (msec) | 272.84 - http/latency-mean (msec) | 29.62 - http/latency-min (msec) | 11.39 - http/latency-p50 (msec) | 13.43 - http/latency-p75 (msec) | 15.00 - http/latency-p90 (msec) | 16.80 - http/latency-p95 (msec) | 254.57 - http/latency-p99 (msec) | 269.18 - http/latency-p99.9 (msec) | 272.47 - 
http/latency-stddev (msec) | 62.82 - http/request-count | 16.00 - - From d2e2cb22bef6b3ffcb545f287fe0c93ada5731b0 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Thu, 3 Aug 2023 08:04:25 -0400 Subject: [PATCH 040/121] Refactor MockMetricsServer as test helper Signed-off-by: Alan Cha --- action/run_test.go | 74 ++++----------------------------- base/collect_grpc_test.go | 24 +++++------ base/collect_http_test.go | 86 +++++++-------------------------------- base/experiment_test.go | 16 ++++---- base/notify_test.go | 14 +++---- base/test_helpers.go | 59 +++++++++++++++++++++++++++ cmd/kassert_test.go | 8 ++-- cmd/krun_test.go | 8 ++-- driver/filedriver_test.go | 8 ++-- driver/kubedriver_test.go | 8 ++-- 10 files changed, 123 insertions(+), 182 deletions(-) diff --git a/action/run_test.go b/action/run_test.go index a77d94594..0965cccbe 100644 --- a/action/run_test.go +++ b/action/run_test.go @@ -12,7 +12,6 @@ import ( "fortio.org/fortio/fhttp" "github.com/iter8-tools/iter8/base" "github.com/iter8-tools/iter8/driver" - "github.com/jarcoal/httpmock" "github.com/stretchr/testify/assert" "helm.sh/helm/v3/pkg/cli" corev1 "k8s.io/api/core/v1" @@ -24,65 +23,6 @@ const ( myNamespace = "myNamespace" ) -// TODO: duplicated from collect_http_test.go -func startHTTPMock(t *testing.T) { - httpmock.Activate() - t.Cleanup(httpmock.DeactivateAndReset) - httpmock.RegisterNoResponder(httpmock.InitialTransport.RoundTrip) -} - -type DashboardCallback func(req *http.Request) - -type mockMetricsServerInput struct { - metricsServerURL string - - // GET /httpDashboard - httpDashboardCallback DashboardCallback - // GET /grpcDashboard - gRPCDashboardCallback DashboardCallback - // PUT /performanceResult - performanceResultCallback DashboardCallback -} - -func mockMetricsServer(input mockMetricsServerInput) { - // GET /httpDashboard - httpmock.RegisterResponder( - http.MethodGet, - input.metricsServerURL+base.HTTPDashboardPath, - func(req *http.Request) (*http.Response, error) { - if input.httpDashboardCallback != nil { - input.httpDashboardCallback(req) - } - - return httpmock.NewStringResponse(200, "success"), nil - }, - ) - - // GET /grpcDashboard - httpmock.RegisterResponder( - http.MethodGet, - input.metricsServerURL+base.GRPCDashboardPath, - func(req *http.Request) (*http.Response, error) { - if input.gRPCDashboardCallback != nil { - input.gRPCDashboardCallback(req) - } - return httpmock.NewStringResponse(200, "success"), nil - }, - ) - - // PUT /performanceResult - httpmock.RegisterResponder( - http.MethodPut, - input.metricsServerURL+base.PerformanceResultPath, - func(req *http.Request) (*http.Response, error) { - if input.performanceResultCallback != nil { - input.performanceResultCallback(req) - } - return httpmock.NewStringResponse(200, "success"), nil - }, - ) -} - func TestKubeRun(t *testing.T) { // define METRICS_SERVER_URL metricsServerURL := "http://iter8.default:8080" @@ -96,11 +36,11 @@ func TestKubeRun(t *testing.T) { mux.HandleFunc("/get", base.GetTrackingHandler(&verifyHandlerCalled)) // mock metrics server - startHTTPMock(t) + base.StartHTTPMock(t) metricsServerCalled := false - mockMetricsServer(mockMetricsServerInput{ - metricsServerURL: metricsServerURL, - performanceResultCallback: func(req *http.Request) { + base.MockMetricsServer(base.MockMetricsServerInput{ + MetricsServerURL: metricsServerURL, + PerformanceResultCallback: func(req *http.Request) { metricsServerCalled = true // check query parameters @@ -118,9 +58,9 @@ func TestKubeRun(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, 
body) - // if _, ok := bodyFortioResult.EndpointResults[call]; !ok { - // assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain call: %s", call)) - // } + if _, ok := bodyFortioResult.EndpointResults[url]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain call: %s", url)) + } }, }) diff --git a/base/collect_grpc_test.go b/base/collect_grpc_test.go index b936c3a06..6e2a345d1 100644 --- a/base/collect_grpc_test.go +++ b/base/collect_grpc_test.go @@ -34,11 +34,11 @@ func TestRunCollectGRPCUnary(t *testing.T) { call := "helloworld.Greeter.SayHello" // mock metrics server - startHTTPMock(t) + StartHTTPMock(t) metricsServerCalled := false - mockMetricsServer(mockMetricsServerInput{ - metricsServerURL: metricsServerURL, - performanceResultCallback: func(req *http.Request) { + MockMetricsServer(MockMetricsServerInput{ + MetricsServerURL: metricsServerURL, + PerformanceResultCallback: func(req *http.Request) { metricsServerCalled = true // check query parameters @@ -152,11 +152,11 @@ func TestRunCollectGRPCEndpoints(t *testing.T) { bidirectionalCall := "helloworld.Greeter.SayHelloBidi" // mock metrics server - startHTTPMock(t) + StartHTTPMock(t) metricsServerCalled := false - mockMetricsServer(mockMetricsServerInput{ - metricsServerURL: metricsServerURL, - performanceResultCallback: func(req *http.Request) { + MockMetricsServer(MockMetricsServerInput{ + MetricsServerURL: metricsServerURL, + PerformanceResultCallback: func(req *http.Request) { metricsServerCalled = true // check query parameters @@ -267,11 +267,11 @@ func TestRunCollectGRPCMultipleNoEndpoints(t *testing.T) { bidirectionalCall := "helloworld.Greeter.SayHelloBidi" // mock metrics server - startHTTPMock(t) + StartHTTPMock(t) metricsServerCalled := false - mockMetricsServer(mockMetricsServerInput{ - metricsServerURL: metricsServerURL, - performanceResultCallback: func(req *http.Request) { + MockMetricsServer(MockMetricsServerInput{ + MetricsServerURL: metricsServerURL, + PerformanceResultCallback: func(req *http.Request) { metricsServerCalled = true // check query parameters diff --git a/base/collect_http_test.go b/base/collect_http_test.go index 16d286661..18fb9db6e 100644 --- a/base/collect_http_test.go +++ b/base/collect_http_test.go @@ -26,64 +26,6 @@ const ( myNamespace = "myNamespace" ) -func startHTTPMock(t *testing.T) { - httpmock.Activate() - t.Cleanup(httpmock.DeactivateAndReset) - httpmock.RegisterNoResponder(httpmock.InitialTransport.RoundTrip) -} - -type DashboardCallback func(req *http.Request) - -type mockMetricsServerInput struct { - metricsServerURL string - - // GET /httpDashboard - httpDashboardCallback DashboardCallback - // GET /grpcDashboard - gRPCDashboardCallback DashboardCallback - // PUT /performanceResult - performanceResultCallback DashboardCallback -} - -func mockMetricsServer(input mockMetricsServerInput) { - // GET /httpDashboard - httpmock.RegisterResponder( - http.MethodGet, - input.metricsServerURL+HTTPDashboardPath, - func(req *http.Request) (*http.Response, error) { - if input.httpDashboardCallback != nil { - input.httpDashboardCallback(req) - } - - return httpmock.NewStringResponse(200, "success"), nil - }, - ) - - // GET /grpcDashboard - httpmock.RegisterResponder( - http.MethodGet, - input.metricsServerURL+GRPCDashboardPath, - func(req *http.Request) (*http.Response, error) { - if input.gRPCDashboardCallback != nil { - input.gRPCDashboardCallback(req) - } - return httpmock.NewStringResponse(200, "success"), nil - }, - ) - - // PUT 
/performanceResult - httpmock.RegisterResponder( - http.MethodPut, - input.metricsServerURL+PerformanceResultPath, - func(req *http.Request) (*http.Response, error) { - if input.performanceResultCallback != nil { - input.performanceResultCallback(req) - } - return httpmock.NewStringResponse(200, "success"), nil - }, - ) -} - func TestRunCollectHTTP(t *testing.T) { // define METRICS_SERVER_URL metricsServerURL := "http://iter8.default:8080" @@ -109,11 +51,11 @@ func TestRunCollectHTTP(t *testing.T) { url := fmt.Sprintf("http://localhost:%d/", addr.Port) + foo // mock metrics server - startHTTPMock(t) + StartHTTPMock(t) metricsServerCalled := false - mockMetricsServer(mockMetricsServerInput{ - metricsServerURL: metricsServerURL, - performanceResultCallback: func(req *http.Request) { + MockMetricsServer(MockMetricsServerInput{ + MetricsServerURL: metricsServerURL, + PerformanceResultCallback: func(req *http.Request) { metricsServerCalled = true // check query parameters @@ -240,11 +182,11 @@ func TestRunCollectHTTPMultipleEndpoints(t *testing.T) { endpoint2URL := baseURL + bar // mock metrics server - startHTTPMock(t) + StartHTTPMock(t) metricsServerCalled := false - mockMetricsServer(mockMetricsServerInput{ - metricsServerURL: metricsServerURL, - performanceResultCallback: func(req *http.Request) { + MockMetricsServer(MockMetricsServerInput{ + MetricsServerURL: metricsServerURL, + PerformanceResultCallback: func(req *http.Request) { metricsServerCalled = true // check query parameters @@ -394,11 +336,11 @@ func TestRunCollectHTTPMultipleNoEndpoints(t *testing.T) { endpoint2URL := baseURL + bar // mock metrics server - startHTTPMock(t) + StartHTTPMock(t) metricsServerCalled := false - mockMetricsServer(mockMetricsServerInput{ - metricsServerURL: metricsServerURL, - performanceResultCallback: func(req *http.Request) { + MockMetricsServer(MockMetricsServerInput{ + MetricsServerURL: metricsServerURL, + PerformanceResultCallback: func(req *http.Request) { metricsServerCalled = true // check query parameters @@ -489,7 +431,7 @@ func TestErrorCode(t *testing.T) { } func TestPutPerformanceResultToMetricsService(t *testing.T) { - startHTTPMock(t) + StartHTTPMock(t) metricsServerURL := "http://my-server.com" namespace := "my-namespace" @@ -533,7 +475,7 @@ func TestRunCollectHTTPGrafana(t *testing.T) { metricsServerCalled := false namespace := "default" experiment := "default" - startHTTPMock(t) + StartHTTPMock(t) httpmock.RegisterResponder(http.MethodPut, metricsServerURL+PerformanceResultPath, func(req *http.Request) (*http.Response, error) { metricsServerCalled = true diff --git a/base/experiment_test.go b/base/experiment_test.go index 98be0bafc..4c2ad216e 100644 --- a/base/experiment_test.go +++ b/base/experiment_test.go @@ -44,11 +44,11 @@ func TestRunningTasks(t *testing.T) { mux.HandleFunc("/get", GetTrackingHandler(&verifyHandlerCalled)) // mock metrics server - startHTTPMock(t) + StartHTTPMock(t) metricsServerCalled := false - mockMetricsServer(mockMetricsServerInput{ - metricsServerURL: metricsServerURL, - performanceResultCallback: func(req *http.Request) { + MockMetricsServer(MockMetricsServerInput{ + MetricsServerURL: metricsServerURL, + PerformanceResultCallback: func(req *http.Request) { metricsServerCalled = true // check query parameters @@ -118,11 +118,11 @@ func TestRunExperiment(t *testing.T) { mux.HandleFunc("/get", GetTrackingHandler(&verifyHandlerCalled)) // mock metrics server - startHTTPMock(t) + StartHTTPMock(t) metricsServerCalled := false - 
mockMetricsServer(mockMetricsServerInput{ - metricsServerURL: metricsServerURL, - performanceResultCallback: func(req *http.Request) { + MockMetricsServer(MockMetricsServerInput{ + MetricsServerURL: metricsServerURL, + PerformanceResultCallback: func(req *http.Request) { metricsServerCalled = true // check query parameters diff --git a/base/notify_test.go b/base/notify_test.go index 5c5a652b2..761bb9072 100644 --- a/base/notify_test.go +++ b/base/notify_test.go @@ -30,7 +30,7 @@ func getNotifyTask(t *testing.T, n notifyInputs) *notifyTask { // GET method func TestNotify(t *testing.T) { _ = os.Chdir(t.TempDir()) - startHTTPMock(t) + StartHTTPMock(t) nt := getNotifyTask(t, notifyInputs{ URL: testNotifyURL, @@ -63,7 +63,7 @@ type testNotification struct { // POST method and PayloadTemplateURL func TestNotifyWithPayload(t *testing.T) { _ = os.Chdir(t.TempDir()) - startHTTPMock(t) + StartHTTPMock(t) nt := getNotifyTask(t, notifyInputs{ Method: http.MethodPost, @@ -133,7 +133,7 @@ func TestNotifyWithPayload(t *testing.T) { // GET method and headers and query parameters func TestNotifyWithHeadersAndQueryParams(t *testing.T) { _ = os.Chdir(t.TempDir()) - startHTTPMock(t) + StartHTTPMock(t) nt := getNotifyTask(t, notifyInputs{ URL: testNotifyURL, @@ -177,7 +177,7 @@ func TestNotifyWithHeadersAndQueryParams(t *testing.T) { // bad method and SoftFailure func TestNotifyBadMethod(t *testing.T) { _ = os.Chdir(t.TempDir()) - startHTTPMock(t) + StartHTTPMock(t) nt := getNotifyTask(t, notifyInputs{ URL: testNotifyURL, @@ -197,7 +197,7 @@ func TestNotifyBadMethod(t *testing.T) { // test should fail assert.Error(t, err) - startHTTPMock(t) + StartHTTPMock(t) nt = getNotifyTask(t, notifyInputs{ URL: testNotifyURL, @@ -221,7 +221,7 @@ func TestNotifyBadMethod(t *testing.T) { // default to POST method with PayloadTemplateURL func TestNotifyPayloadTemplateURLDefaultMethod(t *testing.T) { _ = os.Chdir(t.TempDir()) - startHTTPMock(t) + StartHTTPMock(t) nt := getNotifyTask(t, notifyInputs{ URL: testNotifyURL, @@ -269,7 +269,7 @@ func TestNotifyPayloadTemplateURLDefaultMethod(t *testing.T) { // No URL func TestNotifyNoURL(t *testing.T) { _ = os.Chdir(t.TempDir()) - startHTTPMock(t) + StartHTTPMock(t) nt := getNotifyTask(t, notifyInputs{ SoftFailure: false, diff --git a/base/test_helpers.go b/base/test_helpers.go index 00cc8afde..f50ac09f4 100644 --- a/base/test_helpers.go +++ b/base/test_helpers.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "github.com/jarcoal/httpmock" "github.com/stretchr/testify/assert" ) @@ -63,3 +64,61 @@ func GetTrackingHandler(breadcrumb *bool) func(w http.ResponseWriter, r *http.Re w.WriteHeader(200) } } + +func StartHTTPMock(t *testing.T) { + httpmock.Activate() + t.Cleanup(httpmock.DeactivateAndReset) + httpmock.RegisterNoResponder(httpmock.InitialTransport.RoundTrip) +} + +type DashboardCallback func(req *http.Request) + +type MockMetricsServerInput struct { + MetricsServerURL string + + // GET /httpDashboard + HTTPDashboardCallback DashboardCallback + // GET /grpcDashboard + GRPCDashboardCallback DashboardCallback + // PUT /performanceResult + PerformanceResultCallback DashboardCallback +} + +func MockMetricsServer(input MockMetricsServerInput) { + // GET /httpDashboard + httpmock.RegisterResponder( + http.MethodGet, + input.MetricsServerURL+HTTPDashboardPath, + func(req *http.Request) (*http.Response, error) { + if input.HTTPDashboardCallback != nil { + input.HTTPDashboardCallback(req) + } + + return httpmock.NewStringResponse(200, "success"), nil + }, + ) + + // GET /grpcDashboard + 
httpmock.RegisterResponder( + http.MethodGet, + input.MetricsServerURL+GRPCDashboardPath, + func(req *http.Request) (*http.Response, error) { + if input.GRPCDashboardCallback != nil { + input.GRPCDashboardCallback(req) + } + return httpmock.NewStringResponse(200, "success"), nil + }, + ) + + // PUT /performanceResult + httpmock.RegisterResponder( + http.MethodPut, + input.MetricsServerURL+PerformanceResultPath, + func(req *http.Request) (*http.Response, error) { + if input.PerformanceResultCallback != nil { + input.PerformanceResultCallback(req) + } + return httpmock.NewStringResponse(200, "success"), nil + }, + ) +} diff --git a/cmd/kassert_test.go b/cmd/kassert_test.go index a5c9e2c79..1484d5dd1 100644 --- a/cmd/kassert_test.go +++ b/cmd/kassert_test.go @@ -97,11 +97,11 @@ func TestKAssert(t *testing.T) { mux.HandleFunc("/get", base.GetTrackingHandler(&verifyHandlerCalled)) // mock metrics server - startHTTPMock(t) + base.StartHTTPMock(t) metricsServerCalled := false - mockMetricsServer(mockMetricsServerInput{ - metricsServerURL: metricsServerURL, - performanceResultCallback: func(req *http.Request) { + base.MockMetricsServer(base.MockMetricsServerInput{ + MetricsServerURL: metricsServerURL, + PerformanceResultCallback: func(req *http.Request) { metricsServerCalled = true // check query parameters diff --git a/cmd/krun_test.go b/cmd/krun_test.go index d9f83149e..201c3a24b 100644 --- a/cmd/krun_test.go +++ b/cmd/krun_test.go @@ -30,11 +30,11 @@ func TestKRun(t *testing.T) { mux.HandleFunc("/get", base.GetTrackingHandler(&verifyHandlerCalled)) // mock metrics server - startHTTPMock(t) + base.StartHTTPMock(t) metricsServerCalled := false - mockMetricsServer(mockMetricsServerInput{ - metricsServerURL: metricsServerURL, - performanceResultCallback: func(req *http.Request) { + base.MockMetricsServer(base.MockMetricsServerInput{ + MetricsServerURL: metricsServerURL, + PerformanceResultCallback: func(req *http.Request) { metricsServerCalled = true // check query parameters diff --git a/driver/filedriver_test.go b/driver/filedriver_test.go index 65528f308..545eb5814 100644 --- a/driver/filedriver_test.go +++ b/driver/filedriver_test.go @@ -91,11 +91,11 @@ func TestLocalRun(t *testing.T) { mux.HandleFunc("/get", base.GetTrackingHandler(&verifyHandlerCalled)) // mock metrics server - startHTTPMock(t) + base.StartHTTPMock(t) metricsServerCalled := false - mockMetricsServer(mockMetricsServerInput{ - metricsServerURL: metricsServerURL, - performanceResultCallback: func(req *http.Request) { + base.MockMetricsServer(base.MockMetricsServerInput{ + MetricsServerURL: metricsServerURL, + PerformanceResultCallback: func(req *http.Request) { metricsServerCalled = true // check query parameters diff --git a/driver/kubedriver_test.go b/driver/kubedriver_test.go index d1da78ef8..eabd9c556 100644 --- a/driver/kubedriver_test.go +++ b/driver/kubedriver_test.go @@ -80,11 +80,11 @@ func TestKubeRun(t *testing.T) { mux.HandleFunc("/get", base.GetTrackingHandler(&verifyHandlerCalled)) // mock metrics server - startHTTPMock(t) + base.StartHTTPMock(t) metricsServerCalled := false - mockMetricsServer(mockMetricsServerInput{ - metricsServerURL: metricsServerURL, - performanceResultCallback: func(req *http.Request) { + base.MockMetricsServer(base.MockMetricsServerInput{ + MetricsServerURL: metricsServerURL, + PerformanceResultCallback: func(req *http.Request) { metricsServerCalled = true // check query parameters From 159fffe2029db3c982d6a6b4eb18b8be5ffd9216 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Thu, 3 Aug 2023 
08:04:52 -0400 Subject: [PATCH 041/121] Remove metrics and extraneous Signed-off-by: Alan Cha --- base/experiment.go | 10 -------- base/metrics.go | 48 ------------------------------------ base/must_merge_overwrite.go | 40 ------------------------------ base/util.go | 5 ---- 4 files changed, 103 deletions(-) delete mode 100644 base/metrics.go delete mode 100644 base/must_merge_overwrite.go diff --git a/base/experiment.go b/base/experiment.go index 9dba38412..7077742c7 100644 --- a/base/experiment.go +++ b/base/experiment.go @@ -87,16 +87,6 @@ type Insights struct { VersionNames []VersionInfo `json:"versionNames" yaml:"versionNames"` } -// MetricMeta describes a metric -type MetricMeta struct { - // Description is a human readable description of the metric - Description string `json:"description" yaml:"description"` - // Units for this metric (if any) - Units *string `json:"units,omitempty" yaml:"units,omitempty"` - // Type of the metric. Example: counter - Type MetricType `json:"type" yaml:"type"` -} - // VersionInfo is basic information about a version type VersionInfo struct { // Version name diff --git a/base/metrics.go b/base/metrics.go deleted file mode 100644 index 136e59141..000000000 --- a/base/metrics.go +++ /dev/null @@ -1,48 +0,0 @@ -package base - -// HistBucket is a single bucket in a histogram -type HistBucket struct { - // Lower endpoint of a histogram bucket - Lower float64 `json:"lower" yaml:"lower"` - // Upper endpoint of a histogram bucket - Upper float64 `json:"upper" yaml:"upper"` - // Count is the frequency count of the bucket - Count uint64 `json:"count" yaml:"count"` -} - -// MetricType identifies the type of the metric. -type MetricType string - -// AggregationType identifies the type of the metric aggregator. -type AggregationType string - -const ( - // CounterMetricType corresponds to Prometheus Counter metric type - CounterMetricType MetricType = "Counter" - // GaugeMetricType corresponds to Prometheus Gauge metric type - GaugeMetricType MetricType = "Gauge" - // HistogramMetricType corresponds to a Histogram metric type - HistogramMetricType MetricType = "Histogram" - // SampleMetricType corresponds to a Sample metric type - SampleMetricType MetricType = "Sample" - // SummaryMetricType corresponds to a Summary metric type - SummaryMetricType MetricType = "Summary" - - // decimalRegex is the regex used to identify percentiles - decimalRegex = `^([\d]+(\.[\d]*)?|\.[\d]+)$` - - // CountAggregator corresponds to aggregation of type count - CountAggregator AggregationType = "count" - // MeanAggregator corresponds to aggregation of type mean - MeanAggregator AggregationType = "mean" - // StdDevAggregator corresponds to aggregation of type stddev - StdDevAggregator AggregationType = "stddev" - // MinAggregator corresponds to aggregation of type min - MinAggregator AggregationType = "min" - // MaxAggregator corresponds to aggregation of type max - MaxAggregator AggregationType = "max" - // PercentileAggregator corresponds to aggregation of type max - PercentileAggregator AggregationType = "percentile" - // PercentileAggregatorPrefix corresponds to prefix for percentiles - PercentileAggregatorPrefix = "p" -) diff --git a/base/must_merge_overwrite.go b/base/must_merge_overwrite.go deleted file mode 100644 index 2ae73dc6a..000000000 --- a/base/must_merge_overwrite.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright (C) 2013-2020 Masterminds - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files 
(the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. -*/ - -package base - -import ( - "github.com/imdario/mergo" - "github.com/iter8-tools/iter8/base/log" -) - -// mustMergeOverwrite merge maps giving precedence to the right side -func mustMergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) (interface{}, error) { - for _, src := range srcs { - if err := mergo.MergeWithOverwrite(&dst, src); err != nil { - // the following log line is the diff between the original sprig func and ours - log.Logger.Error(err) - return nil, err - } - } - return dst, nil -} diff --git a/base/util.go b/base/util.go index 1ff99e8b1..a93f8e0af 100644 --- a/base/util.go +++ b/base/util.go @@ -44,11 +44,6 @@ func float32Pointer(f float32) *float32 { return &f } -// float64Pointer takes an float64 as input, creates a new variable with the input value, and returns a pointer to the variable -func float64Pointer(f float64) *float64 { - return &f -} - // StringPointer takes string as input, creates a new variable with the input value, and returns a pointer to the variable func StringPointer(s string) *string { return &s From 670b165073497271e60f817576202b041ee0a477 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Thu, 3 Aug 2023 08:11:49 -0400 Subject: [PATCH 042/121] Remove extraneous code Signed-off-by: Alan Cha --- cmd/kassert_test.go | 60 --------------------------------------- driver/filedriver_test.go | 53 ---------------------------------- 2 files changed, 113 deletions(-) diff --git a/cmd/kassert_test.go b/cmd/kassert_test.go index 1484d5dd1..e997dcaae 100644 --- a/cmd/kassert_test.go +++ b/cmd/kassert_test.go @@ -13,7 +13,6 @@ import ( "fortio.org/fortio/fhttp" "github.com/iter8-tools/iter8/base" id "github.com/iter8-tools/iter8/driver" - "github.com/jarcoal/httpmock" "github.com/stretchr/testify/assert" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" @@ -25,65 +24,6 @@ const ( myNamespace = "myNamespace" ) -// TODO: duplicated from collect_http_test.go -func startHTTPMock(t *testing.T) { - httpmock.Activate() - t.Cleanup(httpmock.DeactivateAndReset) - httpmock.RegisterNoResponder(httpmock.InitialTransport.RoundTrip) -} - -type DashboardCallback func(req *http.Request) - -type mockMetricsServerInput struct { - metricsServerURL string - - // GET /httpDashboard - httpDashboardCallback DashboardCallback - // GET /grpcDashboard - gRPCDashboardCallback DashboardCallback - // PUT /performanceResult - performanceResultCallback DashboardCallback -} - -func mockMetricsServer(input mockMetricsServerInput) { - // GET /httpDashboard - httpmock.RegisterResponder( - http.MethodGet, - input.metricsServerURL+base.HTTPDashboardPath, - func(req *http.Request) 
(*http.Response, error) { - if input.httpDashboardCallback != nil { - input.httpDashboardCallback(req) - } - - return httpmock.NewStringResponse(200, "success"), nil - }, - ) - - // GET /grpcDashboard - httpmock.RegisterResponder( - http.MethodGet, - input.metricsServerURL+base.GRPCDashboardPath, - func(req *http.Request) (*http.Response, error) { - if input.gRPCDashboardCallback != nil { - input.gRPCDashboardCallback(req) - } - return httpmock.NewStringResponse(200, "success"), nil - }, - ) - - // PUT /performanceResult - httpmock.RegisterResponder( - http.MethodPut, - input.metricsServerURL+base.PerformanceResultPath, - func(req *http.Request) (*http.Response, error) { - if input.performanceResultCallback != nil { - input.performanceResultCallback(req) - } - return httpmock.NewStringResponse(200, "success"), nil - }, - ) -} - func TestKAssert(t *testing.T) { // define METRICS_SERVER_URL metricsServerURL := "http://iter8.default:8080" diff --git a/driver/filedriver_test.go b/driver/filedriver_test.go index 545eb5814..0d8fab477 100644 --- a/driver/filedriver_test.go +++ b/driver/filedriver_test.go @@ -25,59 +25,6 @@ func startHTTPMock(t *testing.T) { httpmock.RegisterNoResponder(httpmock.InitialTransport.RoundTrip) } -// TODO: duplicated from collect_http_test.go -type DashboardCallback func(req *http.Request) - -type mockMetricsServerInput struct { - metricsServerURL string - - // GET /httpDashboard - httpDashboardCallback DashboardCallback - // GET /grpcDashboard - gRPCDashboardCallback DashboardCallback - // PUT /performanceResult - performanceResultCallback DashboardCallback -} - -func mockMetricsServer(input mockMetricsServerInput) { - // GET /httpDashboard - httpmock.RegisterResponder( - http.MethodGet, - input.metricsServerURL+base.HTTPDashboardPath, - func(req *http.Request) (*http.Response, error) { - if input.httpDashboardCallback != nil { - input.httpDashboardCallback(req) - } - - return httpmock.NewStringResponse(200, "success"), nil - }, - ) - - // GET /grpcDashboard - httpmock.RegisterResponder( - http.MethodGet, - input.metricsServerURL+base.GRPCDashboardPath, - func(req *http.Request) (*http.Response, error) { - if input.gRPCDashboardCallback != nil { - input.gRPCDashboardCallback(req) - } - return httpmock.NewStringResponse(200, "success"), nil - }, - ) - - // PUT /performanceResult - httpmock.RegisterResponder( - http.MethodPut, - input.metricsServerURL+base.PerformanceResultPath, - func(req *http.Request) (*http.Response, error) { - if input.performanceResultCallback != nil { - input.performanceResultCallback(req) - } - return httpmock.NewStringResponse(200, "success"), nil - }, - ) -} - func TestLocalRun(t *testing.T) { // define METRICS_SERVER_URL metricsServerURL := "http://iter8.default:8080" From f789e8e40af4d5875630c189322d1d7681ea8091 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Thu, 3 Aug 2023 08:12:10 -0400 Subject: [PATCH 043/121] Add comments to mock server functions/types Signed-off-by: Alan Cha --- base/test_helpers.go | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/base/test_helpers.go b/base/test_helpers.go index f50ac09f4..8bb29c7b2 100644 --- a/base/test_helpers.go +++ b/base/test_helpers.go @@ -65,25 +65,32 @@ func GetTrackingHandler(breadcrumb *bool) func(w http.ResponseWriter, r *http.Re } } +// StartHTTPMock activates and cleanups httpmock func StartHTTPMock(t *testing.T) { httpmock.Activate() t.Cleanup(httpmock.DeactivateAndReset) httpmock.RegisterNoResponder(httpmock.InitialTransport.RoundTrip) } -type 
DashboardCallback func(req *http.Request) +// MetricsServerCallback is a callback function for when the particular metrics server endpoint +// is called +type MetricsServerCallback func(req *http.Request) +// MockMetricsServerInput is the input for MockMetricsServer() +// allows the user to provide callbacks when particular endpoints are called type MockMetricsServerInput struct { MetricsServerURL string // GET /httpDashboard - HTTPDashboardCallback DashboardCallback + HTTPDashboardCallback MetricsServerCallback // GET /grpcDashboard - GRPCDashboardCallback DashboardCallback + GRPCDashboardCallback MetricsServerCallback // PUT /performanceResult - PerformanceResultCallback DashboardCallback + PerformanceResultCallback MetricsServerCallback } +// MockMetricsServer is a mock metrics server +// use the callback functions in the MockMetricsServerInput to test if those endpoints are called func MockMetricsServer(input MockMetricsServerInput) { // GET /httpDashboard httpmock.RegisterResponder( From b0e3e97fdba5906197630fde32835fc1f6254373 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Thu, 3 Aug 2023 08:15:02 -0400 Subject: [PATCH 044/121] Remove extraneous Signed-off-by: Alan Cha --- driver/filedriver_test.go | 7 ------- 1 file changed, 7 deletions(-) diff --git a/driver/filedriver_test.go b/driver/filedriver_test.go index 0d8fab477..01a082b2b 100644 --- a/driver/filedriver_test.go +++ b/driver/filedriver_test.go @@ -10,7 +10,6 @@ import ( "fortio.org/fortio/fhttp" "github.com/iter8-tools/iter8/base" - "github.com/jarcoal/httpmock" "github.com/stretchr/testify/assert" ) @@ -19,12 +18,6 @@ const ( myNamespace = "myNamespace" ) -func startHTTPMock(t *testing.T) { - httpmock.Activate() - t.Cleanup(httpmock.DeactivateAndReset) - httpmock.RegisterNoResponder(httpmock.InitialTransport.RoundTrip) -} - func TestLocalRun(t *testing.T) { // define METRICS_SERVER_URL metricsServerURL := "http://iter8.default:8080" From dafa7eeb7af12b99adf7372cf2e0a92a6e0e5397 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Thu, 3 Aug 2023 09:10:29 -0400 Subject: [PATCH 045/121] Remove extraneous Signed-off-by: Alan Cha --- testdata/experiment_fails.tpl | 9 --------- testdata/experiment_grpc.yaml | 4 ++-- 2 files changed, 2 insertions(+), 11 deletions(-) delete mode 100644 testdata/experiment_fails.tpl diff --git a/testdata/experiment_fails.tpl b/testdata/experiment_fails.tpl deleted file mode 100644 index 986adb1de..000000000 --- a/testdata/experiment_fails.tpl +++ /dev/null @@ -1,9 +0,0 @@ -spec: -# task 1: generate HTTP requests for application URL -# collect Iter8's built-in HTTP latency and error-related metrics -- task: http - with: - duration: 2s - errorRanges: - - lower: 500 - url: {{ .URL }} diff --git a/testdata/experiment_grpc.yaml b/testdata/experiment_grpc.yaml index 32c97f99d..60bc0169d 100644 --- a/testdata/experiment_grpc.yaml +++ b/testdata/experiment_grpc.yaml @@ -1,6 +1,6 @@ spec: - # task 1: generate gRPC requests for application - # collect Iter8's built-in gRPC latency and error-related metrics +# task 1: generate gRPC requests for application +# collect Iter8's built-in gRPC latency and error-related metrics - task: grpc with: total: 200 From 95f2159541e0ed9bf25c5572705eb4a247d98a26 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Thu, 3 Aug 2023 09:10:45 -0400 Subject: [PATCH 046/121] Add routemap tests Signed-off-by: Alan Cha --- controllers/routemap_test.go | 69 ++++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) diff --git a/controllers/routemap_test.go b/controllers/routemap_test.go 
index 0a43260ee..97c061b27 100644 --- a/controllers/routemap_test.go +++ b/controllers/routemap_test.go @@ -414,3 +414,72 @@ routingTemplates: return assert.NoError(t, err) && assert.Equal(t, "11451027137128994800", signature) }, time.Second*2, time.Millisecond*100) } + +func TestGetNamespace(t *testing.T) { + a := "a" + b := "b" + c := "c" + myName := "myName" + myNamespace := "myNamespace" + + rm := routemap{ + ObjectMeta: metav1.ObjectMeta{ + Name: myName, + Namespace: myNamespace, + }, + Versions: []version{ + {Signature: &a}, + {Signature: &b}, + {Signature: &c}, + }, + } + + assert.Equal(t, myNamespace, rm.GetNamespace()) +} + +func TestGetName(t *testing.T) { + a := "a" + b := "b" + c := "c" + myName := "myName" + myNamespace := "myNamespace" + + rm := routemap{ + ObjectMeta: metav1.ObjectMeta{ + Name: myName, + Namespace: myNamespace, + }, + Versions: []version{ + {Signature: &a}, + {Signature: &b}, + {Signature: &c}, + }, + } + + assert.Equal(t, myName, rm.GetName()) +} + +func TestGetVersions(t *testing.T) { + a := "a" + b := "b" + c := "c" + + rm := routemap{ + Versions: []version{ + {Signature: &a}, + {Signature: &b}, + {Signature: &c}, + }, + } + + versions := rm.GetVersions() + assert.Equal(t, 3, len(versions)) +} + +func TestGetSignature(t *testing.T) { + a := "a" + + v := version{Signature: &a} + + assert.Equal(t, &a, v.GetSignature()) +} From 26f281bc94d86aef5651db9e86580a78d1c8a79e Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Thu, 3 Aug 2023 09:24:05 -0400 Subject: [PATCH 047/121] Remove SLOs Signed-off-by: Alan Cha --- README.md | 14 ++++--------- base/experiment.go | 35 +------------------------------- cmd/docs_test.go | 2 +- cmd/kassert.go | 8 ++++---- testdata/output/assert-slos.txt | 4 ---- testdata/output/kassertfails.txt | 9 -------- testdata/output/run.txt | 9 -------- 7 files changed, 10 insertions(+), 71 deletions(-) delete mode 100644 testdata/output/assert-slos.txt delete mode 100644 testdata/output/kassertfails.txt delete mode 100644 testdata/output/run.txt diff --git a/README.md b/README.md index 2b47808ac..2a5201e21 100644 --- a/README.md +++ b/README.md @@ -7,9 +7,8 @@ Iter8 is the Kubernetes release optimizer built for DevOps, MLOps, SRE and data Iter8 supports the following use-cases. -1. Performance testing and SLO validation of HTTP services. -2. Performance testing and SLO validation of gRPC services. -3. SLO validation using custom metrics from any database(s) or REST API(s). +1. Performance testing of HTTP services. +2. Performance testing of gRPC services. ## :rocket: Iter8 experiment @@ -22,13 +21,8 @@ Iter8 introduces the notion of an experiment, which is a list of configurable ta Iter8 packs a number of powerful features that facilitate Kubernetes app testing and experimentation. They include the following. 1. **Generating load and collecting built-in metrics for HTTP and gRPC services.** Simplifies performance testing by eliminating the need to setup and use metrics databases. -2. **Well-defined notion of service-level objectives (SLOs).** Makes it simple to define and verify SLOs in experiments. -3. **Custom metrics.** Enables the use of custom metrics from any database(s) or REST API(s) in experiments. -4. **Readiness check.** The performance testing portion of the experiment begins only after the service is ready. -5. **HTML/text reports.** Promotes human understanding of experiment results through visual insights. -6. **Assertions.** Verifies whether the target app satisfies the specified SLOs or not after an experiment. 
Simplifies automation in CI/CD/GitOps pipelines: branch off into different paths depending upon whether the assertions are true or false. -7. **Multi-loop experiments.** Experiment tasks can be executed periodically (multi-loop) instead of just once (single-loop). This enables Iter8 to refresh metric values and perform SLO validation using the latest metric values during each loop. -8. **Experiment anywhere.** Iter8 experiments can be launched inside a Kubernetes cluster, in local environments, or inside a GitHub Actions pipeline. +2. **Readiness check.** The performance testing portion of the experiment begins only after the service is ready. +3. **Experiment anywhere.** Iter8 experiments can be launched inside a Kubernetes cluster, in local environments, or inside a GitHub Actions pipeline. Please see [https://iter8.tools](https://iter8.tools) for the complete documentation. diff --git a/base/experiment.go b/base/experiment.go index 7077742c7..2788ecfb0 100644 --- a/base/experiment.go +++ b/base/experiment.go @@ -75,10 +75,7 @@ type ExperimentResult struct { Iter8Version string `json:"iter8Version" yaml:"iter8Version"` } -// Insights records the number of versions in this experiment, -// metric values and SLO indicators for each version, -// metrics metadata for all metrics, and -// SLO definitions for all SLOs +// Insights records the number of versions in this experiment type Insights struct { // NumVersions is the number of app versions detected by Iter8 NumVersions int `json:"numVersions" yaml:"numVersions"` @@ -114,35 +111,6 @@ type RewardsWinners struct { Min []int `json:"min,omitempty" yaml:"min,omitempty"` } -// SLO is a service level objective -type SLO struct { - // Metric is the fully qualified metric name in the backendName/metricName format - Metric string `json:"metric" yaml:"metric"` - - // Limit is the acceptable limit for this metric - Limit float64 `json:"limit" yaml:"limit"` -} - -// SLOLimits specify upper or lower limits for metrics -type SLOLimits struct { - // Upper limits for metrics - Upper []SLO `json:"upper,omitempty" yaml:"upper,omitempty"` - - // Lower limits for metrics - Lower []SLO `json:"lower,omitempty" yaml:"lower,omitempty"` -} - -// SLOResults specify the results of SLO evaluations -type SLOResults struct { - // Upper limits for metrics - // Upper[i][j] specifies if upper SLO i is satisfied by version j - Upper [][]bool `json:"upper,omitempty" yaml:"upper,omitempty"` - - // Lower limits for metrics - // Lower[i][j] specifies if lower SLO i is satisfied by version j - Lower [][]bool `json:"lower,omitempty" yaml:"lower,omitempty"` -} - // TaskMeta provides common fields used across all tasks type TaskMeta struct { // Task is the name of the task @@ -152,7 +120,6 @@ type TaskMeta struct { Run *string `json:"run,omitempty" yaml:"run,omitempty"` // If is the condition used to determine if this task needs to run // If the condition is not satisfied, then it is skipped in an experiment - // Example: SLOs() If *string `json:"if,omitempty" yaml:"if,omitempty"` } diff --git a/cmd/docs_test.go b/cmd/docs_test.go index d6641a4ec..92260b687 100644 --- a/cmd/docs_test.go +++ b/cmd/docs_test.go @@ -9,7 +9,7 @@ import ( func TestDocs(t *testing.T) { _ = os.Chdir(t.TempDir()) tests := []cmdTestCase{ - // assert, SLOs + // assert { name: "create docs", cmd: fmt.Sprintf("docs --commandDocsDir %v", t.TempDir()), diff --git a/cmd/kassert.go b/cmd/kassert.go index 3dd4a95ff..d59f6d79c 100644 --- a/cmd/kassert.go +++ b/cmd/kassert.go @@ -17,14 +17,14 @@ Assert if the result 
of a Kubernetes experiment satisfies the specified conditio Assertions are especially useful for automation inside CI/CD/GitOps pipelines. -Supported conditions are 'completed', 'nofailure', 'slos', which indicate that the experiment has completed, none of the tasks have failed, and the SLOs are satisfied. +Supported conditions are 'completed' and 'nofailure' which indicate that the experiment has completed and none of the tasks have failed. - iter8 k assert -c completed -c nofailure -c slos - # same as iter8 k assert -c completed,nofailure,slos + iter8 k assert -c completed -c nofailure + # same as iter8 k assert -c completed,nofailure You can optionally specify a timeout, which is the maximum amount of time to wait for the conditions to be satisfied: - iter8 k assert -c completed,nofailure,slos -t 5s + iter8 k assert -c completed,nofailure -t 5s ` // newAssertCmd creates the Kubernetes assert command diff --git a/testdata/output/assert-slos.txt b/testdata/output/assert-slos.txt deleted file mode 100644 index 507879b72..000000000 --- a/testdata/output/assert-slos.txt +++ /dev/null @@ -1,4 +0,0 @@ -time=1977-09-02 22:04:05 level=info msg=experiment completed -time=1977-09-02 22:04:05 level=info msg=experiment has no failure -time=1977-09-02 22:04:05 level=info msg=SLOs are satisfied -time=1977-09-02 22:04:05 level=info msg=all conditions were satisfied diff --git a/testdata/output/kassertfails.txt b/testdata/output/kassertfails.txt deleted file mode 100644 index e180b06b0..000000000 --- a/testdata/output/kassertfails.txt +++ /dev/null @@ -1,9 +0,0 @@ -time=1977-09-02 22:04:05 level=info msg=experiment completed -time=1977-09-02 22:04:05 level=info msg=experiment has no failure -time=1977-09-02 22:04:05 level=info msg=SLOs are not satisfied -time=1977-09-02 22:04:05 level=info msg=experiment completed -time=1977-09-02 22:04:05 level=info msg=experiment has no failure -time=1977-09-02 22:04:05 level=info msg=SLOs are not satisfied -time=1977-09-02 22:04:05 level=info msg=not all conditions were satisfied -time=1977-09-02 22:04:05 level=error msg=assert conditions failed -time=1977-09-02 22:04:05 level=error msg=assert conditions failed diff --git a/testdata/output/run.txt b/testdata/output/run.txt deleted file mode 100644 index 08360495e..000000000 --- a/testdata/output/run.txt +++ /dev/null @@ -1,9 +0,0 @@ -time=1977-09-02 22:04:05 level=info msg=task 1: http: started -time=1977-09-02 22:04:05 level=info msg=task 1: http: completed -time=1977-09-02 22:04:05 level=info msg=task 2: assess: started -time=1977-09-02 22:04:05 level=info msg=task 2: assess: completed -time=1977-09-02 22:04:05 level=info msg=task 3: run: started -time=1977-09-02 22:04:05 level=info msg=task 3: run: completed -time=1977-09-02 22:04:05 level=info msg=task 4: run: started -time=1977-09-02 22:04:05 level=info msg=task 4: run: skipped stack-trace=below ... 
-::Trace:: false condition: not SLOs() From df81e863d05ddf4e1f80785308d646787df790b8 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Thu, 3 Aug 2023 09:46:09 -0400 Subject: [PATCH 048/121] Remove error message Signed-off-by: Alan Cha --- base/notify.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/base/notify.go b/base/notify.go index 380e24540..5800cf35d 100644 --- a/base/notify.go +++ b/base/notify.go @@ -110,7 +110,7 @@ func (t *notifyTask) getPayload(exp *Experiment) (string, error) { return "", nil } -// initializeDefaults sets default values for the custom metrics task +// initializeDefaults sets default values func (t *notifyTask) initializeDefaults() { // set default HTTP method if t.With.Method == "" { From 24af8c85746b441aaa597763b9d91da4d7216567 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Thu, 3 Aug 2023 09:51:20 -0400 Subject: [PATCH 049/121] Add UnmarshalJSON() test Signed-off-by: Alan Cha --- base/experiment_test.go | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/base/experiment_test.go b/base/experiment_test.go index 4c2ad216e..f14adc291 100644 --- a/base/experiment_test.go +++ b/base/experiment_test.go @@ -178,3 +178,30 @@ func TestFailExperiment(t *testing.T) { exp.failExperiment() assert.False(t, exp.NoFailure()) } + +func TestUnmarshalJSONError(t *testing.T) { + tests := []struct { + specBytes string + errMessage string + }{ + { + specBytes: "hello world", + errMessage: `invalid character 'h' looking for beginning of value`, + }, + { + specBytes: "[{}]", + errMessage: `invalid task found without a task name or a run command`, + }, + { + specBytes: `[{"task":"hello world"}]`, + errMessage: `unknown task: hello world`, + }, + } + + for _, test := range tests { + exp := ExperimentSpec{} + err := exp.UnmarshalJSON([]byte(test.specBytes)) + assert.Error(t, err) + assert.EqualError(t, err, test.errMessage) + } +} From 31802a96970a36ce0394b68f7a0766407befed30 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Thu, 3 Aug 2023 09:56:20 -0400 Subject: [PATCH 050/121] Remove rewards Signed-off-by: Alan Cha --- base/experiment.go | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/base/experiment.go b/base/experiment.go index 2788ecfb0..d5fedb2a6 100644 --- a/base/experiment.go +++ b/base/experiment.go @@ -93,24 +93,6 @@ type VersionInfo struct { Track string `json:"track" yaml:"track"` } -// Rewards specify max and min rewards -type Rewards struct { - // Max is list of reward metrics where the version with the maximum value wins - Max []string `json:"max,omitempty" yaml:"max,omitempty"` - // Min is list of reward metrics where the version with the minimum value wins - Min []string `json:"min,omitempty" yaml:"min,omitempty"` -} - -// RewardsWinners are indices of the best versions for each reward metric -type RewardsWinners struct { - // Max rewards - // Max[i] specifies the index of the winner of reward metric Rewards.Max[i] - Max []int `json:"max,omitempty" yaml:"max,omitempty"` - // Min rewards - // Min[i] specifies the index of the winner of reward metric Rewards.Min[i] - Min []int `json:"min,omitempty" yaml:"min,omitempty"` -} - // TaskMeta provides common fields used across all tasks type TaskMeta struct { // Task is the name of the task From ed5999271097932a328fc619372432a8df5028e5 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Fri, 4 Aug 2023 09:15:36 -0400 Subject: [PATCH 051/121] Add better multiple endpoint support to HTTP Signed-off-by: Alan Cha --- action/run_test.go | 2 +- base/collect_grpc.go | 3 - 
base/collect_grpc_test.go | 6 +- base/collect_http.go | 20 ++--- base/collect_http_test.go | 185 +++++++++++++++++++++++--------------- base/experiment_test.go | 4 +- base/metrics.go | 13 +++ cmd/kassert_test.go | 4 +- cmd/krun_test.go | 4 +- driver/filedriver_test.go | 2 +- driver/kubedriver_test.go | 2 +- metrics/server.go | 4 +- metrics/server_test.go | 2 +- 13 files changed, 146 insertions(+), 105 deletions(-) create mode 100644 base/metrics.go diff --git a/action/run_test.go b/action/run_test.go index 0965cccbe..d76244d52 100644 --- a/action/run_test.go +++ b/action/run_test.go @@ -53,7 +53,7 @@ func TestKubeRun(t *testing.T) { assert.NotNil(t, body) // check payload content - bodyFortioResult := base.FortioResult{} + bodyFortioResult := base.HTTPResult{} err = json.Unmarshal(body, &bodyFortioResult) assert.NoError(t, err) assert.NotNil(t, body) diff --git a/base/collect_grpc.go b/base/collect_grpc.go index bbbc2faf0..0cb245345 100644 --- a/base/collect_grpc.go +++ b/base/collect_grpc.go @@ -18,9 +18,6 @@ const ( countErrorsDefault = true // insucureDefault is the default value which indicates that plaintext and insecure connection should be used insecureDefault = true - - // GRPCDashboardPath is the path to the GET grpcDashboard/ endpoint - GRPCDashboardPath = "/grpcDashboard" ) // collectHTTPInputs contain the inputs to the metrics collection task to be executed. diff --git a/base/collect_grpc_test.go b/base/collect_grpc_test.go index 6e2a345d1..e1a23cc9b 100644 --- a/base/collect_grpc_test.go +++ b/base/collect_grpc_test.go @@ -51,7 +51,7 @@ func TestRunCollectGRPCUnary(t *testing.T) { assert.NotNil(t, body) // check payload content - bodyFortioResult := FortioResult{} + bodyFortioResult := HTTPResult{} err = json.Unmarshal(body, &bodyFortioResult) assert.NoError(t, err) assert.NotNil(t, body) @@ -169,7 +169,7 @@ func TestRunCollectGRPCEndpoints(t *testing.T) { assert.NotNil(t, body) // check payload content - bodyFortioResult := FortioResult{} + bodyFortioResult := HTTPResult{} err = json.Unmarshal(body, &bodyFortioResult) assert.NoError(t, err) assert.NotNil(t, body) @@ -284,7 +284,7 @@ func TestRunCollectGRPCMultipleNoEndpoints(t *testing.T) { assert.NotNil(t, body) // check payload content - bodyFortioResult := FortioResult{} + bodyFortioResult := HTTPResult{} err = json.Unmarshal(body, &bodyFortioResult) assert.NoError(t, err) assert.Equal(t, `{"EndpointResults":{},"Summary":{"numVersions":1,"versionNames":null}}`, string(body)) diff --git a/base/collect_http.go b/base/collect_http.go index 58dda200f..8a2c1b5b1 100644 --- a/base/collect_http.go +++ b/base/collect_http.go @@ -62,9 +62,9 @@ type collectHTTPInputs struct { Endpoints map[string]endpoint `json:"endpoints" yaml:"endpoints"` } -// FortioResult is the raw data sent to the metrics server +// HTTPResult is the raw data sent to the metrics server // This data will be transformed into httpDashboard when getHTTPGrafana is called -type FortioResult struct { +type HTTPResult struct { // key is the endpoint EndpointResults map[string]*fhttp.HTTPRunnerResults @@ -80,16 +80,6 @@ const ( defaultHTTPNumRequests = int64(100) // defaultHTTPConnections is the default number of connections (parallel go routines) defaultHTTPConnections = 4 - - // MetricsServerURL is the URL of the metrics server - // TODO: move elsewhere because also needed by gRPC - MetricsServerURL = "METRICS_SERVER_URL" - // PerformanceResultPath is the path to the PUT performanceResult/ endpoint - // TODO: move elsewhere because also needed by gRPC - 
PerformanceResultPath = "/performanceResult" - - // HTTPDashboardPath is the path to the GET httpDashboard/ endpoint - HTTPDashboardPath = "/httpDashboard" ) var ( @@ -316,7 +306,7 @@ func (t *collectHTTPTask) getFortioResults() (map[string]*fhttp.HTTPRunnerResult continue } - results[endpoint.URL] = ifr + results[endpointID] = ifr } } else { fo, err := getFortioOptions(t.With.endpoint) @@ -335,7 +325,7 @@ func (t *collectHTTPTask) getFortioResults() (map[string]*fhttp.HTTPRunnerResult return nil, err } - results[t.With.endpoint.URL] = ifr + results[t.With.URL] = ifr } return results, err @@ -370,7 +360,7 @@ func (t *collectHTTPTask) run(exp *Experiment) error { } // push data to metrics service - fortioResult := FortioResult{ + fortioResult := HTTPResult{ EndpointResults: data, Summary: *exp.Result.Insights, } diff --git a/base/collect_http_test.go b/base/collect_http_test.go index 18fb9db6e..07f6211d5 100644 --- a/base/collect_http_test.go +++ b/base/collect_http_test.go @@ -68,11 +68,13 @@ func TestRunCollectHTTP(t *testing.T) { assert.NotNil(t, body) // check payload content - bodyFortioResult := FortioResult{} + bodyFortioResult := HTTPResult{} err = json.Unmarshal(body, &bodyFortioResult) assert.NoError(t, err) assert.NotNil(t, body) + fmt.Println(string(body)) + if _, ok := bodyFortioResult.EndpointResults[url]; !ok { assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain url: %s", url)) } @@ -178,6 +180,8 @@ func TestRunCollectHTTPMultipleEndpoints(t *testing.T) { mux.HandleFunc("/"+bar, barHandler) baseURL := fmt.Sprintf("http://localhost:%d/", addr.Port) + endpoint1 := "endpoint1" + endpoint2 := "endpoint2" endpoint1URL := baseURL + foo endpoint2URL := baseURL + bar @@ -199,17 +203,17 @@ func TestRunCollectHTTPMultipleEndpoints(t *testing.T) { assert.NotNil(t, body) // check payload content - bodyFortioResult := FortioResult{} + bodyFortioResult := HTTPResult{} err = json.Unmarshal(body, &bodyFortioResult) assert.NoError(t, err) assert.NotNil(t, body) - if _, ok := bodyFortioResult.EndpointResults[endpoint1URL]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain url: %s", endpoint1URL)) + if _, ok := bodyFortioResult.EndpointResults[endpoint1]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain endpoint: %s", endpoint1)) } - if _, ok := bodyFortioResult.EndpointResults[endpoint2URL]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain url: %s", endpoint2URL)) + if _, ok := bodyFortioResult.EndpointResults[endpoint2]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain endpoint: %s", endpoint2)) } }, }) @@ -257,69 +261,110 @@ func TestRunCollectHTTPMultipleEndpoints(t *testing.T) { assert.Equal(t, exp.Result.Insights.NumVersions, 1) } -// TODO: this test is broken because the FortioResult.EndpointResults uses URL -// as the key but in this case, there are two endpoints with the same URL but -// different headers. 
-// -// // Multiple endpoints are provided but they share one URL -// // Test that the base-level URL is provided to each endpoint -// // Make multiple calls to the same URL but with different headers -// func TestRunCollectHTTPSingleEndpointMultipleCalls(t *testing.T) { -// mux, addr := fhttp.DynamicHTTPServer(false) - -// // handler -// fooCalled := false // ensure that foo header is provided -// barCalled := false // ensure that bar header is provided -// fooHandler := func(w http.ResponseWriter, r *http.Request) { -// from := r.Header.Get(from) -// if from == foo { -// fooCalled = true -// } else if from == bar { -// barCalled = true -// } - -// w.WriteHeader(200) -// } -// mux.HandleFunc("/", fooHandler) - -// baseURL := fmt.Sprintf("http://localhost:%d/", addr.Port) - -// // valid collect HTTP task... should succeed -// ct := &collectHTTPTask{ -// TaskMeta: TaskMeta{ -// Task: StringPointer(CollectHTTPTaskName), -// }, -// With: collectHTTPInputs{ -// endpoint: endpoint{ -// Duration: StringPointer("1s"), -// URL: baseURL, -// }, -// Endpoints: map[string]endpoint{ -// endpoint1: { -// Headers: map[string]string{ -// from: foo, -// }, -// }, -// endpoint2: { -// Headers: map[string]string{ -// from: bar, -// }, -// }, -// }, -// }, -// } - -// exp := &Experiment{ -// Spec: []Task{ct}, -// Result: &ExperimentResult{}, -// } -// exp.initResults(1) -// err := ct.run(exp) -// assert.NoError(t, err) -// assert.True(t, fooCalled) // ensure that the /foo/ handler is called -// assert.True(t, barCalled) // ensure that the /bar/ handler is called -// assert.Equal(t, exp.Result.Insights.NumVersions, 1) -// } +// Multiple endpoints are provided but they share one URL +// Test that the base-level URL is provided to each endpoint +// Make multiple calls to the same URL but with different headers +func TestRunCollectHTTPSingleEndpointMultipleCalls(t *testing.T) { + // define METRICS_SERVER_URL + metricsServerURL := "http://iter8.default:8080" + err := os.Setenv(MetricsServerURL, metricsServerURL) + assert.NoError(t, err) + + mux, addr := fhttp.DynamicHTTPServer(false) + + // handler + fooCalled := false // ensure that foo header is provided + barCalled := false // ensure that bar header is provided + fooHandler := func(w http.ResponseWriter, r *http.Request) { + from := r.Header.Get(from) + if from == foo { + fooCalled = true + } else if from == bar { + barCalled = true + } + + w.WriteHeader(200) + } + mux.HandleFunc("/", fooHandler) + + baseURL := fmt.Sprintf("http://localhost:%d/", addr.Port) + endpoint1 := "endpoint1" + endpoint2 := "endpoint2" + + // mock metrics server + StartHTTPMock(t) + metricsServerCalled := false + MockMetricsServer(MockMetricsServerInput{ + MetricsServerURL: metricsServerURL, + PerformanceResultCallback: func(req *http.Request) { + metricsServerCalled = true + + // check query parameters + assert.Equal(t, myName, req.URL.Query().Get("experiment")) + assert.Equal(t, myNamespace, req.URL.Query().Get("namespace")) + + // check payload + body, err := io.ReadAll(req.Body) + assert.NoError(t, err) + assert.NotNil(t, body) + + // check payload content + bodyFortioResult := HTTPResult{} + err = json.Unmarshal(body, &bodyFortioResult) + assert.NoError(t, err) + assert.NotNil(t, body) + + if _, ok := bodyFortioResult.EndpointResults[endpoint1]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain endpoint: %s", endpoint1)) + } + + if _, ok := bodyFortioResult.EndpointResults[endpoint2]; !ok { + assert.Fail(t, fmt.Sprintf("payload 
FortioResult.EndpointResult does not contain endpoint: %s", endpoint2)) + } + }, + }) + + // valid collect HTTP task... should succeed + ct := &collectHTTPTask{ + TaskMeta: TaskMeta{ + Task: StringPointer(CollectHTTPTaskName), + }, + With: collectHTTPInputs{ + endpoint: endpoint{ + Duration: StringPointer("1s"), + URL: baseURL, + }, + Endpoints: map[string]endpoint{ + endpoint1: { + Headers: map[string]string{ + from: foo, + }, + }, + endpoint2: { + Headers: map[string]string{ + from: bar, + }, + }, + }, + }, + } + + exp := &Experiment{ + Spec: []Task{ct}, + Result: &ExperimentResult{}, + Metadata: ExperimentMetadata{ + Name: myName, + Namespace: myNamespace, + }, + } + exp.initResults(1) + err = ct.run(exp) + assert.NoError(t, err) + assert.True(t, fooCalled) // ensure that the /foo/ handler is called + assert.True(t, barCalled) // ensure that the /bar/ handler is called + assert.True(t, metricsServerCalled) + assert.Equal(t, exp.Result.Insights.NumVersions, 1) +} // If the endpoints cannot be reached, then do not throw an error // Should not return an nil pointer dereference error (see #1451) @@ -353,7 +398,7 @@ func TestRunCollectHTTPMultipleNoEndpoints(t *testing.T) { assert.NotNil(t, body) // check payload content - bodyFortioResult := FortioResult{} + bodyFortioResult := HTTPResult{} err = json.Unmarshal(body, &bodyFortioResult) assert.NoError(t, err) diff --git a/base/experiment_test.go b/base/experiment_test.go index f14adc291..65594840b 100644 --- a/base/experiment_test.go +++ b/base/experiment_test.go @@ -61,7 +61,7 @@ func TestRunningTasks(t *testing.T) { assert.NotNil(t, body) // check payload content - bodyFortioResult := FortioResult{} + bodyFortioResult := HTTPResult{} err = json.Unmarshal(body, &bodyFortioResult) assert.NoError(t, err) assert.NotNil(t, body) @@ -135,7 +135,7 @@ func TestRunExperiment(t *testing.T) { assert.NotNil(t, body) // check payload content - bodyFortioResult := FortioResult{} + bodyFortioResult := HTTPResult{} err = json.Unmarshal(body, &bodyFortioResult) assert.NoError(t, err) assert.NotNil(t, body) diff --git a/base/metrics.go b/base/metrics.go new file mode 100644 index 000000000..b5995d920 --- /dev/null +++ b/base/metrics.go @@ -0,0 +1,13 @@ +package base + +const ( + // MetricsServerURL is the URL of the metrics server + MetricsServerURL = "METRICS_SERVER_URL" + // PerformanceResultPath is the path to the PUT performanceResult/ endpoint + PerformanceResultPath = "/performanceResult" + + // HTTPDashboardPath is the path to the GET httpDashboard/ endpoint + HTTPDashboardPath = "/httpDashboard" + // GRPCDashboardPath is the path to the GET grpcDashboard/ endpoint + GRPCDashboardPath = "/grpcDashboard" +) diff --git a/cmd/kassert_test.go b/cmd/kassert_test.go index e997dcaae..26bb8bbde 100644 --- a/cmd/kassert_test.go +++ b/cmd/kassert_test.go @@ -54,13 +54,11 @@ func TestKAssert(t *testing.T) { assert.NotNil(t, body) // check payload content - bodyFortioResult := base.FortioResult{} + bodyFortioResult := base.HTTPResult{} err = json.Unmarshal(body, &bodyFortioResult) assert.NoError(t, err) assert.NotNil(t, body) - fmt.Println(string(body)) - if _, ok := bodyFortioResult.EndpointResults[url]; !ok { assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain call: %s", url)) } diff --git a/cmd/krun_test.go b/cmd/krun_test.go index 201c3a24b..cf6f7dc93 100644 --- a/cmd/krun_test.go +++ b/cmd/krun_test.go @@ -47,13 +47,11 @@ func TestKRun(t *testing.T) { assert.NotNil(t, body) // check payload content - bodyFortioResult := 
base.FortioResult{} + bodyFortioResult := base.HTTPResult{} err = json.Unmarshal(body, &bodyFortioResult) assert.NoError(t, err) assert.NotNil(t, body) - fmt.Println(string(body)) - if _, ok := bodyFortioResult.EndpointResults[url]; !ok { assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain call: %s", url)) } diff --git a/driver/filedriver_test.go b/driver/filedriver_test.go index 01a082b2b..2f81988f4 100644 --- a/driver/filedriver_test.go +++ b/driver/filedriver_test.go @@ -48,7 +48,7 @@ func TestLocalRun(t *testing.T) { assert.NotNil(t, body) // check payload content - bodyFortioResult := base.FortioResult{} + bodyFortioResult := base.HTTPResult{} err = json.Unmarshal(body, &bodyFortioResult) assert.NoError(t, err) assert.NotNil(t, body) diff --git a/driver/kubedriver_test.go b/driver/kubedriver_test.go index eabd9c556..c46c25af2 100644 --- a/driver/kubedriver_test.go +++ b/driver/kubedriver_test.go @@ -97,7 +97,7 @@ func TestKubeRun(t *testing.T) { assert.NotNil(t, body) // check payload content - bodyFortioResult := base.FortioResult{} + bodyFortioResult := base.HTTPResult{} err = json.Unmarshal(body, &bodyFortioResult) assert.NoError(t, err) assert.NotNil(t, body) diff --git a/metrics/server.go b/metrics/server.go index cc9f58fc7..6d2d35df4 100644 --- a/metrics/server.go +++ b/metrics/server.go @@ -460,7 +460,7 @@ func getHTTPEndpointRow(httpRunnerResults *fhttp.HTTPRunnerResults) httpEndpoint return row } -func getHTTPDashboardHelper(fortioResult util.FortioResult) httpDashboard { +func getHTTPDashboardHelper(fortioResult util.HTTPResult) httpDashboard { dashboard := httpDashboard{ Endpoints: map[string]httpEndpointRow{}, } @@ -577,7 +577,7 @@ func getHTTPDashboard(w http.ResponseWriter, r *http.Request) { return } - fortioResult := util.FortioResult{} + fortioResult := util.HTTPResult{} err = json.Unmarshal(result, &fortioResult) if err != nil { errorMessage := fmt.Sprintf("cannot JSON unmarshal result into FortioResult: \"%s\"", string(result)) diff --git a/metrics/server_test.go b/metrics/server_test.go index f7b11508b..a1f16726e 100644 --- a/metrics/server_test.go +++ b/metrics/server_test.go @@ -350,7 +350,7 @@ func TestTestRM(t *testing.T) { } func TestGetHTTPDashboardHelper(t *testing.T) { - fortioResult := util.FortioResult{} + fortioResult := util.HTTPResult{} err := json.Unmarshal([]byte(fortioResultJSON), &fortioResult) assert.NoError(t, err) From f75239af13f941cff92c3e6b3f05ffae17f6b24d Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Fri, 4 Aug 2023 09:42:09 -0400 Subject: [PATCH 052/121] Better gRPC multiple endpoint support Signed-off-by: Alan Cha --- action/run_test.go | 2 +- base/collect_grpc.go | 2 +- base/collect_grpc_test.go | 31 +++++++++++++------------------ base/collect_http_test.go | 2 +- base/experiment_test.go | 4 ++-- cmd/kassert_test.go | 2 +- cmd/krun_test.go | 2 +- driver/filedriver_test.go | 2 +- driver/kubedriver_test.go | 2 +- 9 files changed, 22 insertions(+), 27 deletions(-) diff --git a/action/run_test.go b/action/run_test.go index d76244d52..1cacc5e54 100644 --- a/action/run_test.go +++ b/action/run_test.go @@ -59,7 +59,7 @@ func TestKubeRun(t *testing.T) { assert.NotNil(t, body) if _, ok := bodyFortioResult.EndpointResults[url]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain call: %s", url)) + assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain endpoint: %s", url)) } }, }) diff --git a/base/collect_grpc.go b/base/collect_grpc.go index 
0cb245345..f78183957 100644 --- a/base/collect_grpc.go +++ b/base/collect_grpc.go @@ -108,7 +108,7 @@ func (t *collectGRPCTask) resultForVersion() (map[string]*runner.Report, error) continue } - results[endpoint.Call] = igr + results[endpointID] = igr } } else { // TODO: supply all the allowed options diff --git a/base/collect_grpc_test.go b/base/collect_grpc_test.go index e1a23cc9b..a8302783e 100644 --- a/base/collect_grpc_test.go +++ b/base/collect_grpc_test.go @@ -57,7 +57,7 @@ func TestRunCollectGRPCUnary(t *testing.T) { assert.NotNil(t, body) if _, ok := bodyFortioResult.EndpointResults[call]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain call: %s", call)) + assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain endpoint: %s", call)) } }, }) @@ -146,11 +146,6 @@ func TestRunCollectGRPCEndpoints(t *testing.T) { err := os.Setenv(MetricsServerURL, metricsServerURL) assert.NoError(t, err) - unaryCall := "helloworld.Greeter.SayHello" - serverCall := "helloworld.Greeter.SayHelloCS" - clientCall := "helloworld.Greeter.SayHellos" - bidirectionalCall := "helloworld.Greeter.SayHelloBidi" - // mock metrics server StartHTTPMock(t) metricsServerCalled := false @@ -174,20 +169,20 @@ func TestRunCollectGRPCEndpoints(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, body) - if _, ok := bodyFortioResult.EndpointResults[unaryCall]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain call: %s", unaryCall)) + if _, ok := bodyFortioResult.EndpointResults[unary]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain endpoint: %s", unary)) } - if _, ok := bodyFortioResult.EndpointResults[serverCall]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain call: %s", serverCall)) + if _, ok := bodyFortioResult.EndpointResults[server]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain endpoint: %s", server)) } - if _, ok := bodyFortioResult.EndpointResults[clientCall]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain call: %s", clientCall)) + if _, ok := bodyFortioResult.EndpointResults[client]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain endpoint: %s", client)) } - if _, ok := bodyFortioResult.EndpointResults[bidirectionalCall]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain call: %s", bidirectionalCall)) + if _, ok := bodyFortioResult.EndpointResults[bidirectional]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain endpoint: %s", bidirectional)) } }, }) @@ -212,19 +207,19 @@ func TestRunCollectGRPCEndpoints(t *testing.T) { Endpoints: map[string]runner.Config{ unary: { Data: map[string]interface{}{"name": "bob"}, - Call: unaryCall, + Call: "helloworld.Greeter.SayHello", }, server: { Data: map[string]interface{}{"name": "bob"}, - Call: serverCall, + Call: "helloworld.Greeter.SayHelloCS", }, client: { Data: map[string]interface{}{"name": "bob"}, - Call: clientCall, + Call: "helloworld.Greeter.SayHellos", }, bidirectional: { Data: map[string]interface{}{"name": "bob"}, - Call: bidirectionalCall, + Call: "helloworld.Greeter.SayHelloBidi", }, }, }, diff --git a/base/collect_http_test.go b/base/collect_http_test.go index 07f6211d5..1ec2f1dcd 100644 --- a/base/collect_http_test.go +++ b/base/collect_http_test.go @@ -76,7 +76,7 @@ func 
TestRunCollectHTTP(t *testing.T) { fmt.Println(string(body)) if _, ok := bodyFortioResult.EndpointResults[url]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain url: %s", url)) + assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain endpoint: %s", url)) } }, }) diff --git a/base/experiment_test.go b/base/experiment_test.go index 65594840b..e80535f39 100644 --- a/base/experiment_test.go +++ b/base/experiment_test.go @@ -67,7 +67,7 @@ func TestRunningTasks(t *testing.T) { assert.NotNil(t, body) if _, ok := bodyFortioResult.EndpointResults[url]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain url: %s", url)) + assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain endpoint: %s", url)) } }, }) @@ -141,7 +141,7 @@ func TestRunExperiment(t *testing.T) { assert.NotNil(t, body) if _, ok := bodyFortioResult.EndpointResults[url]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain url: %s", url)) + assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain endpoint: %s", url)) } }, }) diff --git a/cmd/kassert_test.go b/cmd/kassert_test.go index 26bb8bbde..92545f268 100644 --- a/cmd/kassert_test.go +++ b/cmd/kassert_test.go @@ -60,7 +60,7 @@ func TestKAssert(t *testing.T) { assert.NotNil(t, body) if _, ok := bodyFortioResult.EndpointResults[url]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain call: %s", url)) + assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain endpoint: %s", url)) } }, }) diff --git a/cmd/krun_test.go b/cmd/krun_test.go index cf6f7dc93..cf6bc1fb3 100644 --- a/cmd/krun_test.go +++ b/cmd/krun_test.go @@ -53,7 +53,7 @@ func TestKRun(t *testing.T) { assert.NotNil(t, body) if _, ok := bodyFortioResult.EndpointResults[url]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain call: %s", url)) + assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain endpoint: %s", url)) } }, }) diff --git a/driver/filedriver_test.go b/driver/filedriver_test.go index 2f81988f4..8769b6fa7 100644 --- a/driver/filedriver_test.go +++ b/driver/filedriver_test.go @@ -54,7 +54,7 @@ func TestLocalRun(t *testing.T) { assert.NotNil(t, body) if _, ok := bodyFortioResult.EndpointResults[url]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain url: %s", url)) + assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain endpoint: %s", url)) } }, }) diff --git a/driver/kubedriver_test.go b/driver/kubedriver_test.go index c46c25af2..50138271e 100644 --- a/driver/kubedriver_test.go +++ b/driver/kubedriver_test.go @@ -103,7 +103,7 @@ func TestKubeRun(t *testing.T) { assert.NotNil(t, body) if _, ok := bodyFortioResult.EndpointResults[url]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain url: %s", url)) + assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain endpoint: %s", url)) } }, }) From a13f95033b7ca8841a3ae62b1090bf1307433d4e Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Fri, 4 Aug 2023 09:48:13 -0400 Subject: [PATCH 053/121] Add single endpoint multiple call test for gRPC Signed-off-by: Alan Cha --- base/collect_grpc_test.go | 96 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 95 insertions(+), 1 deletion(-) diff --git a/base/collect_grpc_test.go 
b/base/collect_grpc_test.go index a8302783e..c180207ff 100644 --- a/base/collect_grpc_test.go +++ b/base/collect_grpc_test.go @@ -18,6 +18,7 @@ import ( const ( unary = "unary" + unary2 = "unary2" server = "server" client = "client" bidirectional = "bidirectional" @@ -140,7 +141,7 @@ func TestRunCollectGRPCUnaryNoEndpoint(t *testing.T) { // Credit: Several of the tests in this file are based on // https://github.com/bojand/ghz/blob/master/runner/run_test.go -func TestRunCollectGRPCEndpoints(t *testing.T) { +func TestRunCollectGRPCMultipleEndpoints(t *testing.T) { // define METRICS_SERVER_URL metricsServerURL := "http://iter8.default:8080" err := os.Setenv(MetricsServerURL, metricsServerURL) @@ -331,3 +332,96 @@ func TestRunCollectGRPCMultipleNoEndpoints(t *testing.T) { assert.NoError(t, err) assert.True(t, metricsServerCalled) } + +func TestRunCollectGRPCSingleEndpointMultipleCalls(t *testing.T) { + // define METRICS_SERVER_URL + metricsServerURL := "http://iter8.default:8080" + err := os.Setenv(MetricsServerURL, metricsServerURL) + assert.NoError(t, err) + + // mock metrics server + StartHTTPMock(t) + metricsServerCalled := false + MockMetricsServer(MockMetricsServerInput{ + MetricsServerURL: metricsServerURL, + PerformanceResultCallback: func(req *http.Request) { + metricsServerCalled = true + + // check query parameters + assert.Equal(t, myName, req.URL.Query().Get("experiment")) + assert.Equal(t, myNamespace, req.URL.Query().Get("namespace")) + + // check payload + body, err := io.ReadAll(req.Body) + assert.NoError(t, err) + assert.NotNil(t, body) + + // check payload content + bodyFortioResult := HTTPResult{} + err = json.Unmarshal(body, &bodyFortioResult) + assert.NoError(t, err) + assert.NotNil(t, body) + + fmt.Println(string(body)) + + if _, ok := bodyFortioResult.EndpointResults[unary]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain endpoint: %s", unary)) + } + + if _, ok := bodyFortioResult.EndpointResults[unary2]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain endpoint: %s", unary2)) + } + }, + }) + + _ = os.Chdir(t.TempDir()) + callType := helloworld.Unary + gs, s, err := internal.StartServer(false) + if err != nil { + assert.FailNow(t, err.Error()) + } + t.Cleanup(s.Stop) + + // valid collect GRPC task... should succeed + ct := &collectGRPCTask{ + TaskMeta: TaskMeta{ + Task: StringPointer(CollectGRPCTaskName), + }, + With: collectGRPCInputs{ + Config: runner.Config{ + Host: internal.LocalHostPort, + Call: "helloworld.Greeter.SayHello", + }, + Endpoints: map[string]runner.Config{ + unary: { + Data: map[string]interface{}{"name": "bob"}, + }, + unary2: { + Data: map[string]interface{}{"name": "charles"}, + }, + }, + }, + } + + log.Logger.Debug("dial timeout before defaulting... ", ct.With.DialTimeout.String()) + + exp := &Experiment{ + Spec: []Task{ct}, + Result: &ExperimentResult{}, + Metadata: ExperimentMetadata{ + Name: myName, + Namespace: myNamespace, + }, + } + exp.initResults(1) + err = ct.run(exp) + + log.Logger.Debug("dial timeout after defaulting... 
", ct.With.DialTimeout.String()) + + assert.NoError(t, err) + assert.Equal(t, exp.Result.Insights.NumVersions, 1) + assert.True(t, metricsServerCalled) + + count := gs.GetCount(callType) + assert.Equal(t, 400, count) +} From 8b0c4eba1dcc402b14f9147f529ab1078c10f05c Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Fri, 4 Aug 2023 09:55:42 -0400 Subject: [PATCH 054/121] Remove runner option Signed-off-by: Alan Cha --- .github/workflows/assets.yaml | 4 ---- .github/workflows/lintcharts2.yaml | 6 ++---- action/launch_test.go | 4 ++-- charts/iter8/templates/k8s.yaml | 5 ----- charts/iter8/values.yaml | 4 ---- cmd/klaunch.go | 4 +--- driver/kubedriver_test.go | 4 ++-- 7 files changed, 7 insertions(+), 24 deletions(-) diff --git a/.github/workflows/assets.yaml b/.github/workflows/assets.yaml index 520078723..415fc827e 100644 --- a/.github/workflows/assets.yaml +++ b/.github/workflows/assets.yaml @@ -107,7 +107,6 @@ jobs: iter8 k launch \ --set tasks={http} \ --set http.url="http://httpbin.default/get" \ - --set runner=job - name: try other iter8 k commands run: | iter8 k assert -c completed -c nofailure --timeout 60s @@ -178,7 +177,6 @@ jobs: --set grpc.host="hello.default:50051" \ --set grpc.call="helloworld.Greeter.SayHello" \ --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ - --set runner=job - name: try other iter8 k commands run: | iter8 k assert -c completed -c nofailure --timeout 60s @@ -208,7 +206,6 @@ jobs: --set ready.service="httpbin" \ --set ready.timeout=60s \ --set http.url=http://httpbin.default \ - --set runner=job - name: k assert experiment completed without failures run: | iter8 k assert -c completed -c nofailure --timeout 60s @@ -238,7 +235,6 @@ jobs: --set ready.timeout=60s \ --set ready.namespace=default \ --set http.url=http://httpbin.default/get \ - --set runner=job - name: k assert experiment completed without failures run: | iter8 k assert -n experiments -c completed -c nofailure --timeout 60s \ No newline at end of file diff --git a/.github/workflows/lintcharts2.yaml b/.github/workflows/lintcharts2.yaml index 65f67103a..31b5281f8 100644 --- a/.github/workflows/lintcharts2.yaml +++ b/.github/workflows/lintcharts2.yaml @@ -34,8 +34,7 @@ jobs: run: | helm template charts/iter8 \ --set tasks={http} \ - --set http.url=http://httpbin.default/get \ - --set runner=job >> iter8.yaml + --set http.url=http://httpbin.default/get >> iter8.yaml - name: Lint Kubernetes YAML file if: steps.modified-files.outputs.any_modified == 'true' @@ -69,8 +68,7 @@ jobs: --set tasks={grpc} \ --set grpc.host="hello.default:50051" \ --set grpc.call="helloworld.Greeter.SayHello" \ - --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ - --set runner=job >> iter8.yaml + --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" >> iter8.yaml - name: Lint Kubernetes YAML file if: steps.modified-files.outputs.any_modified == 'true' diff --git a/action/launch_test.go b/action/launch_test.go index 62596acbb..30a64ae85 100644 --- a/action/launch_test.go +++ b/action/launch_test.go @@ -16,7 +16,7 @@ func TestKubeLaunch(t *testing.T) { // fix lOpts lOpts := NewLaunchOpts(driver.NewFakeKubeDriver(cli.New())) - lOpts.Values = []string{"tasks={http}", "http.url=https://httpbin.org/get", "http.duration=2s", "runner=job"} + lOpts.Values = []string{"tasks={http}", "http.url=https://httpbin.org/get", 
"http.duration=2s"} err = lOpts.KubeRun() assert.NoError(t, err) @@ -35,7 +35,7 @@ func TestKubeLaunchLocalChart(t *testing.T) { lOpts := NewLaunchOpts(driver.NewFakeKubeDriver(cli.New())) lOpts.ChartName = base.CompletePath("../charts", "iter8") lOpts.LocalChart = true - lOpts.Values = []string{"tasks={http}", "http.url=https://httpbin.org/get", "http.duration=2s", "runner=job"} + lOpts.Values = []string{"tasks={http}", "http.url=https://httpbin.org/get", "http.duration=2s"} err = lOpts.KubeRun() assert.NoError(t, err) diff --git a/charts/iter8/templates/k8s.yaml b/charts/iter8/templates/k8s.yaml index 65e5a5c85..fe530c488 100644 --- a/charts/iter8/templates/k8s.yaml +++ b/charts/iter8/templates/k8s.yaml @@ -8,9 +8,4 @@ {{ include "k.rolebinding" . }} {{- end}} --- -{{- if eq "job" .Values.runner }} {{ include "k.job" . }} -{{- else if eq "none" .Values.runner }} -{{- else }} -{{- fail "runner must be one of job or none" }} -{{- end }} diff --git a/charts/iter8/values.yaml b/charts/iter8/values.yaml index e3a497bff..9b8a175b5 100644 --- a/charts/iter8/values.yaml +++ b/charts/iter8/values.yaml @@ -4,10 +4,6 @@ iter8Image: iter8/iter8:0.15 ### majorMinor is the minor version of Iter8 majorMinor: v0.15 -# TODO: Should this only ever be job? -### runner for Kubernetes experiments may be job or none -runner: none - logLevel: info ### resources are the resource limits for the pods diff --git a/cmd/klaunch.go b/cmd/klaunch.go index 1ba923832..8fe518957 100644 --- a/cmd/klaunch.go +++ b/cmd/klaunch.go @@ -14,14 +14,12 @@ import ( const klaunchDesc = ` Launch an experiment inside a Kubernetes cluster. - iter8 k launch --set "tasks={http}" --set http.url=https://httpbin.org/get \ - --set runner=job + iter8 k launch --set "tasks={http}" --set http.url=https://httpbin.org/get Use the dry option to simulate a Kubernetes experiment. This creates the manifest.yaml file, but does not run the experiment, and does not deploy any experiment resource objects in the cluster. iter8 k launch \ --set http.url=https://httpbin.org/get \ - --set runner=job \ --dry The launch command creates the 'charts' subdirectory under the current working directory, downloads the Iter8 experiment chart, and places it under 'charts'. This behavior can be controlled using various launch flags. 
diff --git a/driver/kubedriver_test.go index 50138271e..07207ce2b 100644 --- a/driver/kubedriver_test.go +++ b/driver/kubedriver_test.go @@ -30,7 +30,7 @@ func TestKOps(t *testing.T) { // install err = kd.install(action.ChartPathOptions{}, base.CompletePath("../", "charts/iter8"), values.Options{ - Values: []string{"tasks={http}", "http.url=https://httpbin.org/get", "runner=job"}, + Values: []string{"tasks={http}", "http.url=https://httpbin.org/get"}, }, kd.Group, false) assert.NoError(t, err) @@ -45,7 +45,7 @@ func TestKOps(t *testing.T) { // upgrade err = kd.upgrade(action.ChartPathOptions{}, base.CompletePath("../", "charts/iter8"), values.Options{ - Values: []string{"tasks={http}", "http.url=https://httpbin.org/get", "runner=job"}, + Values: []string{"tasks={http}", "http.url=https://httpbin.org/get"}, }, kd.Group, false) assert.NoError(t, err) From 6ec45aa7cb088fdc8f7cee08ed4a22e9736e2be8 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Fri, 4 Aug 2023 10:07:15 -0400 Subject: [PATCH 055/121] Fix workflow Signed-off-by: Alan Cha --- .github/workflows/assets.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/assets.yaml b/.github/workflows/assets.yaml index 415fc827e..c4bc476f4 100644 --- a/.github/workflows/assets.yaml +++ b/.github/workflows/assets.yaml @@ -106,7 +106,7 @@ jobs: run: | iter8 k launch \ --set tasks={http} \ - --set http.url="http://httpbin.default/get" \ + --set http.url="http://httpbin.default/get" - name: try other iter8 k commands run: | iter8 k assert -c completed -c nofailure --timeout 60s @@ -176,7 +176,7 @@ jobs: --set tasks={grpc} \ --set grpc.host="hello.default:50051" \ --set grpc.call="helloworld.Greeter.SayHello" \ - --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ + --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" - name: try other iter8 k commands run: | iter8 k assert -c completed -c nofailure --timeout 60s @@ -205,7 +205,7 @@ jobs: --set ready.deploy="httpbin" \ --set ready.service="httpbin" \ --set ready.timeout=60s \ - --set http.url=http://httpbin.default \ + --set http.url=http://httpbin.default - name: k assert experiment completed without failures run: | iter8 k assert -c completed -c nofailure --timeout 60s @@ -234,7 +234,7 @@ jobs: --set ready.service="httpbin" \ --set ready.timeout=60s \ --set ready.namespace=default \ - --set http.url=http://httpbin.default/get \ + --set http.url=http://httpbin.default/get - name: k assert experiment completed without failures run: | iter8 k assert -n experiments -c completed -c nofailure --timeout 60s \ No newline at end of file From 8d3bc5f494a082a276e87ea365a86a6db611dd38 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Tue, 8 Aug 2023 17:19:24 -0400 Subject: [PATCH 056/121] Remove if parameter from github and slack tasks Signed-off-by: Alan Cha --- charts/iter8/templates/_task-github.tpl | 3 --- charts/iter8/templates/_task-slack.tpl | 3 --- 2 files changed, 6 deletions(-) diff --git a/charts/iter8/templates/_task-github.tpl b/charts/iter8/templates/_task-github.tpl index 8ee103d02..34cf4e9ce 100644 --- a/charts/iter8/templates/_task-github.tpl +++ b/charts/iter8/templates/_task-github.tpl @@ -14,9 +14,6 @@ {{- end }} # task: send a GitHub notification - task: notify -{{- if .if }} - if: {{ .if | quote }} -{{- end }} with: url: https://api.github.com/repos/{{ .owner }}/{{ .repo
}}/dispatches method: POST diff --git a/charts/iter8/templates/_task-slack.tpl b/charts/iter8/templates/_task-slack.tpl index 51233a62d..0d4867c9d 100644 --- a/charts/iter8/templates/_task-slack.tpl +++ b/charts/iter8/templates/_task-slack.tpl @@ -8,9 +8,6 @@ {{- end }} # task: send a Slack notification - task: notify -{{- if .if }} - if: {{ .if | quote }} -{{- end }} with: url: {{ .url }} method: POST From c64f202f58fe8167c0b523b486ff653b02856da5 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Tue, 8 Aug 2023 17:19:35 -0400 Subject: [PATCH 057/121] Remove custommetrics template Signed-off-by: Alan Cha --- templates/custommetrics/istio-prom.tpl | 114 ------------------------- 1 file changed, 114 deletions(-) delete mode 100644 templates/custommetrics/istio-prom.tpl diff --git a/templates/custommetrics/istio-prom.tpl b/templates/custommetrics/istio-prom.tpl deleted file mode 100644 index 172af432b..000000000 --- a/templates/custommetrics/istio-prom.tpl +++ /dev/null @@ -1,114 +0,0 @@ -# This file provides templated metric specifications that enable -# Iter8 to retrieve metrics from Istio's Prometheus add-on. -# -# For a list of metrics supported out-of-the-box by the Istio Prometheus add-on, -# please see https://istio.io/latest/docs/reference/config/metrics/ -# -# Iter8 substitutes the placeholders in this file with values, -# and uses the resulting metric specs to query Prometheus. -# The placeholders are as follows. -# -# labels map[string]interface{} optional -# elapsedTimeSeconds int implicit -# startingTime string optional -# latencyPercentiles []int optional -# -# labels: this is the set of Prometheus labels that will be used to identify a particular -# app version. These labels will be applied to every Prometheus query. To learn more -# about what labels you can use for Prometheus, please see -# https://istio.io/latest/docs/reference/config/metrics/#labels -# -# elapsedTimeSeconds: this should not be specified directly by the user. -# It is implicitly computed by Iter8 according to the following formula -# elapsedTimeSeconds := (time.Now() - startingTime).Seconds() -# -# startingTime: By default, this is the time at which the Iter8 experiment started. -# The user can explicitly specify the startingTime for each app version -# (for example, the user can set the startingTime to the creation time of the app version) -# -# latencyPercentiles: Each item in this slice will create a new metric spec. -# For example, if this is set to [50,75,90,95], -# then, latency-p50, latency-p75, latency-p90, latency-p95 metric specs are created. - -{{- define "labels"}} -{{- range $key, $val := .labels }} -{{- if or (eq (kindOf $val) "slice") (eq (kindOf $val) "map")}} -{{- fail (printf "labels should be a primitive types but received: %s :%s" $key $val) }} -{{- end }} -{{- if eq $key "response_code"}} -{{- fail "labels should not contain 'response_code'" }} -{{- end }} - {{ $key }}="{{ $val }}", -{{- end }} -{{- end}} - -# url is the HTTP endpoint where the Prometheus service installed by Istio's Prom add-on -# can be queried for metrics - -url: {{ .istioPromURL | default "http://prometheus.istio-system:9090/api/v1/query" }} -provider: istio-prom -method: GET -metrics: -- name: request-count - type: counter - description: | - Number of requests - params: - - name: query - value: | - sum(last_over_time(istio_requests_total{ - {{- template "labels" . 
}} - }[{{ .elapsedTimeSeconds }}s])) or on() vector(0) - jqExpression: .data.result[0].value[1] | tonumber -- name: error-count - type: counter - description: | - Number of unsuccessful requests - params: - - name: query - value: | - sum(last_over_time(istio_requests_total{ - response_code=~'5..', - {{- template "labels" . }} - }[{{ .elapsedTimeSeconds }}s])) or on() vector(0) - jqExpression: .data.result[0].value[1] | tonumber -- name: error-rate - type: gauge - description: | - Fraction of unsuccessful requests - params: - - name: query - value: | - (sum(last_over_time(istio_requests_total{ - response_code=~'5..', - {{- template "labels" . }} - }[{{ .elapsedTimeSeconds }}s])) or on() vector(0))/(sum(last_over_time(istio_requests_total{ - {{- template "labels" . }} - }[{{ .elapsedTimeSeconds }}s])) or on() vector(0)) - jqExpression: .data.result.[0].value.[1] | tonumber -- name: latency-mean - type: gauge - description: | - Mean latency - params: - - name: query - value: | - (sum(last_over_time(istio_request_duration_milliseconds_sum{ - {{- template "labels" . }} - }[{{ .elapsedTimeSeconds }}s])) or on() vector(0))/(sum(last_over_time(istio_requests_total{ - {{- template "labels" . }} - }[{{ .elapsedTimeSeconds }}s])) or on() vector(0)) - jqExpression: .data.result[0].value[1] | tonumber -{{- range $i, $p := .latencyPercentiles }} -- name: latency-p{{ $p }} - type: gauge - description: | - {{ $p }} percentile latency - params: - - name: query - value: | - histogram_quantile(0.{{ $p }}, sum(rate(istio_request_duration_milliseconds_bucket{ - {{- template "labels" $ }} - }[{{ $.elapsedTimeSeconds }}s])) by (le)) - jqExpression: .data.result[0].value[1] | tonumber -{{- end }} From b508a9e918e900a80a24b10f0213709bd09323ca Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Tue, 8 Aug 2023 17:28:52 -0400 Subject: [PATCH 058/121] Add name and namespace to experiment result For github and slack tasks Signed-off-by: Alan Cha --- base/experiment.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/base/experiment.go b/base/experiment.go index d5fedb2a6..26eb030d4 100644 --- a/base/experiment.go +++ b/base/experiment.go @@ -53,6 +53,12 @@ type Experiment struct { // ExperimentResult defines the current results from the experiment type ExperimentResult struct { + // Name is the name of this experiment + Name string `json:"name,omitempty" yaml:"name,omitempty"` + + // Namespace is the namespace of this experiment + Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"` + // Revision of this experiment Revision int `json:"revision,omitempty" yaml:"revision,omitempty"` @@ -216,6 +222,8 @@ func (in *Insights) TrackVersionStr(i int) string { // initResults initializes the results section of an experiment func (exp *Experiment) initResults(revision int) { exp.Result = &ExperimentResult{ + Name: exp.Metadata.Name, + Namespace: exp.Metadata.Namespace, Revision: revision, StartTime: time.Now(), NumLoops: 0, From 8dda62dd29f2e2ae276cd9136fbfbbb72226314f Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Tue, 8 Aug 2023 17:42:40 -0400 Subject: [PATCH 059/121] Add space Signed-off-by: Alan Cha --- ADOPTERS.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ADOPTERS.md b/ADOPTERS.md index 3490f9db2..73fdca3c3 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -7,6 +7,6 @@ If you are starting to use Iter8, we would love to see you in the list below. 
Pl | IBM Cloud (DevOps Toolchains) | [Michael Kalantar](https://github.com/kalantar), [Srinivasan Parthasarathy](https://github.com/sriumcp) | | IBM Research Cloud Innovation Lab | [Atin Sood](https://github.com/atinsood)| | IBM Cloud (Code Engine) | [Doug Davis](https://github.com/duglin) | -| ChaosNative(LitmusChaos) | [Shubham Chaudhary](https://github.com/ispeakc0de) | +| ChaosNative (LitmusChaos) | [Shubham Chaudhary](https://github.com/ispeakc0de) | | Seldon Core | [Clive Cox](https://github.com/cliveseldon) | | Datagrate, Inc. (jetic.io) | [Mert Öztürk](https://github.com/mertdotcc) | \ No newline at end of file From 4f8b7ec83ae421261ac142a79412fd71d924d9a7 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Fri, 11 Aug 2023 20:00:21 -0400 Subject: [PATCH 060/121] Remove reuseResult Signed-off-by: Alan Cha --- action/run.go | 2 +- base/experiment.go | 8 ++++---- base/experiment_test.go | 2 +- driver/filedriver_test.go | 2 +- driver/kubedriver_test.go | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/action/run.go b/action/run.go index 8a3f01d80..17dc6b389 100644 --- a/action/run.go +++ b/action/run.go @@ -33,5 +33,5 @@ func (rOpts *RunOpts) KubeRun() error { return err } - return base.RunExperiment(rOpts.ReuseResult, rOpts.KubeDriver) + return base.RunExperiment(rOpts.KubeDriver) } diff --git a/base/experiment.go b/base/experiment.go index 26eb030d4..2ed847892 100644 --- a/base/experiment.go +++ b/base/experiment.go @@ -410,14 +410,14 @@ func BuildExperiment(driver Driver) (*Experiment, error) { } // RunExperiment runs an experiment -func RunExperiment(reuseResult bool, driver Driver) error { +func RunExperiment(driver Driver) error { var exp *Experiment var err error if exp, err = BuildExperiment(driver); err != nil { return err } - if !reuseResult { - exp.initResults(driver.GetRevision()) - } + + exp.initResults(driver.GetRevision()) + return exp.run(driver) } diff --git a/base/experiment_test.go b/base/experiment_test.go index e80535f39..308785632 100644 --- a/base/experiment_test.go +++ b/base/experiment_test.go @@ -158,7 +158,7 @@ func TestRunExperiment(t *testing.T) { assert.NoError(t, err) assert.Equal(t, 1, len(e.Spec)) - err = RunExperiment(false, &mockDriver{e}) + err = RunExperiment(&mockDriver{e}) assert.NoError(t, err) assert.True(t, metricsServerCalled) // sanity check -- handler was called diff --git a/driver/filedriver_test.go b/driver/filedriver_test.go index 8769b6fa7..661340687 100644 --- a/driver/filedriver_test.go +++ b/driver/filedriver_test.go @@ -67,7 +67,7 @@ func TestLocalRun(t *testing.T) { fd := FileDriver{ RunDir: ".", } - err = base.RunExperiment(false, &fd) + err = base.RunExperiment(&fd) assert.NoError(t, err) // sanity check -- handler was called assert.True(t, verifyHandlerCalled) diff --git a/driver/kubedriver_test.go b/driver/kubedriver_test.go index 07207ce2b..c86090c2f 100644 --- a/driver/kubedriver_test.go +++ b/driver/kubedriver_test.go @@ -136,7 +136,7 @@ func TestKubeRun(t *testing.T) { // }, // }, metav1.CreateOptions{}) - err = base.RunExperiment(false, kd) + err = base.RunExperiment(kd) assert.NoError(t, err) // sanity check -- handler was called assert.True(t, verifyHandlerCalled) From 8a2738f6a00a5c6e46b5d6031d6a87c81c4cebbb Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Fri, 11 Aug 2023 20:37:46 -0400 Subject: [PATCH 061/121] Performance result has result instead of insights Signed-off-by: Alan Cha --- base/collect_grpc.go | 4 +-- base/collect_http.go | 65 ++++++-------------------------------------- base/metrics.go | 62 
++++++++++++++++++++++++++++++++++++++++++ metrics/server.go | 5 ++-- 4 files changed, 75 insertions(+), 61 deletions(-) diff --git a/base/collect_grpc.go b/base/collect_grpc.go index f78183957..d9bc94768 100644 --- a/base/collect_grpc.go +++ b/base/collect_grpc.go @@ -46,7 +46,7 @@ type GHZResult struct { // key is the endpoint EndpointResults map[string]*runner.Report - Summary Insights + Summary ExperimentResult } // initializeDefaults sets default values for the collect task @@ -163,7 +163,7 @@ func (t *collectGRPCTask) run(exp *Experiment) error { // push data to metrics service ghzResult := GHZResult{ EndpointResults: data, - Summary: *exp.Result.Insights, + Summary: *exp.Result, } // get URL of metrics server from environment variable diff --git a/base/collect_http.go b/base/collect_http.go index 8a2c1b5b1..874879213 100644 --- a/base/collect_http.go +++ b/base/collect_http.go @@ -1,12 +1,9 @@ package base import ( - "bytes" "encoding/json" "fmt" "io" - "net/http" - "net/url" "os" "time" @@ -68,7 +65,7 @@ type HTTPResult struct { // key is the endpoint EndpointResults map[string]*fhttp.HTTPRunnerResults - Summary Insights + Summary ExperimentResult } const ( @@ -217,58 +214,6 @@ func getFortioOptions(c endpoint) (*fhttp.HTTPRunnerOptions, error) { return fo, nil } -func putPerformanceResultToMetricsService(metricsServerURL, namespace, experiment string, data interface{}) error { - // handle URL and URL parameters - u, err := url.ParseRequestURI(metricsServerURL + PerformanceResultPath) - if err != nil { - return err - } - - params := url.Values{} - params.Add("namespace", namespace) - params.Add("experiment", experiment) - u.RawQuery = params.Encode() - urlStr := fmt.Sprintf("%v", u) - - log.Logger.Trace(fmt.Sprintf("PUT request URL: %s", urlStr)) - - // handle payload - dataBytes, err := json.Marshal(data) - if err != nil { - log.Logger.Error("cannot JSON marshal data for metrics server request: ", err) - return err - } - - // create request - req, err := http.NewRequest(http.MethodPut, urlStr, bytes.NewBuffer(dataBytes)) - if err != nil { - log.Logger.Error("cannot create new HTTP request metrics server: ", err) - return err - } - - req.Header.Set("Content-Type", "application/json") - - log.Logger.Trace("sending request") - - // send request - client := &http.Client{} - resp, err := client.Do(req) - if err != nil { - log.Logger.Error("could not send request to metrics server: ", err) - return err - } - defer func() { - err = resp.Body.Close() - if err != nil { - log.Logger.Error("could not close response body: ", err) - } - }() - - log.Logger.Trace("sent request") - - return nil -} - // getFortioResults collects Fortio run results // func (t *collectHTTPTask) getFortioResults() (*fhttp.HTTPRunnerResults, error) { // key is the metric prefix @@ -359,12 +304,18 @@ func (t *collectHTTPTask) run(exp *Experiment) error { return err } + result, _ := json.Marshal(exp.Result) + log.Logger.Trace("before fortioResult", string(result)) + // push data to metrics service fortioResult := HTTPResult{ EndpointResults: data, - Summary: *exp.Result.Insights, + Summary: *exp.Result, } + fortioResultJson, _ := json.Marshal(fortioResult) + log.Logger.Trace("fortioResultJson", string(fortioResultJson)) + // get URL of metrics server from environment variable metricsServerURL, ok := os.LookupEnv(MetricsServerURL) if !ok { diff --git a/base/metrics.go b/base/metrics.go index b5995d920..63d9f6aae 100644 --- a/base/metrics.go +++ b/base/metrics.go @@ -1,5 +1,15 @@ package base +import ( + "bytes" + 
"encoding/json" + "fmt" + "net/http" + "net/url" + + log "github.com/iter8-tools/iter8/base/log" +) + const ( // MetricsServerURL is the URL of the metrics server MetricsServerURL = "METRICS_SERVER_URL" @@ -11,3 +21,55 @@ const ( // GRPCDashboardPath is the path to the GET grpcDashboard/ endpoint GRPCDashboardPath = "/grpcDashboard" ) + +func putPerformanceResultToMetricsService(metricsServerURL, namespace, experiment string, data interface{}) error { + // handle URL and URL parameters + u, err := url.ParseRequestURI(metricsServerURL + PerformanceResultPath) + if err != nil { + return err + } + + params := url.Values{} + params.Add("namespace", namespace) + params.Add("experiment", experiment) + u.RawQuery = params.Encode() + urlStr := fmt.Sprintf("%v", u) + + log.Logger.Trace(fmt.Sprintf("performance result URL: %s", urlStr)) + + // handle payload + dataBytes, err := json.Marshal(data) + if err != nil { + log.Logger.Error("cannot JSON marshal data for metrics server request: ", err) + return err + } + + // create request + req, err := http.NewRequest(http.MethodPut, urlStr, bytes.NewBuffer(dataBytes)) + if err != nil { + log.Logger.Error("cannot create new HTTP request metrics server: ", err) + return err + } + + req.Header.Set("Content-Type", "application/json") + + log.Logger.Trace("sending request") + + // send request + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + log.Logger.Error("could not send request to metrics server: ", err) + return err + } + defer func() { + err = resp.Body.Close() + if err != nil { + log.Logger.Error("could not close response body: ", err) + } + }() + + log.Logger.Trace("sent request") + + return nil +} diff --git a/metrics/server.go b/metrics/server.go index 6d2d35df4..8a9bf197a 100644 --- a/metrics/server.go +++ b/metrics/server.go @@ -13,6 +13,7 @@ import ( "github.com/bojand/ghz/runner" "github.com/iter8-tools/iter8/abn" + "github.com/iter8-tools/iter8/base" util "github.com/iter8-tools/iter8/base" "github.com/iter8-tools/iter8/base/log" "github.com/iter8-tools/iter8/controllers" @@ -80,7 +81,7 @@ type httpDashboard struct { // key is the endpoint Endpoints map[string]httpEndpointRow - Summary util.Insights + Summary base.ExperimentResult } type ghzStatistics struct { @@ -99,7 +100,7 @@ type ghzDashboard struct { // key is the endpoint Endpoints map[string]ghzEndpointRow - Summary util.Insights + Summary base.ExperimentResult } var allRoutemaps controllers.AllRouteMapsInterface = &controllers.DefaultRoutemaps{} From 3276a297a9b166f5b93c1a07c46dfafd7ea43d4a Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Fri, 11 Aug 2023 20:38:03 -0400 Subject: [PATCH 062/121] Remove NumLoops Signed-off-by: Alan Cha --- base/experiment.go | 23 ++++++----------------- base/notify.go | 4 ---- 2 files changed, 6 insertions(+), 21 deletions(-) diff --git a/base/experiment.go b/base/experiment.go index 2ed847892..88a33b59e 100644 --- a/base/experiment.go +++ b/base/experiment.go @@ -65,9 +65,6 @@ type ExperimentResult struct { // StartTime is the time when the experiment run started StartTime time.Time `json:"startTime" yaml:"startTime"` - // NumLoops is the number of iterations this experiment has been running for - NumLoops int `json:"numLoops" yaml:"numLoops"` - // NumCompletedTasks is the number of completed tasks NumCompletedTasks int `json:"numCompletedTasks" yaml:"numCompletedTasks"` @@ -226,7 +223,6 @@ func (exp *Experiment) initResults(revision int) { Namespace: exp.Metadata.Namespace, Revision: revision, StartTime: time.Now(), - NumLoops: 
0, NumCompletedTasks: 0, Failure: false, Iter8Version: MajorMinor, @@ -293,10 +289,6 @@ func (exp *Experiment) run(driver Driver) error { log.Logger.Debug("exp result exists now ... ") - exp.incrementNumLoops() - log.Logger.Debugf("experiment loop %d started ...", exp.Result.NumLoops) - exp.resetNumCompletedTasks() - err = driver.Write(exp) if err != nil { return err @@ -359,15 +351,6 @@ func (exp *Experiment) incrementNumCompletedTasks() { exp.Result.NumCompletedTasks++ } -func (exp *Experiment) resetNumCompletedTasks() { - exp.Result.NumCompletedTasks = 0 -} - -// incrementNumLoops increments the number of loops (experiment iterations) -func (exp *Experiment) incrementNumLoops() { - exp.Result.NumLoops++ -} - // getIf returns the condition (if any) which determine // whether of not if this task needs to run func getIf(t Task) *string { @@ -417,7 +400,13 @@ func RunExperiment(driver Driver) error { return err } + result, _ := json.Marshal(exp.Result) + log.Logger.Trace("Initializing result", string(result)) + exp.initResults(driver.GetRevision()) + result, _ = json.Marshal(exp.Result) + log.Logger.Trace("initialized result", string(result)) + return exp.run(driver) } diff --git a/base/notify.go b/base/notify.go index 5800cf35d..af4a0949f 100644 --- a/base/notify.go +++ b/base/notify.go @@ -63,9 +63,6 @@ type Report struct { // NumCompletedTasks is the number of completed tasks in the experiment NumCompletedTasks int `json:"numCompletedTasks" yaml:"numCompletedTasks"` - // NumLoops is the current loop of the experiment - NumLoops int `json:"numLoops" yaml:"numLoops"` - // Experiment is the experiment struct Experiment *Experiment `json:"experiment" yaml:"experiment"` } @@ -79,7 +76,6 @@ func getReport(exp *Experiment) map[string]Report { NoTaskFailures: exp.NoFailure(), NumTasks: len(exp.Spec), NumCompletedTasks: exp.Result.NumCompletedTasks, - NumLoops: exp.Result.NumLoops, Experiment: exp, }, } From 3a1b9a0dcccb35b6f60b59f1013e1adf85a05077 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Sun, 13 Aug 2023 16:50:28 -0400 Subject: [PATCH 063/121] Add dashboardSummary type Signed-off-by: Alan Cha --- metrics/server.go | 55 +++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 51 insertions(+), 4 deletions(-) diff --git a/metrics/server.go b/metrics/server.go index 8a9bf197a..4a92c70fb 100644 --- a/metrics/server.go +++ b/metrics/server.go @@ -28,6 +28,7 @@ import ( const ( configEnv = "METRICS_CONFIG_FILE" defaultPortNumber = 8080 + timeFormat = "02 Jan 06 15:04 MST" ) // metricsConfig defines the configuration of the controllers @@ -66,6 +67,33 @@ type metricSummary struct { SummaryOverUsers []*versionSummarizedMetric } +// dashboardSummary is a properly capitalized version of ExperimentResult +type dashboardSummary struct { + // Name is the name of this experiment + Name string + + // Namespace is the namespace of this experiment + Namespace string + + // Revision of this experiment + Revision int + + // StartTime is the time when the experiment run started + StartTime string `json:"Start time"` + + // NumCompletedTasks is the number of completed tasks + NumCompletedTasks int `json:"Completed tasks"` + + // Failure is true if any of its tasks failed + Failure bool + + // Insights produced in this experiment + Insights *base.Insights + + // Iter8Version is the version of Iter8 CLI that created this result object + Iter8Version string `json:"Iter8 version"` +} + // httpEndpointRow is the data needed to produce a single row for an HTTP experiment in the Iter8 Grafana dashboard type 
httpEndpointRow struct { Durations grafanaHistogram @@ -81,7 +109,7 @@ type httpDashboard struct { // key is the endpoint Endpoints map[string]httpEndpointRow - Summary base.ExperimentResult + Summary dashboardSummary } type ghzStatistics struct { @@ -100,7 +128,7 @@ type ghzDashboard struct { // key is the endpoint Endpoints map[string]ghzEndpointRow - Summary base.ExperimentResult + Summary dashboardSummary } var allRoutemaps controllers.AllRouteMapsInterface = &controllers.DefaultRoutemaps{} @@ -472,7 +500,16 @@ func getHTTPDashboardHelper(fortioResult util.HTTPResult) httpDashboard { } // add summary - dashboard.Summary = fortioResult.Summary + dashboard.Summary = dashboardSummary{ + Name: fortioResult.Summary.Name, + Namespace: fortioResult.Summary.Namespace, + Revision: fortioResult.Summary.Revision, + StartTime: fortioResult.Summary.StartTime.Time.Format(timeFormat), + NumCompletedTasks: fortioResult.Summary.NumCompletedTasks, + Failure: fortioResult.Summary.Failure, + Insights: fortioResult.Summary.Insights, + Iter8Version: fortioResult.Summary.Iter8Version, + } return dashboard } @@ -651,7 +688,17 @@ func getGRPCDashboardHelper(ghzResult util.GHZResult) ghzDashboard { dashboard.Endpoints[endpoint] = getGRPCEndpointRow(endpointResult) } - dashboard.Summary = ghzResult.Summary + // add summary + dashboard.Summary = dashboardSummary{ + Name: ghzResult.Summary.Name, + Namespace: ghzResult.Summary.Namespace, + Revision: ghzResult.Summary.Revision, + StartTime: ghzResult.Summary.StartTime.Time.Format(timeFormat), + NumCompletedTasks: ghzResult.Summary.NumCompletedTasks, + Failure: ghzResult.Summary.Failure, + Insights: ghzResult.Summary.Insights, + Iter8Version: ghzResult.Summary.Iter8Version, + } return dashboard } From d125727f89e9366f98aaf9c5801600ae56a81cf3 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Mon, 14 Aug 2023 09:18:17 -0400 Subject: [PATCH 064/121] Bump version Signed-off-by: Alan Cha --- Dockerfile | 2 +- base/util.go | 4 ++-- kustomize/iter8/namespaceScoped/configmap.yaml | 2 +- kustomize/iter8/namespaceScoped/statefulset.yaml | 2 +- testdata/controllers/blue-green-http-kserve/initialize.sh | 2 +- testdata/controllers/canary-http-kserve/initialize.sh | 2 +- testdata/controllers/mirror-grpc-kserve/initialize.sh | 2 +- testdata/controllers/mirror/default-routing.sh | 2 +- 8 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Dockerfile b/Dockerfile index 6542f07ce..bd37f6a99 100644 --- a/Dockerfile +++ b/Dockerfile @@ -6,7 +6,7 @@ RUN apt-get update && apt-get install -y curl # Set Iter8 version from build args ARG TAG -ENV TAG=${TAG:-v0.15.0} +ENV TAG=${TAG:-v0.16.0} # Download iter8 compressed binary RUN curl -LO https://github.com/iter8-tools/iter8/releases/download/${TAG}/iter8-linux-amd64.tar.gz diff --git a/base/util.go b/base/util.go index a93f8e0af..2b8aee1dd 100644 --- a/base/util.go +++ b/base/util.go @@ -19,11 +19,11 @@ import ( // MajorMinor is the minor version of Iter8 // set this manually whenever the major or minor version changes -var MajorMinor = "v0.15" +var MajorMinor = "v0.16" // Version is the semantic version of Iter8 (with the `v` prefix) // Version is intended to be set using LDFLAGS at build time -var Version = "v0.15.0" +var Version = "v0.16.0" const ( toYAMLString = "toYaml" diff --git a/kustomize/iter8/namespaceScoped/configmap.yaml b/kustomize/iter8/namespaceScoped/configmap.yaml index 47e6c1db1..8e1fcce0b 100644 --- a/kustomize/iter8/namespaceScoped/configmap.yaml +++ b/kustomize/iter8/namespaceScoped/configmap.yaml @@ -5,7 +5,7 @@ 
metadata: data: config.yaml: | defaultResync: 15m - image: iter8/iter8:0.15 + image: iter8/iter8:0.16 logLevel: info resourceTypes: cm: diff --git a/kustomize/iter8/namespaceScoped/statefulset.yaml b/kustomize/iter8/namespaceScoped/statefulset.yaml index 293e35872..8b7bcc377 100644 --- a/kustomize/iter8/namespaceScoped/statefulset.yaml +++ b/kustomize/iter8/namespaceScoped/statefulset.yaml @@ -16,7 +16,7 @@ spec: serviceAccountName: iter8 containers: - name: iter8-traffic - image: iter8/iter8:0.15 + image: iter8/iter8:0.16 imagePullPolicy: Always command: ["/bin/iter8"] args: ["controllers", "-l", "info"] diff --git a/testdata/controllers/blue-green-http-kserve/initialize.sh b/testdata/controllers/blue-green-http-kserve/initialize.sh index 553af76b1..b82424580 100755 --- a/testdata/controllers/blue-green-http-kserve/initialize.sh +++ b/testdata/controllers/blue-green-http-kserve/initialize.sh @@ -32,7 +32,7 @@ metadata: labels: app.kubernetes.io/managed-by: iter8 iter8.tools/kind: routemap - iter8.tools/version: v0.15 + iter8.tools/version: v0.16 data: strSpec: | versions: diff --git a/testdata/controllers/canary-http-kserve/initialize.sh b/testdata/controllers/canary-http-kserve/initialize.sh index 388250c93..1efd57143 100755 --- a/testdata/controllers/canary-http-kserve/initialize.sh +++ b/testdata/controllers/canary-http-kserve/initialize.sh @@ -32,7 +32,7 @@ metadata: labels: app.kubernetes.io/managed-by: iter8 iter8.tools/kind: routemap - iter8.tools/version: v0.15 + iter8.tools/version: v0.16 data: strSpec: | versions: diff --git a/testdata/controllers/mirror-grpc-kserve/initialize.sh b/testdata/controllers/mirror-grpc-kserve/initialize.sh index 1b62b0f0a..21cd46d97 100755 --- a/testdata/controllers/mirror-grpc-kserve/initialize.sh +++ b/testdata/controllers/mirror-grpc-kserve/initialize.sh @@ -43,7 +43,7 @@ metadata: labels: app.kubernetes.io/managed-by: iter8 iter8.tools/kind: routemap - iter8.tools/version: v0.15 + iter8.tools/version: v0.16 data: strSpec: | versions: diff --git a/testdata/controllers/mirror/default-routing.sh b/testdata/controllers/mirror/default-routing.sh index 33214356e..20b72f341 100755 --- a/testdata/controllers/mirror/default-routing.sh +++ b/testdata/controllers/mirror/default-routing.sh @@ -49,7 +49,7 @@ metadata: labels: app.kubernetes.io/managed-by: iter8 iter8.tools/kind: routemap - iter8.tools/version: v0.15 + iter8.tools/version: v0.16 data: strSpec: | versions: From 58165dc280c575d06c2f472c9ad4c4346e6b6fe5 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Mon, 14 Aug 2023 12:51:50 -0400 Subject: [PATCH 065/121] Add get experiment result methods Signed-off-by: Alan Cha --- storage/badgerdb/simple.go | 68 +++++++++++++++++++++++++++++++++----- storage/interface.go | 17 +++++++--- 2 files changed, 73 insertions(+), 12 deletions(-) diff --git a/storage/badgerdb/simple.go b/storage/badgerdb/simple.go index 53ce60a36..0a3a21e0a 100644 --- a/storage/badgerdb/simple.go +++ b/storage/badgerdb/simple.go @@ -2,6 +2,7 @@ package badgerdb import ( + "encoding/json" "errors" "fmt" "os" @@ -11,6 +12,7 @@ import ( "github.com/dgraph-io/badger/v4" "github.com/imdario/mergo" + "github.com/iter8-tools/iter8/base" "github.com/iter8-tools/iter8/storage" ) @@ -313,26 +315,67 @@ func (cl Client) GetMetrics(applicationName string, version int, signature strin return &metrics, nil } +func getDataKey(namespace, experiment string) string { + // getResultKey() is just getUserPrefix() with the user appended at the end + return fmt.Sprintf("kt-data::%s::%s", namespace, experiment) 
+} + +// SetData sets arbitrary data (such as HTTP/gRPC results) for a particular namespace and experiment name +// the data is []byte in order to make this function reusable for different tasks +func (cl Client) SetData(namespace, experiment string, data []byte) error { + key := getDataKey(namespace, experiment) + + return cl.db.Update(func(txn *badger.Txn) error { + e := badger.NewEntry([]byte(key), data).WithTTL(cl.additionalOptions.TTL) + err := txn.SetEntry(e) + return err + }) +} + +// GetData returns arbitrary data (such as HTTP/gRPC results) for a particular namespace and experiment name +// the data is []byte in order to make this function reusable for different tasks +func (cl Client) GetData(namespace, experiment string) ([]byte, error) { + var valCopy []byte + err := cl.db.View(func(txn *badger.Txn) error { + item, err := txn.Get([]byte(getDataKey(namespace, experiment))) + if err != nil { + return err + } + + valCopy, err = item.ValueCopy(nil) + if err != nil { + return err + } + + return nil + }) + + return valCopy, err +} + func getResultKey(namespace, experiment string) string { // getResultKey() is just getUserPrefix() with the user appended at the end return fmt.Sprintf("kt-result::%s::%s", namespace, experiment) } -// SetResult sets the result of a particular HTTP/gRPC run for a particular namespace and experiment name -// the data is []byte in order to make this function reusable for HTTP and gRPC -func (cl Client) SetResult(namespace, experiment string, data []byte) error { +// SetResult sets the experiment result for a particular namespace and experiment name +func (cl Client) SetResult(namespace, experiment string, experimentResult *base.ExperimentResult) error { key := getResultKey(namespace, experiment) + experimentResultJSON, err := json.Marshal(experimentResult) + if err != nil { + return err + } + return cl.db.Update(func(txn *badger.Txn) error { - e := badger.NewEntry([]byte(key), data).WithTTL(cl.additionalOptions.TTL) + e := badger.NewEntry([]byte(key), []byte(experimentResultJSON)).WithTTL(cl.additionalOptions.TTL) err := txn.SetEntry(e) return err }) } -// GetResult returns the result of a particular HTTP/gRPC run for a particular namespace and experiment name -// the data is []byte in order to make this function reusable for HTTP and gRPC -func (cl Client) GetResult(namespace, experiment string) ([]byte, error) { +// GetResults returns the experiment result for a particular namespace and experiment name +func (cl Client) GetResults(namespace, experiment string) (*base.ExperimentResult, error) { var valCopy []byte err := cl.db.View(func(txn *badger.Txn) error { item, err := txn.Get([]byte(getResultKey(namespace, experiment))) @@ -347,6 +390,15 @@ func (cl Client) GetResult(namespace, experiment string) ([]byte, error) { return nil }) + if err != nil { + return nil, err + } - return valCopy, err + experimentResult := base.ExperimentResult{} + err = json.Unmarshal(valCopy, &experimentResult) + if err != nil { + return nil, err + } + + return &experimentResult, err } diff --git a/storage/interface.go b/storage/interface.go index a44ed7ce1..05abcf00a 100644 --- a/storage/interface.go +++ b/storage/interface.go @@ -1,6 +1,8 @@ // Package storage provides the storage client for the controllers package package storage +import "github.com/iter8-tools/iter8/base" + // SummarizedMetric is a metric summary type SummarizedMetric struct { Count uint64 @@ -64,9 +66,16 @@ type Interface interface { // Example key: kt-users::my-app::0::my-signature::my-user -> true
SetUser(applicationName string, version int, signature, user string) error - // returns the HTTP/gRPC results for a particular namespace and experiment - GetResult(namespace, experiment string) ([]byte, error) + // returns arbitrary data (such as HTTP/gRPC results) for a particular namespace and experiment + GetData(namespace, experiment string) ([]byte, error) + + // Example key: kt-data::my-namespace::my-experiment-name -> per endpoint JSON data + summary + SetData(namespace, experiment string, data []byte) error + + // get ExperimentResult for a particular namespace and experiment + GetResults(namespace, experiment string) (*base.ExperimentResult, error) - // Example key: kt-result::my-namespace::my-experiment-name -> per endpoint JSON data + summary - SetResult(namespace, experiment string, data []byte) error + // called by the A/B/n SDK gRPC API implementation (SDK for application clients) + // Example key: kt-metric::my-app::0::my-signature::my-metric::my-user::my-transaction-id -> my-metric-value (get the metric value with all the provided information) + SetResult(namespace, experiment string, experimentResult *base.ExperimentResult) error } From e17782e0128dac8b2afb50cc6e75584671e904ea Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Wed, 16 Aug 2023 09:13:06 -0400 Subject: [PATCH 066/121] Temp Signed-off-by: Alan Cha --- action/launch_test.go | 7 + action/run_test.go | 4 +- base/collect_grpc.go | 16 +- base/collect_grpc_test.go | 30 +- base/collect_http.go | 19 +- base/collect_http_test.go | 22 +- base/experiment.go | 38 +- base/experiment_test.go | 8 +- base/metrics.go | 42 +- base/test_helpers.go | 46 +- cmd/kassert_test.go | 4 +- cmd/krun_test.go | 25 +- driver/filedriver_test.go | 9 +- driver/kubedriver_test.go | 4 +- metrics/server.go | 210 +++++-- metrics/server_test.go | 1047 ++++++++++++++++--------------- storage/badgerdb/simple.go | 96 ++- storage/badgerdb/simple_test.go | 8 +- storage/interface.go | 13 +- 19 files changed, 950 insertions(+), 698 deletions(-) diff --git a/action/launch_test.go b/action/launch_test.go index 30a64ae85..7ba10974b 100644 --- a/action/launch_test.go +++ b/action/launch_test.go @@ -1,6 +1,7 @@ package action import ( + "fmt" "os" "testing" @@ -21,6 +22,12 @@ func TestKubeLaunch(t *testing.T) { err = lOpts.KubeRun() assert.NoError(t, err) + // x, _ := json.Marshal(lOpts) + // fmt.Println(string(x)) + + fmt.Println(lOpts.Group) + fmt.Println(lOpts.Releases) + rel, err := lOpts.Releases.Last(lOpts.Group) assert.NotNil(t, rel) assert.Equal(t, 1, rel.Version) diff --git a/action/run_test.go b/action/run_test.go index 1cacc5e54..ec467c64a 100644 --- a/action/run_test.go +++ b/action/run_test.go @@ -58,8 +58,8 @@ func TestKubeRun(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, body) - if _, ok := bodyFortioResult.EndpointResults[url]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain endpoint: %s", url)) + if _, ok := bodyFortioResult[url]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", url)) } }, }) diff --git a/base/collect_grpc.go b/base/collect_grpc.go index d9bc94768..8f63243a0 100644 --- a/base/collect_grpc.go +++ b/base/collect_grpc.go @@ -42,12 +42,8 @@ type collectGRPCTask struct { // GHZResult is the raw data sent to the metrics server // This data will be transformed into httpDashboard when getGHZGrafana is called -type GHZResult struct { - // key is the endpoint - EndpointResults map[string]*runner.Report - - Summary ExperimentResult -} +// Key is the 
endpoint +type GHZResult map[string]*runner.Report // initializeDefaults sets default values for the collect task func (t *collectGRPCTask) initializeDefaults() { @@ -160,12 +156,6 @@ func (t *collectGRPCTask) run(exp *Experiment) error { return err } - // push data to metrics service - ghzResult := GHZResult{ - EndpointResults: data, - Summary: *exp.Result, - } - // get URL of metrics server from environment variable metricsServerURL, ok := os.LookupEnv(MetricsServerURL) if !ok { @@ -174,7 +164,7 @@ func (t *collectGRPCTask) run(exp *Experiment) error { return fmt.Errorf(errorMessage) } - if err = putPerformanceResultToMetricsService(metricsServerURL, exp.Metadata.Namespace, exp.Metadata.Name, ghzResult); err != nil { + if err = putPerformanceResultToMetricsService(metricsServerURL, exp.Metadata.Namespace, exp.Metadata.Name, data); err != nil { return err } diff --git a/base/collect_grpc_test.go b/base/collect_grpc_test.go index c180207ff..64170c422 100644 --- a/base/collect_grpc_test.go +++ b/base/collect_grpc_test.go @@ -57,8 +57,8 @@ func TestRunCollectGRPCUnary(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, body) - if _, ok := bodyFortioResult.EndpointResults[call]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain endpoint: %s", call)) + if _, ok := bodyFortioResult[call]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", call)) } }, }) @@ -170,20 +170,20 @@ func TestRunCollectGRPCMultipleEndpoints(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, body) - if _, ok := bodyFortioResult.EndpointResults[unary]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain endpoint: %s", unary)) + if _, ok := bodyFortioResult[unary]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", unary)) } - if _, ok := bodyFortioResult.EndpointResults[server]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain endpoint: %s", server)) + if _, ok := bodyFortioResult[server]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", server)) } - if _, ok := bodyFortioResult.EndpointResults[client]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain endpoint: %s", client)) + if _, ok := bodyFortioResult[client]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", client)) } - if _, ok := bodyFortioResult.EndpointResults[bidirectional]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain endpoint: %s", bidirectional)) + if _, ok := bodyFortioResult[bidirectional]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", bidirectional)) } }, }) @@ -283,7 +283,7 @@ func TestRunCollectGRPCMultipleNoEndpoints(t *testing.T) { bodyFortioResult := HTTPResult{} err = json.Unmarshal(body, &bodyFortioResult) assert.NoError(t, err) - assert.Equal(t, `{"EndpointResults":{},"Summary":{"numVersions":1,"versionNames":null}}`, string(body)) + assert.Equal(t, `{}`, string(body)) }, }) @@ -364,12 +364,12 @@ func TestRunCollectGRPCSingleEndpointMultipleCalls(t *testing.T) { fmt.Println(string(body)) - if _, ok := bodyFortioResult.EndpointResults[unary]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain endpoint: %s", unary)) + if _, ok := bodyFortioResult[unary]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult 
does not contain endpoint: %s", unary)) } - if _, ok := bodyFortioResult.EndpointResults[unary2]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain endpoint: %s", unary2)) + if _, ok := bodyFortioResult[unary2]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", unary2)) } }, }) diff --git a/base/collect_http.go b/base/collect_http.go index 874879213..340dca439 100644 --- a/base/collect_http.go +++ b/base/collect_http.go @@ -61,12 +61,8 @@ type collectHTTPInputs struct { // HTTPResult is the raw data sent to the metrics server // This data will be transformed into httpDashboard when getHTTPGrafana is called -type HTTPResult struct { - // key is the endpoint - EndpointResults map[string]*fhttp.HTTPRunnerResults - - Summary ExperimentResult -} +// Key is the endpoint +type HTTPResult map[string]*fhttp.HTTPRunnerResults const ( // CollectHTTPTaskName is the name of this task which performs load generation and metrics collection. @@ -307,15 +303,6 @@ func (t *collectHTTPTask) run(exp *Experiment) error { result, _ := json.Marshal(exp.Result) log.Logger.Trace("before fortioResult", string(result)) - // push data to metrics service - fortioResult := HTTPResult{ - EndpointResults: data, - Summary: *exp.Result, - } - - fortioResultJson, _ := json.Marshal(fortioResult) - log.Logger.Trace("fortioResultJson", string(fortioResultJson)) - // get URL of metrics server from environment variable metricsServerURL, ok := os.LookupEnv(MetricsServerURL) if !ok { @@ -324,7 +311,7 @@ func (t *collectHTTPTask) run(exp *Experiment) error { return fmt.Errorf(errorMessage) } - if err = putPerformanceResultToMetricsService(metricsServerURL, exp.Metadata.Namespace, exp.Metadata.Name, fortioResult); err != nil { + if err = putPerformanceResultToMetricsService(metricsServerURL, exp.Metadata.Namespace, exp.Metadata.Name, data); err != nil { return err } diff --git a/base/collect_http_test.go b/base/collect_http_test.go index 1ec2f1dcd..91cb96435 100644 --- a/base/collect_http_test.go +++ b/base/collect_http_test.go @@ -75,8 +75,8 @@ func TestRunCollectHTTP(t *testing.T) { fmt.Println(string(body)) - if _, ok := bodyFortioResult.EndpointResults[url]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain endpoint: %s", url)) + if _, ok := bodyFortioResult[url]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", url)) } }, }) @@ -208,12 +208,12 @@ func TestRunCollectHTTPMultipleEndpoints(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, body) - if _, ok := bodyFortioResult.EndpointResults[endpoint1]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain endpoint: %s", endpoint1)) + if _, ok := bodyFortioResult[endpoint1]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", endpoint1)) } - if _, ok := bodyFortioResult.EndpointResults[endpoint2]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain endpoint: %s", endpoint2)) + if _, ok := bodyFortioResult[endpoint2]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", endpoint2)) } }, }) @@ -314,12 +314,12 @@ func TestRunCollectHTTPSingleEndpointMultipleCalls(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, body) - if _, ok := bodyFortioResult.EndpointResults[endpoint1]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain endpoint: %s", 
endpoint1)) + if _, ok := bodyFortioResult[endpoint1]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", endpoint1)) } - if _, ok := bodyFortioResult.EndpointResults[endpoint2]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain endpoint: %s", endpoint2)) + if _, ok := bodyFortioResult[endpoint2]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", endpoint2)) } }, }) @@ -403,7 +403,7 @@ func TestRunCollectHTTPMultipleNoEndpoints(t *testing.T) { assert.NoError(t, err) // no EndpointResults because endpoints cannot be reached - assert.Equal(t, `{"EndpointResults":{},"Summary":{"numVersions":1,"versionNames":null}}`, string(body)) + assert.Equal(t, `{}`, string(body)) }, }) diff --git a/base/experiment.go b/base/experiment.go index 88a33b59e..c8919f8b5 100644 --- a/base/experiment.go +++ b/base/experiment.go @@ -4,6 +4,7 @@ import ( "encoding/json" "errors" "fmt" + "os" "github.com/antonmedv/expr" log "github.com/iter8-tools/iter8/base/log" @@ -253,6 +254,7 @@ type Driver interface { // Read the experiment Read() (*Experiment, error) + // deprecated // Write the experiment Write(e *Experiment) error @@ -280,23 +282,28 @@ func (exp *Experiment) NoFailure() bool { // run the experiment func (exp *Experiment) run(driver Driver) error { var err error + + // TODO: reduce repetition, create package local variable and do validation + // get URL of metrics server from environment variable + metricsServerURL, ok := os.LookupEnv(MetricsServerURL) + if !ok { + errorMessage := "could not look up METRICS_SERVER_URL environment variable" + log.Logger.Error(errorMessage) + return fmt.Errorf(errorMessage) + } + exp.driver = driver if exp.Result == nil { err = errors.New("experiment with nil result section cannot be run") log.Logger.Error(err) return err } - log.Logger.Debug("exp result exists now ... 
") - err = driver.Write(exp) - if err != nil { - return err - } - log.Logger.Debugf("attempting to execute %v tasks", len(exp.Spec)) for i, t := range exp.Spec { log.Logger.Info("task " + fmt.Sprintf("%v: %v", i+1, *getName(t)) + ": started") + shouldRun := true // if task has a condition if cond := getIf(t); cond != nil { @@ -320,9 +327,15 @@ func (exp *Experiment) run(driver Driver) error { if err != nil { log.Logger.Error("task " + fmt.Sprintf("%v: %v", i+1, *getName(t)) + ": " + "failure") exp.failExperiment() - e := driver.Write(exp) - if e != nil { - return e + + // TODO: remove + err = driver.Write(exp) + if err != nil { + return err + } + err = putExperimentResultToMetricsService(metricsServerURL, exp.Metadata.Namespace, exp.Metadata.Name, exp.Result) + if err != nil { + return err } return err } @@ -332,8 +345,13 @@ func (exp *Experiment) run(driver Driver) error { } exp.incrementNumCompletedTasks() - err = driver.Write(exp) + // TODO: remove + err = driver.Write(exp) + if err != nil { + return err + } + err = putExperimentResultToMetricsService(metricsServerURL, exp.Metadata.Namespace, exp.Metadata.Name, exp.Result) if err != nil { return err } diff --git a/base/experiment_test.go b/base/experiment_test.go index 308785632..ce28df03a 100644 --- a/base/experiment_test.go +++ b/base/experiment_test.go @@ -66,8 +66,8 @@ func TestRunningTasks(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, body) - if _, ok := bodyFortioResult.EndpointResults[url]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain endpoint: %s", url)) + if _, ok := bodyFortioResult[url]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", url)) } }, }) @@ -140,8 +140,8 @@ func TestRunExperiment(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, body) - if _, ok := bodyFortioResult.EndpointResults[url]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain endpoint: %s", url)) + if _, ok := bodyFortioResult[url]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", url)) } }, }) diff --git a/base/metrics.go b/base/metrics.go index 63d9f6aae..d7c990ee4 100644 --- a/base/metrics.go +++ b/base/metrics.go @@ -13,39 +13,45 @@ import ( const ( // MetricsServerURL is the URL of the metrics server MetricsServerURL = "METRICS_SERVER_URL" - // PerformanceResultPath is the path to the PUT performanceResult/ endpoint - PerformanceResultPath = "/performanceResult" - // HTTPDashboardPath is the path to the GET httpDashboard/ endpoint + // MetricsPath is the path to the GET /metrics endpoint + MetricsPath = "/metrics" + + // PerformanceResultPath is the path to the PUT /performanceResult endpoint + PerformanceResultPath = "/performanceResult" + // ExperimentResultPath is the path to the PUT /experimentResult endpoint + ExperimentResultPath = "/experimentResult" + // HTTPDashboardPath is the path to the GET /httpDashboard endpoint HTTPDashboardPath = "/httpDashboard" - // GRPCDashboardPath is the path to the GET grpcDashboard/ endpoint + // GRPCDashboardPath is the path to the GET /grpcDashboard endpoint GRPCDashboardPath = "/grpcDashboard" ) -func putPerformanceResultToMetricsService(metricsServerURL, namespace, experiment string, data interface{}) error { +func callMetricsService(method, metricsServerURL, path string, queryParams map[string]string, payload interface{}) error { // handle URL and URL parameters - u, err := url.ParseRequestURI(metricsServerURL + 
PerformanceResultPath) + u, err := url.ParseRequestURI(metricsServerURL + path) if err != nil { return err } params := url.Values{} - params.Add("namespace", namespace) - params.Add("experiment", experiment) + for paramKey, paramValue := range queryParams { + params.Add(paramKey, paramValue) + } u.RawQuery = params.Encode() urlStr := fmt.Sprintf("%v", u) - log.Logger.Trace(fmt.Sprintf("performance result URL: %s", urlStr)) + log.Logger.Trace(fmt.Sprintf("call metrics service URL: %s", urlStr)) // handle payload - dataBytes, err := json.Marshal(data) + dataBytes, err := json.Marshal(payload) if err != nil { log.Logger.Error("cannot JSON marshal data for metrics server request: ", err) return err } // create request - req, err := http.NewRequest(http.MethodPut, urlStr, bytes.NewBuffer(dataBytes)) + req, err := http.NewRequest(method, urlStr, bytes.NewBuffer(dataBytes)) if err != nil { log.Logger.Error("cannot create new HTTP request metrics server: ", err) return err @@ -73,3 +79,17 @@ func putPerformanceResultToMetricsService(metricsServerURL, namespace, experimen return nil } + +func putPerformanceResultToMetricsService(metricsServerURL, namespace, experiment string, data interface{}) error { + return callMetricsService(http.MethodPut, metricsServerURL, PerformanceResultPath, map[string]string{ + "namespace": namespace, + "experiment": experiment, + }, data) +} + +func putExperimentResultToMetricsService(metricsServerURL, namespace, experiment string, experimentResult *ExperimentResult) error { + return callMetricsService(http.MethodPut, metricsServerURL, ExperimentResultPath, map[string]string{ + "namespace": namespace, + "experiment": experiment, + }, experimentResult) +} diff --git a/base/test_helpers.go b/base/test_helpers.go index 8bb29c7b2..f855a039d 100644 --- a/base/test_helpers.go +++ b/base/test_helpers.go @@ -81,17 +81,43 @@ type MetricsServerCallback func(req *http.Request) type MockMetricsServerInput struct { MetricsServerURL string - // GET /httpDashboard - HTTPDashboardCallback MetricsServerCallback - // GET /grpcDashboard - GRPCDashboardCallback MetricsServerCallback // PUT /performanceResult PerformanceResultCallback MetricsServerCallback + // PUT /experimentResult + ExperimentResultCallback MetricsServerCallback + // GET /grpcDashboard + GRPCDashboardCallback MetricsServerCallback + // GET /httpDashboard + HTTPDashboardCallback MetricsServerCallback } // MockMetricsServer is a mock metrics server // use the callback functions in the MockMetricsServerInput to test if those endpoints are called func MockMetricsServer(input MockMetricsServerInput) { + // PUT /performanceResult + httpmock.RegisterResponder( + http.MethodPut, + input.MetricsServerURL+PerformanceResultPath, + func(req *http.Request) (*http.Response, error) { + if input.PerformanceResultCallback != nil { + input.PerformanceResultCallback(req) + } + return httpmock.NewStringResponse(200, "success"), nil + }, + ) + + // PUT /experimentResult + httpmock.RegisterResponder( + http.MethodPut, + input.MetricsServerURL+ExperimentResultPath, + func(req *http.Request) (*http.Response, error) { + if input.ExperimentResultCallback != nil { + input.ExperimentResultCallback(req) + } + return httpmock.NewStringResponse(200, "success"), nil + }, + ) + // GET /httpDashboard httpmock.RegisterResponder( http.MethodGet, @@ -116,16 +142,4 @@ func MockMetricsServer(input MockMetricsServerInput) { return httpmock.NewStringResponse(200, "success"), nil }, ) - - // PUT /performanceResult - httpmock.RegisterResponder( - http.MethodPut, - 
input.MetricsServerURL+PerformanceResultPath, - func(req *http.Request) (*http.Response, error) { - if input.PerformanceResultCallback != nil { - input.PerformanceResultCallback(req) - } - return httpmock.NewStringResponse(200, "success"), nil - }, - ) } diff --git a/cmd/kassert_test.go b/cmd/kassert_test.go index 92545f268..5af259c2a 100644 --- a/cmd/kassert_test.go +++ b/cmd/kassert_test.go @@ -59,8 +59,8 @@ func TestKAssert(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, body) - if _, ok := bodyFortioResult.EndpointResults[url]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain endpoint: %s", url)) + if _, ok := bodyFortioResult[url]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", url)) } }, }) diff --git a/cmd/krun_test.go b/cmd/krun_test.go index cf6bc1fb3..74ed976e2 100644 --- a/cmd/krun_test.go +++ b/cmd/krun_test.go @@ -52,10 +52,31 @@ func TestKRun(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, body) - if _, ok := bodyFortioResult.EndpointResults[url]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain endpoint: %s", url)) + if _, ok := bodyFortioResult[url]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", url)) } }, + ExperimentResultCallback: func(req *http.Request) { + metricsServerCalled = true + + // check query parameters + assert.Equal(t, myName, req.URL.Query().Get("experiment")) + assert.Equal(t, myNamespace, req.URL.Query().Get("namespace")) + + // check payload + body, err := io.ReadAll(req.Body) + assert.NoError(t, err) + assert.NotNil(t, body) + + // check payload content + bodyExperimentResult := base.ExperimentResult{} + + err = json.Unmarshal(body, &bodyExperimentResult) + assert.NoError(t, err) + assert.NotNil(t, body) + assert.Equal(t, myName, bodyExperimentResult.Name) + assert.Equal(t, myNamespace, bodyExperimentResult.Namespace) + }, }) _ = os.Chdir(t.TempDir()) diff --git a/driver/filedriver_test.go b/driver/filedriver_test.go index 661340687..94f300492 100644 --- a/driver/filedriver_test.go +++ b/driver/filedriver_test.go @@ -53,8 +53,8 @@ func TestLocalRun(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, body) - if _, ok := bodyFortioResult.EndpointResults[url]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain endpoint: %s", url)) + if _, ok := bodyFortioResult[url]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", url)) } }, }) @@ -75,6 +75,11 @@ func TestLocalRun(t *testing.T) { // check results exp, err := base.BuildExperiment(&fd) assert.NoError(t, err) + + x, _ := json.Marshal(exp) + fmt.Println(string(x)) + fmt.Println(err, exp.Completed(), exp.NoFailure()) + assert.True(t, exp.Completed() && exp.NoFailure()) assert.True(t, metricsServerCalled) } diff --git a/driver/kubedriver_test.go b/driver/kubedriver_test.go index c86090c2f..c6f2a2f8b 100644 --- a/driver/kubedriver_test.go +++ b/driver/kubedriver_test.go @@ -102,8 +102,8 @@ func TestKubeRun(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, body) - if _, ok := bodyFortioResult.EndpointResults[url]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult.EndpointResult does not contain endpoint: %s", url)) + if _, ok := bodyFortioResult[url]; !ok { + assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", url)) } }, }) diff --git a/metrics/server.go b/metrics/server.go index 4a92c70fb..cb09458cd 100644 --- 
a/metrics/server.go +++ b/metrics/server.go @@ -67,8 +67,8 @@ type metricSummary struct { SummaryOverUsers []*versionSummarizedMetric } -// dashboardSummary is a properly capitalized version of ExperimentResult -type dashboardSummary struct { +// dashboardExperimentResult is a capitalized version of ExperimentResult used to display data in Grafana +type dashboardExperimentResult struct { // Name is the name of this experiment Name string @@ -109,7 +109,7 @@ type httpDashboard struct { // key is the endpoint Endpoints map[string]httpEndpointRow - Summary dashboardSummary + ExperimentResult dashboardExperimentResult } type ghzStatistics struct { @@ -128,7 +128,7 @@ type ghzDashboard struct { // key is the endpoint Endpoints map[string]ghzEndpointRow - Summary dashboardSummary + ExperimentResult dashboardExperimentResult } var allRoutemaps controllers.AllRouteMapsInterface = &controllers.DefaultRoutemaps{} @@ -148,8 +148,10 @@ func Start(stopCh <-chan struct{}) error { } // configure endpoints - http.HandleFunc("/metrics", getMetrics) - http.HandleFunc(util.PerformanceResultPath, putResult) + http.HandleFunc(util.MetricsPath, getMetrics) + + http.HandleFunc(util.PerformanceResultPath, putPerformanceResult) + http.HandleFunc(util.ExperimentResultPath, putExperimentResult) http.HandleFunc(util.HTTPDashboardPath, getHTTPDashboard) http.HandleFunc(util.GRPCDashboardPath, getGRPCDashboard) @@ -188,7 +190,7 @@ func getMetrics(w http.ResponseWriter, r *http.Request) { return } - // verify request (query parameter) + // verify request (query parameters) application := r.URL.Query().Get("application") if application == "" { http.Error(w, "no application specified", http.StatusBadRequest) @@ -489,33 +491,93 @@ func getHTTPEndpointRow(httpRunnerResults *fhttp.HTTPRunnerResults) httpEndpoint return row } -func getHTTPDashboardHelper(fortioResult util.HTTPResult) httpDashboard { +func getHTTPDashboardHelper(fortioResult util.HTTPResult, experimentResult base.ExperimentResult) httpDashboard { dashboard := httpDashboard{ Endpoints: map[string]httpEndpointRow{}, } - for endpoint, endpointResult := range fortioResult.EndpointResults { + for endpoint, endpointResult := range fortioResult { endpointResult := endpointResult dashboard.Endpoints[endpoint] = getHTTPEndpointRow(endpointResult) } - // add summary - dashboard.Summary = dashboardSummary{ - Name: fortioResult.Summary.Name, - Namespace: fortioResult.Summary.Namespace, - Revision: fortioResult.Summary.Revision, - StartTime: fortioResult.Summary.StartTime.Time.Format(timeFormat), - NumCompletedTasks: fortioResult.Summary.NumCompletedTasks, - Failure: fortioResult.Summary.Failure, - Insights: fortioResult.Summary.Insights, - Iter8Version: fortioResult.Summary.Iter8Version, + experimentResultJSON, _ := json.Marshal(experimentResult) + fmt.Println(string(experimentResultJSON)) + + dashboard.ExperimentResult = dashboardExperimentResult{ + Name: experimentResult.Name, + Namespace: experimentResult.Namespace, + Revision: experimentResult.Revision, + StartTime: experimentResult.StartTime.Time.Format(timeFormat), + NumCompletedTasks: experimentResult.NumCompletedTasks, + Failure: experimentResult.Failure, + Insights: experimentResult.Insights, + Iter8Version: experimentResult.Iter8Version, } return dashboard } -// putResult handles PUT /result with query parameter application=namespace/name -func putResult(w http.ResponseWriter, r *http.Request) { +// putPerformanceResult handles PUT /performanceResult with query parameter application=namespace/name +func 
putPerformanceResult(w http.ResponseWriter, r *http.Request) {
+	log.Logger.Trace("putPerformanceResult called")
+	defer log.Logger.Trace("putPerformanceResult completed")
+
+	// verify method
+	if r.Method != http.MethodPut {
+		http.Error(w, "expected PUT", http.StatusMethodNotAllowed)
+		return
+	}
+
+	// verify request (query parameters)
+	namespace := r.URL.Query().Get("namespace")
+	if namespace == "" {
+		http.Error(w, "no namespace specified", http.StatusBadRequest)
+		return
+	}
+
+	experiment := r.URL.Query().Get("experiment")
+	if experiment == "" {
+		http.Error(w, "no experiment specified", http.StatusBadRequest)
+		return
+	}
+
+	log.Logger.Tracef("putPerformanceResult called for namespace %s and experiment %s", namespace, experiment)
+
+	defer func() {
+		err := r.Body.Close()
+		if err != nil {
+			errorMessage := fmt.Sprintf("cannot close request body: %v", err)
+			log.Logger.Error(errorMessage)
+			http.Error(w, errorMessage, http.StatusBadRequest)
+			return
+		}
+	}()
+	body, err := io.ReadAll(r.Body)
+	if err != nil {
+		errorMessage := fmt.Sprintf("cannot read request body: %v", err)
+		log.Logger.Error(errorMessage)
+		http.Error(w, errorMessage, http.StatusBadRequest)
+		return
+	}
+
+	if abn.MetricsClient == nil {
+		http.Error(w, "no metrics client", http.StatusInternalServerError)
+		return
+	}
+	err = abn.MetricsClient.SetData(namespace, experiment, body)
+	if err != nil {
+		errorMessage := fmt.Sprintf("cannot store result in storage client: %s: %v", string(body), err)
+		log.Logger.Error(errorMessage)
+		http.Error(w, errorMessage, http.StatusInternalServerError)
+		return
+	}
+
+	// TODO: 201 for new resource, 200 for update
+}
+
+// putExperimentResult handles PUT /experimentResult with query parameters namespace and experiment
+func putExperimentResult(w http.ResponseWriter, r *http.Request) {
 	log.Logger.Trace("putResult called")
 	defer log.Logger.Trace("putResult completed")
@@ -525,9 +587,7 @@ func putResult(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	// verify request (query parameter)
-	// Key: kt-result::my-namespace::my-experiment-name::my-endpoint
-	// Should namespace and experiment name come from application?
+ // verify request (query parameters) namespace := r.URL.Query().Get("namespace") if namespace == "" { http.Error(w, "no namespace specified", http.StatusBadRequest) @@ -563,7 +623,8 @@ func putResult(w http.ResponseWriter, r *http.Request) { http.Error(w, "no metrics client", http.StatusInternalServerError) return } - err = abn.MetricsClient.SetResult(namespace, experiment, body) + + err = abn.MetricsClient.SetExperimentResult(namespace, experiment, body) if err != nil { errorMessage := fmt.Sprintf("cannot store result in storage client: %s: %e", string(body), err) log.Logger.Error(errorMessage) @@ -585,9 +646,7 @@ func getHTTPDashboard(w http.ResponseWriter, r *http.Request) { return } - // verify request (query parameter) - // required namespace and experiment name - // Key: kt-result::my-namespace::my-experiment-name::my-endpoint + // verify request (query parameters) namespace := r.URL.Query().Get("namespace") if namespace == "" { http.Error(w, "no namespace specified", http.StatusBadRequest) @@ -602,30 +661,48 @@ func getHTTPDashboard(w http.ResponseWriter, r *http.Request) { log.Logger.Tracef("getHTTPGrafana called for namespace %s and experiment %s", namespace, experiment) - // get result from metrics client + // get fortioResult from metrics client if abn.MetricsClient == nil { http.Error(w, "no metrics client", http.StatusInternalServerError) return } - result, err := abn.MetricsClient.GetResult(namespace, experiment) + fortioResultsBytes, err := abn.MetricsClient.GetData(namespace, experiment) if err != nil { - errorMessage := fmt.Sprintf("cannot get result with namespace %s, experiment %s", namespace, experiment) + errorMessage := fmt.Sprintf("cannot get Fortio result with namespace %s, experiment %s", namespace, experiment) log.Logger.Error(errorMessage) http.Error(w, errorMessage, http.StatusBadRequest) return } fortioResult := util.HTTPResult{} - err = json.Unmarshal(result, &fortioResult) + err = json.Unmarshal(fortioResultsBytes, &fortioResult) if err != nil { - errorMessage := fmt.Sprintf("cannot JSON unmarshal result into FortioResult: \"%s\"", string(result)) + errorMessage := fmt.Sprintf("cannot JSON unmarshal FortioResult: \"%s\"", string(fortioResultsBytes)) + log.Logger.Error(errorMessage) + http.Error(w, errorMessage, http.StatusInternalServerError) + return + } + + // get experimentResult from metrics client + experimentResultBytes, err := abn.MetricsClient.GetExperimentResult(namespace, experiment) + if err != nil { + errorMessage := fmt.Sprintf("cannot get experiment result with namespace %s, experiment %s", namespace, experiment) + log.Logger.Error(errorMessage) + http.Error(w, errorMessage, http.StatusBadRequest) + return + } + + experimentResult := util.ExperimentResult{} + err = json.Unmarshal(experimentResultBytes, &experimentResult) + if err != nil { + errorMessage := fmt.Sprintf("cannot JSON unmarshal ExperimentResult: \"%s\"", string(experimentResultBytes)) log.Logger.Error(errorMessage) http.Error(w, errorMessage, http.StatusInternalServerError) return } // JSON marshal the dashboard - dashboardBytes, err := json.Marshal(getHTTPDashboardHelper(fortioResult)) + dashboardBytes, err := json.Marshal(getHTTPDashboardHelper(fortioResult, experimentResult)) if err != nil { errorMessage := "cannot JSON marshal HTTP dashboard" log.Logger.Error(errorMessage) @@ -678,34 +755,33 @@ func getGRPCEndpointRow(ghzRunnerReport *runner.Report) ghzEndpointRow { return row } -func getGRPCDashboardHelper(ghzResult util.GHZResult) ghzDashboard { +func 
getGRPCDashboardHelper(ghzResult util.GHZResult, experimentResult base.ExperimentResult) ghzDashboard { dashboard := ghzDashboard{ Endpoints: map[string]ghzEndpointRow{}, } - for endpoint, endpointResult := range ghzResult.EndpointResults { + for endpoint, endpointResult := range ghzResult { endpointResult := endpointResult dashboard.Endpoints[endpoint] = getGRPCEndpointRow(endpointResult) } - // add summary - dashboard.Summary = dashboardSummary{ - Name: ghzResult.Summary.Name, - Namespace: ghzResult.Summary.Namespace, - Revision: ghzResult.Summary.Revision, - StartTime: ghzResult.Summary.StartTime.Time.Format(timeFormat), - NumCompletedTasks: ghzResult.Summary.NumCompletedTasks, - Failure: ghzResult.Summary.Failure, - Insights: ghzResult.Summary.Insights, - Iter8Version: ghzResult.Summary.Iter8Version, + dashboard.ExperimentResult = dashboardExperimentResult{ + Name: experimentResult.Name, + Namespace: experimentResult.Namespace, + Revision: experimentResult.Revision, + StartTime: experimentResult.StartTime.Time.Format(timeFormat), + NumCompletedTasks: experimentResult.NumCompletedTasks, + Failure: experimentResult.Failure, + Insights: experimentResult.Insights, + Iter8Version: experimentResult.Iter8Version, } return dashboard } func getGRPCDashboard(w http.ResponseWriter, r *http.Request) { - log.Logger.Trace("getGHZDashboard called") - defer log.Logger.Trace("getGHZDashboard completed") + log.Logger.Trace("getGRPCDashboard called") + defer log.Logger.Trace("getGRPCDashboard completed") // verify method if r.Method != http.MethodGet { @@ -713,9 +789,7 @@ func getGRPCDashboard(w http.ResponseWriter, r *http.Request) { return } - // verify request (query parameter) - // required namespace and experiment name - // Key: kt-result::my-namespace::my-experiment-name::my-endpoint + // verify request (query parameters) namespace := r.URL.Query().Get("namespace") if namespace == "" { http.Error(w, "no namespace specified", http.StatusBadRequest) @@ -728,34 +802,52 @@ func getGRPCDashboard(w http.ResponseWriter, r *http.Request) { return } - log.Logger.Tracef("getGHZDashboard called for namespace %s and experiment %s", namespace, experiment) + log.Logger.Tracef("getGRPCDashboard called for namespace %s and experiment %s", namespace, experiment) - // get result from metrics client + // get ghz result from metrics client if abn.MetricsClient == nil { http.Error(w, "no metrics client", http.StatusInternalServerError) return } - result, err := abn.MetricsClient.GetResult(namespace, experiment) + ghzResultBytes, err := abn.MetricsClient.GetData(namespace, experiment) if err != nil { - errorMessage := fmt.Sprintf("cannot get result with namespace %s, experiment %s", namespace, experiment) + errorMessage := fmt.Sprintf("cannot get ghz result with namespace %s, experiment %s", namespace, experiment) log.Logger.Error(errorMessage) http.Error(w, errorMessage, http.StatusBadRequest) return } ghzResult := util.GHZResult{} - err = json.Unmarshal(result, &ghzResult) + err = json.Unmarshal(ghzResultBytes, &ghzResult) + if err != nil { + errorMessage := fmt.Sprintf("cannot JSON unmarshal GHZResult: \"%s\"", string(ghzResultBytes)) + log.Logger.Error(errorMessage) + http.Error(w, errorMessage, http.StatusInternalServerError) + return + } + + // get experimentResult from metrics client + experimentResultBytes, err := abn.MetricsClient.GetExperimentResult(namespace, experiment) + if err != nil { + errorMessage := fmt.Sprintf("cannot get experiment result with namespace %s, experiment %s", namespace, experiment) + 
log.Logger.Error(errorMessage) + http.Error(w, errorMessage, http.StatusBadRequest) + return + } + + experimentResult := util.ExperimentResult{} + err = json.Unmarshal(experimentResultBytes, &experimentResult) if err != nil { - errorMessage := fmt.Sprintf("cannot JSON unmarshal result into GHZResult: \"%s\"", string(result)) + errorMessage := fmt.Sprintf("cannot JSON unmarshal ExperimentResult: \"%s\"", string(experimentResultBytes)) log.Logger.Error(errorMessage) http.Error(w, errorMessage, http.StatusInternalServerError) return } // JSON marshal the dashboard - dashboardBytes, err := json.Marshal(getGRPCDashboardHelper(ghzResult)) + dashboardBytes, err := json.Marshal(getGRPCDashboardHelper(ghzResult, experimentResult)) if err != nil { - errorMessage := "cannot JSON marshal ghz dashboard" + errorMessage := "cannot JSON marshal gRPC dashboard" log.Logger.Error(errorMessage) http.Error(w, errorMessage, http.StatusInternalServerError) return diff --git a/metrics/server_test.go b/metrics/server_test.go index a1f16726e..618156f3b 100644 --- a/metrics/server_test.go +++ b/metrics/server_test.go @@ -24,6 +24,490 @@ import ( "github.com/stretchr/testify/assert" ) +const ( + myName = "my-name" + myNamespace = "my-namespace" +) + +const fortioResultJSON = `{ + "http://httpbin.default/get": { + "RunType": "HTTP", + "Labels": "", + "StartTime": "2023-07-21T14:00:40.134434969Z", + "RequestedQPS": "8", + "RequestedDuration": "exactly 100 calls", + "ActualQPS": 7.975606391552989, + "ActualDuration": 12538231589, + "NumThreads": 4, + "Version": "1.57.3", + "DurationHistogram": { + "Count": 100, + "Min": 0.004223875, + "Max": 0.040490042, + "Sum": 1.5977100850000001, + "Avg": 0.015977100850000002, + "StdDev": 0.008340658047253256, + "Data": [ + { + "Start": 0.004223875, + "End": 0.005, + "Percent": 5, + "Count": 5 + }, + { + "Start": 0.005, + "End": 0.006, + "Percent": 10, + "Count": 5 + }, + { + "Start": 0.006, + "End": 0.007, + "Percent": 14, + "Count": 4 + }, + { + "Start": 0.007, + "End": 0.008, + "Percent": 19, + "Count": 5 + }, + { + "Start": 0.008, + "End": 0.009000000000000001, + "Percent": 24, + "Count": 5 + }, + { + "Start": 0.009000000000000001, + "End": 0.01, + "Percent": 28, + "Count": 4 + }, + { + "Start": 0.01, + "End": 0.011, + "Percent": 33, + "Count": 5 + }, + { + "Start": 0.011, + "End": 0.012, + "Percent": 36, + "Count": 3 + }, + { + "Start": 0.012, + "End": 0.014, + "Percent": 48, + "Count": 12 + }, + { + "Start": 0.014, + "End": 0.016, + "Percent": 55, + "Count": 7 + }, + { + "Start": 0.016, + "End": 0.018000000000000002, + "Percent": 65, + "Count": 10 + }, + { + "Start": 0.018000000000000002, + "End": 0.02, + "Percent": 74, + "Count": 9 + }, + { + "Start": 0.02, + "End": 0.025, + "Percent": 85, + "Count": 11 + }, + { + "Start": 0.025, + "End": 0.03, + "Percent": 93, + "Count": 8 + }, + { + "Start": 0.03, + "End": 0.035, + "Percent": 98, + "Count": 5 + }, + { + "Start": 0.035, + "End": 0.04, + "Percent": 99, + "Count": 1 + }, + { + "Start": 0.04, + "End": 0.040490042, + "Percent": 100, + "Count": 1 + } + ], + "Percentiles": [ + { + "Percentile": 50, + "Value": 0.014571428571428572 + }, + { + "Percentile": 75, + "Value": 0.020454545454545454 + }, + { + "Percentile": 90, + "Value": 0.028125 + }, + { + "Percentile": 95, + "Value": 0.032 + }, + { + "Percentile": 99, + "Value": 0.04 + }, + { + "Percentile": 99.9, + "Value": 0.0404410378 + } + ] + }, + "ErrorsDurationHistogram": { + "Count": 0, + "Min": 0, + "Max": 0, + "Sum": 0, + "Avg": 0, + "StdDev": 0, + "Data": null + }, + "Exactly": 
100, + "Jitter": false, + "Uniform": false, + "NoCatchUp": false, + "RunID": 0, + "AccessLoggerInfo": "", + "ID": "2023-07-21-140040", + "RetCodes": { + "200": 100 + }, + "IPCountMap": { + "10.96.108.76:80": 4 + }, + "Insecure": false, + "MTLS": false, + "CACert": "", + "Cert": "", + "Key": "", + "UnixDomainSocket": "", + "URL": "http://httpbin.default/get", + "NumConnections": 1, + "Compression": false, + "DisableFastClient": false, + "HTTP10": false, + "H2": false, + "DisableKeepAlive": false, + "AllowHalfClose": false, + "FollowRedirects": false, + "Resolve": "", + "HTTPReqTimeOut": 3000000000, + "UserCredentials": "", + "ContentType": "", + "Payload": null, + "MethodOverride": "", + "LogErrors": false, + "SequentialWarmup": false, + "ConnReuseRange": [ + 0, + 0 + ], + "NoResolveEachConn": false, + "Offset": 0, + "Resolution": 0.001, + "Sizes": { + "Count": 100, + "Min": 413, + "Max": 413, + "Sum": 41300, + "Avg": 413, + "StdDev": 0, + "Data": [ + { + "Start": 413, + "End": 413, + "Percent": 100, + "Count": 100 + } + ] + }, + "HeaderSizes": { + "Count": 100, + "Min": 230, + "Max": 230, + "Sum": 23000, + "Avg": 230, + "StdDev": 0, + "Data": [ + { + "Start": 230, + "End": 230, + "Percent": 100, + "Count": 100 + } + ] + }, + "Sockets": [ + 1, + 1, + 1, + 1 + ], + "SocketCount": 4, + "ConnectionStats": { + "Count": 4, + "Min": 0.001385875, + "Max": 0.001724375, + "Sum": 0.006404583, + "Avg": 0.00160114575, + "StdDev": 0.00013101857565508474, + "Data": [ + { + "Start": 0.001385875, + "End": 0.001724375, + "Percent": 100, + "Count": 4 + } + ], + "Percentiles": [ + { + "Percentile": 50, + "Value": 0.0014987083333333332 + }, + { + "Percentile": 75, + "Value": 0.0016115416666666667 + }, + { + "Percentile": 90, + "Value": 0.0016792416666666667 + }, + { + "Percentile": 95, + "Value": 0.0017018083333333333 + }, + { + "Percentile": 99, + "Value": 0.0017198616666666668 + }, + { + "Percentile": 99.9, + "Value": 0.0017239236666666668 + } + ] + }, + "AbortOn": 0 + } +}` + +const fortioDashboardJSON = `{"Endpoints":{"http://httpbin.default/get":{"Durations":[{"Version":"0","Bucket":"4.2 - 5","Value":5},{"Version":"0","Bucket":"5 - 6","Value":5},{"Version":"0","Bucket":"6 - 7","Value":4},{"Version":"0","Bucket":"7 - 8","Value":5},{"Version":"0","Bucket":"8 - 9","Value":5},{"Version":"0","Bucket":"9 - 10","Value":4},{"Version":"0","Bucket":"10 - 11","Value":5},{"Version":"0","Bucket":"11 - 12","Value":3},{"Version":"0","Bucket":"12 - 14","Value":12},{"Version":"0","Bucket":"14 - 16","Value":7},{"Version":"0","Bucket":"16 - 18","Value":10},{"Version":"0","Bucket":"18 - 20","Value":9},{"Version":"0","Bucket":"20 - 25","Value":11},{"Version":"0","Bucket":"25 - 30","Value":8},{"Version":"0","Bucket":"30 - 35","Value":5},{"Version":"0","Bucket":"35 - 40","Value":1},{"Version":"0","Bucket":"40 - 40.4","Value":1}],"Statistics":{"Count":100,"Mean":15.977100850000001,"StdDev":8.340658047253257,"Min":4.2238750000000005,"Max":40.490041999999995},"Error durations":[],"Error statistics":{"Count":0,"Mean":0,"StdDev":0,"Min":0,"Max":0},"Return codes":{"200":100}}},"ExperimentResult":{"Name":"my-name","Namespace":"my-namespace","Revision":0,"Start time":"01 Jan 01 00:00 UTC","Completed tasks":5,"Failure":false,"Insights":null,"Iter8 version":""}}` + +const ghzResultJSON = `{ + "routeguide.RouteGuide.GetFeature": { + "date": "2023-07-17T12:23:56Z", + "endReason": "normal", + "options": { + "call": "routeguide.RouteGuide.GetFeature", + "host": "routeguide.default:50051", + "proto": "/tmp/ghz.proto", + "import-paths": [ + 
"/tmp", + "." + ], + "insecure": true, + "load-schedule": "const", + "load-start": 0, + "load-end": 0, + "load-step": 0, + "load-step-duration": 0, + "load-max-duration": 0, + "concurrency": 50, + "concurrency-schedule": "const", + "concurrency-start": 1, + "concurrency-end": 0, + "concurrency-step": 0, + "concurrency-step-duration": 0, + "concurrency-max-duration": 0, + "total": 200, + "connections": 1, + "dial-timeout": 10000000000, + "data": { + "latitude": 407838351, + "longitude": -746143763 + }, + "binary": false, + "CPUs": 5, + "count-errors": true + }, + "count": 200, + "total": 592907667, + "average": 25208185, + "fastest": 32375, + "slowest": 195740917, + "rps": 337.3206506368217, + "errorDistribution": { + "rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.96.20.53:50051: connect: connection refused\"": 200 + }, + "statusCodeDistribution": { + "Unavailable": 200 + }, + "latencyDistribution": [ + { + "percentage": 10, + "latency": 35584 + }, + { + "percentage": 25, + "latency": 39958 + }, + { + "percentage": 50, + "latency": 86208 + }, + { + "percentage": 75, + "latency": 12777625 + }, + { + "percentage": 90, + "latency": 106714334 + }, + { + "percentage": 95, + "latency": 189847000 + }, + { + "percentage": 99, + "latency": 195400792 + } + ], + "histogram": [ + { + "mark": 0.000032375, + "count": 1, + "frequency": 0.005 + }, + { + "mark": 0.0196032292, + "count": 167, + "frequency": 0.835 + }, + { + "mark": 0.0391740834, + "count": 0, + "frequency": 0 + }, + { + "mark": 0.05874493759999999, + "count": 0, + "frequency": 0 + }, + { + "mark": 0.07831579179999999, + "count": 0, + "frequency": 0 + }, + { + "mark": 0.097886646, + "count": 3, + "frequency": 0.015 + }, + { + "mark": 0.11745750019999998, + "count": 13, + "frequency": 0.065 + }, + { + "mark": 0.1370283544, + "count": 0, + "frequency": 0 + }, + { + "mark": 0.15659920859999998, + "count": 0, + "frequency": 0 + }, + { + "mark": 0.17617006279999997, + "count": 0, + "frequency": 0 + }, + { + "mark": 0.195740917, + "count": 16, + "frequency": 0.08 + } + ], + "details": [ + { + "timestamp": "2023-07-17T12:23:56.089998719Z", + "latency": 14490041, + "error": "rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.96.20.53:50051: connect: connection refused\"", + "status": "Unavailable" + }, + { + "timestamp": "2023-07-17T12:23:56.090471886Z", + "latency": 13759125, + "error": "rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.96.20.53:50051: connect: connection refused\"", + "status": "Unavailable" + }, + { + "timestamp": "2023-07-17T12:23:56.090528678Z", + "latency": 194468542, + "error": "rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.96.20.53:50051: connect: connection refused\"", + "status": "Unavailable" + }, + { + "timestamp": "2023-07-17T12:23:56.090079886Z", + "latency": 105031291, + "error": "rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.96.20.53:50051: connect: connection refused\"", + "status": "Unavailable" + }, + { + "timestamp": "2023-07-17T12:23:56.090224928Z", + "latency": 100337083, + "error": "rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.96.20.53:50051: connect: connection refused\"", + "status": "Unavailable" + }, + { + "timestamp": 
"2023-07-17T12:23:56.091097053Z", + "latency": 12463750, + "error": "rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.96.20.53:50051: connect: connection refused\"", + "status": "Unavailable" + }, + { + "timestamp": "2023-07-17T12:23:56.091135844Z", + "latency": 12603875, + "error": "rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.96.20.53:50051: connect: connection refused\"", + "status": "Unavailable" + }, + { + "timestamp": "2023-07-17T12:23:56.478469636Z", + "latency": 86208, + "error": "rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.96.20.53:50051: connect: connection refused\"", + "status": "Unavailable" + } + ] + } +}` + +const ghzDashboardJSON = `{"Endpoints":{"routeguide.RouteGuide.GetFeature":{"Durations":[{"Version":"0","Bucket":"0.032","Value":1},{"Version":"0","Bucket":"19.603","Value":167},{"Version":"0","Bucket":"39.174","Value":0},{"Version":"0","Bucket":"58.744","Value":0},{"Version":"0","Bucket":"78.315","Value":0},{"Version":"0","Bucket":"97.886","Value":3},{"Version":"0","Bucket":"117.457","Value":13},{"Version":"0","Bucket":"137.028","Value":0},{"Version":"0","Bucket":"156.599","Value":0},{"Version":"0","Bucket":"176.17","Value":0},{"Version":"0","Bucket":"195.74","Value":16}],"Statistics":{"Count":200,"ErrorCount":200},"Status codes":{"Unavailable":200}}},"ExperimentResult":{"Name":"my-name","Namespace":"my-namespace","Revision":0,"Start time":"01 Jan 01 00:00 UTC","Completed tasks":5,"Failure":false,"Insights":null,"Iter8 version":""}}` + func TestStart(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() @@ -354,7 +838,13 @@ func TestGetHTTPDashboardHelper(t *testing.T) { err := json.Unmarshal([]byte(fortioResultJSON), &fortioResult) assert.NoError(t, err) - dashboard := getHTTPDashboardHelper(fortioResult) + experimentResult := util.ExperimentResult{ + Name: myName, + Namespace: myNamespace, + NumCompletedTasks: 5, + } + + dashboard := getHTTPDashboardHelper(fortioResult, experimentResult) assert.NotNil(t, dashboard) dashboardBytes, err := json.Marshal(dashboard) assert.NoError(t, err) @@ -366,12 +856,18 @@ func TestGetHTTPDashboardHelper(t *testing.T) { ) } -func TestGetGHZDashboardHelper(t *testing.T) { +func TestGetGRPCDashboardHelper(t *testing.T) { ghzResult := util.GHZResult{} err := json.Unmarshal([]byte(ghzResultJSON), &ghzResult) assert.NoError(t, err) - dashboard := getGRPCDashboardHelper(ghzResult) + experimentResult := util.ExperimentResult{ + Name: myName, + Namespace: myNamespace, + NumCompletedTasks: 5, + } + + dashboard := getGRPCDashboardHelper(ghzResult, experimentResult) assert.NotNil(t, dashboard) dashboardBytes, err := json.Marshal(dashboard) @@ -383,10 +879,10 @@ func TestGetGHZDashboardHelper(t *testing.T) { ) } -func TestPutResultInvalidMethod(t *testing.T) { +func TestPutPerformanceResultInvalidMethod(t *testing.T) { w := httptest.NewRecorder() req := httptest.NewRequest(http.MethodGet, util.PerformanceResultPath, nil) - putResult(w, req) + putPerformanceResult(w, req) res := w.Result() defer func() { err := res.Body.Close() @@ -395,7 +891,7 @@ func TestPutResultInvalidMethod(t *testing.T) { assert.Equal(t, http.StatusMethodNotAllowed, res.StatusCode) } -func TestPutResultMissingParameter(t *testing.T) { +func TestPutPerformanceResultMissingParameter(t *testing.T) { tests := []struct { queryParams url.Values 
expectedStatusCode int @@ -427,7 +923,7 @@ func TestPutResultMissingParameter(t *testing.T) { req := httptest.NewRequest(http.MethodPut, urlStr, nil) - putResult(w, req) + putPerformanceResult(w, req) res := w.Result() defer func() { err := res.Body.Close() @@ -438,7 +934,7 @@ func TestPutResultMissingParameter(t *testing.T) { } } -func TestPutResult(t *testing.T) { +func TestPutPerformanceResult(t *testing.T) { // instantiate metrics client tempDirPath := t.TempDir() client, err := badgerdb.GetClient(badger.DefaultOptions(tempDirPath), badgerdb.AdditionalOptions{}) @@ -447,7 +943,7 @@ func TestPutResult(t *testing.T) { w := httptest.NewRecorder() - // construct inputs to putResult + // construct inputs to putPerformanceResult u, err := url.ParseRequestURI(util.PerformanceResultPath) assert.NoError(t, err) params := url.Values{ @@ -461,7 +957,7 @@ func TestPutResult(t *testing.T) { req := httptest.NewRequest(http.MethodPut, urlStr, bytes.NewBuffer([]byte(payload))) // put result into the metrics client - putResult(w, req) + putPerformanceResult(w, req) res := w.Result() defer func() { err := res.Body.Close() @@ -469,7 +965,7 @@ func TestPutResult(t *testing.T) { }() // check to see if the result is stored in the metrics client - result, err := abn.MetricsClient.GetResult("default", "default") + result, err := abn.MetricsClient.GetData("default", "default") assert.NoError(t, err) assert.Equal(t, payload, string(result)) } @@ -528,300 +1024,6 @@ func TestGetHTTPDashboardMissingParameter(t *testing.T) { } } -const fortioResultJSON = `{ - "EndpointResults": { - "http://httpbin.default/get": { - "RunType": "HTTP", - "Labels": "", - "StartTime": "2023-07-21T14:00:40.134434969Z", - "RequestedQPS": "8", - "RequestedDuration": "exactly 100 calls", - "ActualQPS": 7.975606391552989, - "ActualDuration": 12538231589, - "NumThreads": 4, - "Version": "1.57.3", - "DurationHistogram": { - "Count": 100, - "Min": 0.004223875, - "Max": 0.040490042, - "Sum": 1.5977100850000001, - "Avg": 0.015977100850000002, - "StdDev": 0.008340658047253256, - "Data": [ - { - "Start": 0.004223875, - "End": 0.005, - "Percent": 5, - "Count": 5 - }, - { - "Start": 0.005, - "End": 0.006, - "Percent": 10, - "Count": 5 - }, - { - "Start": 0.006, - "End": 0.007, - "Percent": 14, - "Count": 4 - }, - { - "Start": 0.007, - "End": 0.008, - "Percent": 19, - "Count": 5 - }, - { - "Start": 0.008, - "End": 0.009000000000000001, - "Percent": 24, - "Count": 5 - }, - { - "Start": 0.009000000000000001, - "End": 0.01, - "Percent": 28, - "Count": 4 - }, - { - "Start": 0.01, - "End": 0.011, - "Percent": 33, - "Count": 5 - }, - { - "Start": 0.011, - "End": 0.012, - "Percent": 36, - "Count": 3 - }, - { - "Start": 0.012, - "End": 0.014, - "Percent": 48, - "Count": 12 - }, - { - "Start": 0.014, - "End": 0.016, - "Percent": 55, - "Count": 7 - }, - { - "Start": 0.016, - "End": 0.018000000000000002, - "Percent": 65, - "Count": 10 - }, - { - "Start": 0.018000000000000002, - "End": 0.02, - "Percent": 74, - "Count": 9 - }, - { - "Start": 0.02, - "End": 0.025, - "Percent": 85, - "Count": 11 - }, - { - "Start": 0.025, - "End": 0.03, - "Percent": 93, - "Count": 8 - }, - { - "Start": 0.03, - "End": 0.035, - "Percent": 98, - "Count": 5 - }, - { - "Start": 0.035, - "End": 0.04, - "Percent": 99, - "Count": 1 - }, - { - "Start": 0.04, - "End": 0.040490042, - "Percent": 100, - "Count": 1 - } - ], - "Percentiles": [ - { - "Percentile": 50, - "Value": 0.014571428571428572 - }, - { - "Percentile": 75, - "Value": 0.020454545454545454 - }, - { - "Percentile": 90, - 
"Value": 0.028125 - }, - { - "Percentile": 95, - "Value": 0.032 - }, - { - "Percentile": 99, - "Value": 0.04 - }, - { - "Percentile": 99.9, - "Value": 0.0404410378 - } - ] - }, - "ErrorsDurationHistogram": { - "Count": 0, - "Min": 0, - "Max": 0, - "Sum": 0, - "Avg": 0, - "StdDev": 0, - "Data": null - }, - "Exactly": 100, - "Jitter": false, - "Uniform": false, - "NoCatchUp": false, - "RunID": 0, - "AccessLoggerInfo": "", - "ID": "2023-07-21-140040", - "RetCodes": { - "200": 100 - }, - "IPCountMap": { - "10.96.108.76:80": 4 - }, - "Insecure": false, - "MTLS": false, - "CACert": "", - "Cert": "", - "Key": "", - "UnixDomainSocket": "", - "URL": "http://httpbin.default/get", - "NumConnections": 1, - "Compression": false, - "DisableFastClient": false, - "HTTP10": false, - "H2": false, - "DisableKeepAlive": false, - "AllowHalfClose": false, - "FollowRedirects": false, - "Resolve": "", - "HTTPReqTimeOut": 3000000000, - "UserCredentials": "", - "ContentType": "", - "Payload": null, - "MethodOverride": "", - "LogErrors": false, - "SequentialWarmup": false, - "ConnReuseRange": [ - 0, - 0 - ], - "NoResolveEachConn": false, - "Offset": 0, - "Resolution": 0.001, - "Sizes": { - "Count": 100, - "Min": 413, - "Max": 413, - "Sum": 41300, - "Avg": 413, - "StdDev": 0, - "Data": [ - { - "Start": 413, - "End": 413, - "Percent": 100, - "Count": 100 - } - ] - }, - "HeaderSizes": { - "Count": 100, - "Min": 230, - "Max": 230, - "Sum": 23000, - "Avg": 230, - "StdDev": 0, - "Data": [ - { - "Start": 230, - "End": 230, - "Percent": 100, - "Count": 100 - } - ] - }, - "Sockets": [ - 1, - 1, - 1, - 1 - ], - "SocketCount": 4, - "ConnectionStats": { - "Count": 4, - "Min": 0.001385875, - "Max": 0.001724375, - "Sum": 0.006404583, - "Avg": 0.00160114575, - "StdDev": 0.00013101857565508474, - "Data": [ - { - "Start": 0.001385875, - "End": 0.001724375, - "Percent": 100, - "Count": 4 - } - ], - "Percentiles": [ - { - "Percentile": 50, - "Value": 0.0014987083333333332 - }, - { - "Percentile": 75, - "Value": 0.0016115416666666667 - }, - { - "Percentile": 90, - "Value": 0.0016792416666666667 - }, - { - "Percentile": 95, - "Value": 0.0017018083333333333 - }, - { - "Percentile": 99, - "Value": 0.0017198616666666668 - }, - { - "Percentile": 99.9, - "Value": 0.0017239236666666668 - } - ] - }, - "AbortOn": 0 - } - }, - "Summary": { - "numVersions": 1, - "versionNames": null - } -}` - -const fortioDashboardJSON = `{"Endpoints":{"http://httpbin.default/get":{"Durations":[{"Version":"0","Bucket":"4.2 - 5","Value":5},{"Version":"0","Bucket":"5 - 6","Value":5},{"Version":"0","Bucket":"6 - 7","Value":4},{"Version":"0","Bucket":"7 - 8","Value":5},{"Version":"0","Bucket":"8 - 9","Value":5},{"Version":"0","Bucket":"9 - 10","Value":4},{"Version":"0","Bucket":"10 - 11","Value":5},{"Version":"0","Bucket":"11 - 12","Value":3},{"Version":"0","Bucket":"12 - 14","Value":12},{"Version":"0","Bucket":"14 - 16","Value":7},{"Version":"0","Bucket":"16 - 18","Value":10},{"Version":"0","Bucket":"18 - 20","Value":9},{"Version":"0","Bucket":"20 - 25","Value":11},{"Version":"0","Bucket":"25 - 30","Value":8},{"Version":"0","Bucket":"30 - 35","Value":5},{"Version":"0","Bucket":"35 - 40","Value":1},{"Version":"0","Bucket":"40 - 40.4","Value":1}],"Statistics":{"Count":100,"Mean":15.977100850000001,"StdDev":8.340658047253257,"Min":4.2238750000000005,"Max":40.490041999999995},"Error durations":[],"Error statistics":{"Count":0,"Mean":0,"StdDev":0,"Min":0,"Max":0},"Return codes":{"200":100}}},"Summary":{"numVersions":1,"versionNames":null}}` - func TestGetHTTPDashboard(t 
*testing.T) { // instantiate metrics client tempDirPath := t.TempDir() @@ -829,8 +1031,19 @@ func TestGetHTTPDashboard(t *testing.T) { assert.NoError(t, err) abn.MetricsClient = client - // preload metric client with result - err = abn.MetricsClient.SetResult("default", "default", []byte(fortioResultJSON)) + // preload metric client with fortio result + err = abn.MetricsClient.SetData("default", "default", []byte(fortioResultJSON)) + assert.NoError(t, err) + + // preload metric client with experiment result + experimentResult := util.ExperimentResult{ + Name: myName, + Namespace: myNamespace, + NumCompletedTasks: 5, + } + experimentResultBytes, err := json.Marshal(experimentResult) + assert.NoError(t, err) + err = abn.MetricsClient.SetExperimentResult("default", "default", []byte(experimentResultBytes)) assert.NoError(t, err) w := httptest.NewRecorder() @@ -865,10 +1078,10 @@ func TestGetHTTPDashboard(t *testing.T) { ) } -func TestGetGHZDashboardInvalidMethod(t *testing.T) { +func TestGetGRPCDashboardInvalidMethod(t *testing.T) { w := httptest.NewRecorder() req := httptest.NewRequest(http.MethodPost, util.PerformanceResultPath, nil) - putResult(w, req) + putPerformanceResult(w, req) res := w.Result() defer func() { err := res.Body.Close() @@ -877,7 +1090,7 @@ func TestGetGHZDashboardInvalidMethod(t *testing.T) { assert.Equal(t, http.StatusMethodNotAllowed, res.StatusCode) } -func TestGetGHZDashboardMissingParameter(t *testing.T) { +func TestGetGRPCDashboardMissingParameter(t *testing.T) { tests := []struct { queryParams url.Values expectedStatusCode int @@ -909,7 +1122,7 @@ func TestGetGHZDashboardMissingParameter(t *testing.T) { req := httptest.NewRequest(http.MethodPut, urlStr, nil) - putResult(w, req) + putPerformanceResult(w, req) res := w.Result() defer func() { err := res.Body.Close() @@ -920,213 +1133,31 @@ func TestGetGHZDashboardMissingParameter(t *testing.T) { } } -const ghzResultJSON = `{ - "EndpointResults": { - "routeguide.RouteGuide.GetFeature": { - "date": "2023-07-17T12:23:56Z", - "endReason": "normal", - "options": { - "call": "routeguide.RouteGuide.GetFeature", - "host": "routeguide.default:50051", - "proto": "/tmp/ghz.proto", - "import-paths": [ - "/tmp", - "." 
- ], - "insecure": true, - "load-schedule": "const", - "load-start": 0, - "load-end": 0, - "load-step": 0, - "load-step-duration": 0, - "load-max-duration": 0, - "concurrency": 50, - "concurrency-schedule": "const", - "concurrency-start": 1, - "concurrency-end": 0, - "concurrency-step": 0, - "concurrency-step-duration": 0, - "concurrency-max-duration": 0, - "total": 200, - "connections": 1, - "dial-timeout": 10000000000, - "data": { - "latitude": 407838351, - "longitude": -746143763 - }, - "binary": false, - "CPUs": 5, - "count-errors": true - }, - "count": 200, - "total": 592907667, - "average": 25208185, - "fastest": 32375, - "slowest": 195740917, - "rps": 337.3206506368217, - "errorDistribution": { - "rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.96.20.53:50051: connect: connection refused\"": 200 - }, - "statusCodeDistribution": { - "Unavailable": 200 - }, - "latencyDistribution": [ - { - "percentage": 10, - "latency": 35584 - }, - { - "percentage": 25, - "latency": 39958 - }, - { - "percentage": 50, - "latency": 86208 - }, - { - "percentage": 75, - "latency": 12777625 - }, - { - "percentage": 90, - "latency": 106714334 - }, - { - "percentage": 95, - "latency": 189847000 - }, - { - "percentage": 99, - "latency": 195400792 - } - ], - "histogram": [ - { - "mark": 0.000032375, - "count": 1, - "frequency": 0.005 - }, - { - "mark": 0.0196032292, - "count": 167, - "frequency": 0.835 - }, - { - "mark": 0.0391740834, - "count": 0, - "frequency": 0 - }, - { - "mark": 0.05874493759999999, - "count": 0, - "frequency": 0 - }, - { - "mark": 0.07831579179999999, - "count": 0, - "frequency": 0 - }, - { - "mark": 0.097886646, - "count": 3, - "frequency": 0.015 - }, - { - "mark": 0.11745750019999998, - "count": 13, - "frequency": 0.065 - }, - { - "mark": 0.1370283544, - "count": 0, - "frequency": 0 - }, - { - "mark": 0.15659920859999998, - "count": 0, - "frequency": 0 - }, - { - "mark": 0.17617006279999997, - "count": 0, - "frequency": 0 - }, - { - "mark": 0.195740917, - "count": 16, - "frequency": 0.08 - } - ], - "details": [ - { - "timestamp": "2023-07-17T12:23:56.089998719Z", - "latency": 14490041, - "error": "rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.96.20.53:50051: connect: connection refused\"", - "status": "Unavailable" - }, - { - "timestamp": "2023-07-17T12:23:56.090471886Z", - "latency": 13759125, - "error": "rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.96.20.53:50051: connect: connection refused\"", - "status": "Unavailable" - }, - { - "timestamp": "2023-07-17T12:23:56.090528678Z", - "latency": 194468542, - "error": "rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.96.20.53:50051: connect: connection refused\"", - "status": "Unavailable" - }, - { - "timestamp": "2023-07-17T12:23:56.090079886Z", - "latency": 105031291, - "error": "rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.96.20.53:50051: connect: connection refused\"", - "status": "Unavailable" - }, - { - "timestamp": "2023-07-17T12:23:56.090224928Z", - "latency": 100337083, - "error": "rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.96.20.53:50051: connect: connection refused\"", - "status": "Unavailable" - }, - { - "timestamp": "2023-07-17T12:23:56.091097053Z", - 
"latency": 12463750, - "error": "rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.96.20.53:50051: connect: connection refused\"", - "status": "Unavailable" - }, - { - "timestamp": "2023-07-17T12:23:56.091135844Z", - "latency": 12603875, - "error": "rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.96.20.53:50051: connect: connection refused\"", - "status": "Unavailable" - }, - { - "timestamp": "2023-07-17T12:23:56.478469636Z", - "latency": 86208, - "error": "rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing: dial tcp 10.96.20.53:50051: connect: connection refused\"", - "status": "Unavailable" - } - ] - } - } -}` - -const ghzDashboardJSON = `{"Endpoints":{"routeguide.RouteGuide.GetFeature":{"Durations":[{"Version":"0","Bucket":"0.032","Value":1},{"Version":"0","Bucket":"19.603","Value":167},{"Version":"0","Bucket":"39.174","Value":0},{"Version":"0","Bucket":"58.744","Value":0},{"Version":"0","Bucket":"78.315","Value":0},{"Version":"0","Bucket":"97.886","Value":3},{"Version":"0","Bucket":"117.457","Value":13},{"Version":"0","Bucket":"137.028","Value":0},{"Version":"0","Bucket":"156.599","Value":0},{"Version":"0","Bucket":"176.17","Value":0},{"Version":"0","Bucket":"195.74","Value":16}],"Statistics":{"Count":200,"ErrorCount":200},"Status codes":{"Unavailable":200}}},"Summary":{"numVersions":0,"versionNames":null}}` - -func TestGetGHZDashboard(t *testing.T) { +func TestGetGRPCDashboard(t *testing.T) { // instantiate metrics client tempDirPath := t.TempDir() client, err := badgerdb.GetClient(badger.DefaultOptions(tempDirPath), badgerdb.AdditionalOptions{}) assert.NoError(t, err) abn.MetricsClient = client - // preload metric client with result - err = abn.MetricsClient.SetResult("default", "default", []byte(ghzResultJSON)) + // preload metric client with ghz result + err = abn.MetricsClient.SetData("default", "default", []byte(ghzResultJSON)) + assert.NoError(t, err) + + // preload metric client with experiment result + experimentResult := util.ExperimentResult{ + Name: myName, + Namespace: myNamespace, + NumCompletedTasks: 5, + } + experimentResultBytes, err := json.Marshal(experimentResult) + assert.NoError(t, err) + err = abn.MetricsClient.SetExperimentResult("default", "default", []byte(experimentResultBytes)) assert.NoError(t, err) w := httptest.NewRecorder() - // construct inputs to getGHZDashboard + // construct inputs to getGRPCDashboard u, err := url.ParseRequestURI(util.PerformanceResultPath) assert.NoError(t, err) params := url.Values{ diff --git a/storage/badgerdb/simple.go b/storage/badgerdb/simple.go index 0a3a21e0a..274773caf 100644 --- a/storage/badgerdb/simple.go +++ b/storage/badgerdb/simple.go @@ -316,7 +316,7 @@ func (cl Client) GetMetrics(applicationName string, version int, signature strin } func getDataKey(namespace, experiment string) string { - // getResultKey() is just getUserPrefix() with the user appended at the end + // getDataKey() is just getUserPrefix() with the user appended at the end return fmt.Sprintf("kt-data::%s::%s", namespace, experiment) } @@ -353,14 +353,14 @@ func (cl Client) GetData(namespace, experiment string) ([]byte, error) { return valCopy, err } -func getResultKey(namespace, experiment string) string { - // getResultKey() is just getUserPrefix() with the user appended at the end +func getExperimentResultKey(namespace, experiment string) string { + // getExperimentResultKey() is just 
getUserPrefix() with the user appended at the end
 	return fmt.Sprintf("kt-result::%s::%s", namespace, experiment)
 }
 
 // SetResult sets the experiment result for a particular namespace and experiment name
 func (cl Client) SetResult(namespace, experiment string, experimentResult *base.ExperimentResult) error {
-	key := getResultKey(namespace, experiment)
+	key := getExperimentResultKey(namespace, experiment)
 
 	experimentResultJSON, err := json.Marshal(experimentResult)
 	if err != nil {
@@ -374,11 +374,82 @@ func (cl Client) SetResult(namespace, experiment string, experimentResult *base.
 	})
 }
 
-// GetData returns the experiment result for a particular namespace and experiment name
-func (cl Client) GetResults(namespace, experiment string) (*base.ExperimentResult, error) {
+// // GetData returns the experiment result for a particular namespace and experiment name
+// func (cl Client) GetExperimentResult(namespace, experiment string) (*base.ExperimentResult, error) {
+// 	var valCopy []byte
+// 	err := cl.db.View(func(txn *badger.Txn) error {
+// 		item, err := txn.Get([]byte(getExperimentResultKey(namespace, experiment)))
+// 		if err != nil {
+// 			return err
+// 		}
+
+// 		valCopy, err = item.ValueCopy(nil)
+// 		if err != nil {
+// 			return err
+// 		}
+
+// 		return nil
+// 	})
+// 	if err != nil {
+// 		return nil, err
+// 	}
+
+// 	experimentResult := base.ExperimentResult{}
+// 	err = json.Unmarshal(valCopy, &experimentResult)
+// 	if err != nil {
+// 		return nil, err
+// 	}
+
+// 	return &experimentResult, err
+// }
+
+// // GetData returns the experiment result for a particular namespace and experiment name
+// func (cl Client) GetExperimentResult(namespace, experiment string) ([]byte, error) {
+// 	var valCopy []byte
+// 	err := cl.db.View(func(txn *badger.Txn) error {
+// 		item, err := txn.Get([]byte(getExperimentResultKey(namespace, experiment)))
+// 		if err != nil {
+// 			return err
+// 		}

+// 		valCopy, err = item.ValueCopy(nil)
+// 		if err != nil {
+// 			return err
+// 		}

+// 		return nil
+// 	})
+// 	if err != nil {
+// 		return nil, err
+// 	}

+// 	experimentResult := base.ExperimentResult{}
+// 	err = json.Unmarshal(valCopy, &experimentResult)
+// 	if err != nil {
+// 		return nil, err
+// 	}

+// 	return &experimentResult, err
+// }
+
+// SetExperimentResult sets the experiment result for a particular namespace and experiment name
+// the data is []byte in order to make this function reusable for different tasks
+func (cl Client) SetExperimentResult(namespace, experiment string, data []byte) error {
+	key := getExperimentResultKey(namespace, experiment)
+
+	return cl.db.Update(func(txn *badger.Txn) error {
+		e := badger.NewEntry([]byte(key), data).WithTTL(cl.additionalOptions.TTL)
+		err := txn.SetEntry(e)
+		return err
+	})
+}
+
+// GetExperimentResult returns the experiment result for a particular namespace and experiment name
+// the data is []byte in order to make this function reusable for different tasks
+func (cl Client) GetExperimentResult(namespace, experiment string) ([]byte, error) {
 	var valCopy []byte
 	err := cl.db.View(func(txn *badger.Txn) error {
-		item, err := txn.Get([]byte(getResultKey(namespace, experiment)))
+		item, err := txn.Get([]byte(getExperimentResultKey(namespace, experiment)))
 		if err != nil {
 			return err
 		}
@@ -390,15 +461,6 @@ func (cl Client) GetResults(namespace, experiment string) (*base.ExperimentResul
 		return nil
 	})
-	if err != nil {
-		return nil, err
-	}
-
-	experimentResult := base.ExperimentResult{}
-	err = json.Unmarshal(valCopy, &experimentResult)
-	if err
!= nil { - return nil, err - } - - return &experimentResult, err + return valCopy, err } diff --git a/storage/badgerdb/simple_test.go b/storage/badgerdb/simple_test.go index 4d8f3329d..148b4faee 100644 --- a/storage/badgerdb/simple_test.go +++ b/storage/badgerdb/simple_test.go @@ -283,12 +283,12 @@ func TestSetResult(t *testing.T) { experiment := "my-experiment" data := "hello world" - err = client.SetResult(namespace, experiment, []byte(data)) + err = client.SetData(namespace, experiment, []byte(data)) assert.NoError(t, err) // get result err = client.db.View(func(txn *badger.Txn) error { - key := getResultKey(namespace, experiment) + key := getDataKey(namespace, experiment) item, err := txn.Get([]byte(key)) assert.NoError(t, err) assert.NotNil(t, item) @@ -314,10 +314,10 @@ func TestGetResult(t *testing.T) { experiment := "my-experiment" data := "hello world" - err = client.SetResult(namespace, experiment, []byte(data)) + err = client.SetData(namespace, experiment, []byte(data)) assert.NoError(t, err) - result, err := client.GetResult(namespace, experiment) + result, err := client.GetData(namespace, experiment) assert.NoError(t, err) assert.Equal(t, data, string(result)) } diff --git a/storage/interface.go b/storage/interface.go index 05abcf00a..b0578515a 100644 --- a/storage/interface.go +++ b/storage/interface.go @@ -1,8 +1,6 @@ // Package storage provides the storage client for the controllers package package storage -import "github.com/iter8-tools/iter8/base" - // SummarizedMetric is a metric summary type SummarizedMetric struct { Count uint64 @@ -72,10 +70,17 @@ type Interface interface { // Example key: kt-data::my-namespace::my-experiment-name -> per endpoint JSON data + summary SetData(namespace, experiment string, data []byte) error + // // get ExperimentResult for a particular namespace and experiment + // GetResults(namespace, experiment string) (*base.ExperimentResult, error) + + // // called by the A/B/n SDK gRPC API implementation (SDK for application clients) + // // Example key: kt-metric::my-app::0::my-signature::my-metric::my-user::my-transaction-id -> my-metric-value (get the metric value with all the provided information) + // SetResult(namespace, experiment string, experimentResult *base.ExperimentResult) error + // get ExperimentResult for a particular namespace and experiment - GetResults(namespace, experiment string) (*base.ExperimentResult, error) + GetExperimentResult(namespace, experiment string) ([]byte, error) // called by the A/B/n SDK gRPC API implementation (SDK for application clients) // Example key: kt-metric::my-app::0::my-signature::my-metric::my-user::my-transaction-id -> my-metric-value (get the metric value with all the provided information) - SetResult(namespace, experiment string, experimentResult *base.ExperimentResult) error + SetExperimentResult(namespace, experiment string, data []byte) error } From 23113fee7d60c8ea3da5deb27a6d6fda23244f79 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Fri, 18 Aug 2023 06:46:23 -0400 Subject: [PATCH 067/121] Cannot cast interface into HTTPResult Signed-off-by: Alan Cha --- .github/workflows/assets/grpcurl.yaml | 152 +++++++++++++ action/launch_test.go | 3 - action/run_test.go | 192 ++++++++--------- base/collect_grpc.go | 20 +- base/collect_http.go | 46 +--- base/experiment.go | 24 +-- base/metrics.go | 11 +- base/test_helpers.go | 14 -- driver/filedriver.go | 16 +- driver/kubedriver.go | 16 +- metrics/server.go | 297 ++++++++++---------------- metrics/server_test.go | 291 +++++++++++++------------ 
storage/badgerdb/simple.go | 139 ++---------- storage/badgerdb/simple_test.go | 44 +--- storage/interface.go | 19 +- 15 files changed, 584 insertions(+), 700 deletions(-) create mode 100644 .github/workflows/assets/grpcurl.yaml diff --git a/.github/workflows/assets/grpcurl.yaml b/.github/workflows/assets/grpcurl.yaml new file mode 100644 index 000000000..b0b52100d --- /dev/null +++ b/.github/workflows/assets/grpcurl.yaml @@ -0,0 +1,152 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: sleep +spec: + replicas: 1 + selector: + matchLabels: + app: sleep + template: + metadata: + labels: + app: sleep + spec: + containers: + - name: sleep + image: fullstorydev/grpcurl:latest-alpine + command: ["/bin/sh", "-c", "source command.sh"] + workingDir: /demo + imagePullPolicy: IfNotPresent + volumeMounts: + - name: config-volume + mountPath: /demo + volumes: + - name: config-volume + configMap: + name: demo-input +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: demo-input +data: + route_guide.proto: | + // Copyright 2015 gRPC authors. + // + // Licensed under the Apache License, Version 2.0 (the "License"); + // you may not use this file except in compliance with the License. + // You may obtain a copy of the License at + // + // http://www.apache.org/licenses/LICENSE-2.0 + // + // Unless required by applicable law or agreed to in writing, software + // distributed under the License is distributed on an "AS IS" BASIS, + // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + // See the License for the specific language governing permissions and + // limitations under the License. + + syntax = "proto3"; + + option go_package = "google.golang.org/grpc/examples/route_guide/routeguide"; + option java_multiple_files = true; + option java_package = "io.grpc.examples.routeguide"; + option java_outer_classname = "RouteGuideProto"; + + package routeguide; + + // Interface exported by the server. + service RouteGuide { + // A simple RPC. + // + // Obtains the feature at a given position. + // + // A feature with an empty name is returned if there's no feature at the given + // position. + rpc GetFeature(Point) returns (Feature) {} + + // A server-to-client streaming RPC. + // + // Obtains the Features available within the given Rectangle. Results are + // streamed rather than returned at once (e.g. in a response message with a + // repeated field), as the rectangle may cover a large area and contain a + // huge number of features. + rpc ListFeatures(Rectangle) returns (stream Feature) {} + + // A client-to-server streaming RPC. + // + // Accepts a stream of Points on a route being traversed, returning a + // RouteSummary when traversal is completed. + rpc RecordRoute(stream Point) returns (RouteSummary) {} + + // A Bidirectional streaming RPC. + // + // Accepts a stream of RouteNotes sent while a route is being traversed, + // while receiving other RouteNotes (e.g. from other users). + rpc RouteChat(stream RouteNote) returns (stream RouteNote) {} + } + + // Points are represented as latitude-longitude pairs in the E7 representation + // (degrees multiplied by 10**7 and rounded to the nearest integer). + // Latitudes should be in the range +/- 90 degrees and longitude should be in + // the range +/- 180 degrees (inclusive). + message Point { + int32 latitude = 1; + int32 longitude = 2; + } + + // A latitude-longitude rectangle, represented as two diagonally opposite + // points "lo" and "hi". + message Rectangle { + // One corner of the rectangle. 
+ Point lo = 1; + + // The other corner of the rectangle. + Point hi = 2; + } + + // A feature names something at a given point. + // + // If a feature could not be named, the name is empty. + message Feature { + // The name of the feature. + string name = 1; + + // The point where the feature is detected. + Point location = 2; + } + + // A RouteNote is a message sent while at a given point. + message RouteNote { + // The location from which the message is sent. + Point location = 1; + + // The message to be sent. + string message = 2; + } + + // A RouteSummary is received in response to a RecordRoute rpc. + // + // It contains the number of individual points received, the number of + // detected features, and the total distance covered as the cumulative sum of + // the distance between each point. + message RouteSummary { + // The number of points received. + int32 point_count = 1; + + // The number of known features passed while traversing the route. + int32 feature_count = 2; + + // The distance covered in metres. + int32 distance = 3; + + // The duration of the traversal in seconds. + int32 elapsed_time = 4; + } + unary.json: | + { + "latitude": 407838351, + "longitude": -746143763 + } + command.sh: | + cat unary.json | grpcurl -plaintext -proto route_guide.proto -d @ routeguide:50051 routeguide.RouteGuide.GetFeature \ No newline at end of file diff --git a/action/launch_test.go b/action/launch_test.go index 7ba10974b..c5705a3ea 100644 --- a/action/launch_test.go +++ b/action/launch_test.go @@ -22,9 +22,6 @@ func TestKubeLaunch(t *testing.T) { err = lOpts.KubeRun() assert.NoError(t, err) - // x, _ := json.Marshal(lOpts) - // fmt.Println(string(x)) - fmt.Println(lOpts.Group) fmt.Println(lOpts.Releases) diff --git a/action/run_test.go b/action/run_test.go index ec467c64a..551a44260 100644 --- a/action/run_test.go +++ b/action/run_test.go @@ -1,98 +1,98 @@ package action -import ( - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "os" - "testing" - - "fortio.org/fortio/fhttp" - "github.com/iter8-tools/iter8/base" - "github.com/iter8-tools/iter8/driver" - "github.com/stretchr/testify/assert" - "helm.sh/helm/v3/pkg/cli" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - myName = "myName" - myNamespace = "myNamespace" -) - -func TestKubeRun(t *testing.T) { - // define METRICS_SERVER_URL - metricsServerURL := "http://iter8.default:8080" - err := os.Setenv(base.MetricsServerURL, metricsServerURL) - assert.NoError(t, err) - - // create and configure HTTP endpoint for testing - mux, addr := fhttp.DynamicHTTPServer(false) - url := fmt.Sprintf("http://127.0.0.1:%d/get", addr.Port) - var verifyHandlerCalled bool - mux.HandleFunc("/get", base.GetTrackingHandler(&verifyHandlerCalled)) - - // mock metrics server - base.StartHTTPMock(t) - metricsServerCalled := false - base.MockMetricsServer(base.MockMetricsServerInput{ - MetricsServerURL: metricsServerURL, - PerformanceResultCallback: func(req *http.Request) { - metricsServerCalled = true - - // check query parameters - assert.Equal(t, myName, req.URL.Query().Get("experiment")) - assert.Equal(t, myNamespace, req.URL.Query().Get("namespace")) - - // check payload - body, err := io.ReadAll(req.Body) - assert.NoError(t, err) - assert.NotNil(t, body) - - // check payload content - bodyFortioResult := base.HTTPResult{} - err = json.Unmarshal(body, &bodyFortioResult) - assert.NoError(t, err) - assert.NotNil(t, body) - - if _, ok := bodyFortioResult[url]; !ok { - assert.Fail(t, fmt.Sprintf("payload 
FortioResult does not contain endpoint: %s", url)) - } - }, - }) - - _ = os.Chdir(t.TempDir()) - - // create experiment.yaml - base.CreateExperimentYaml(t, base.CompletePath("../testdata", "experiment.tpl"), url, driver.ExperimentPath) - - // fix rOpts - rOpts := NewRunOpts(driver.NewFakeKubeDriver(cli.New())) - - // read experiment from file created above - byteArray, _ := os.ReadFile(driver.ExperimentPath) - _, _ = rOpts.Clientset.CoreV1().Secrets("default").Create(context.TODO(), &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "default", - Namespace: "default", - }, - StringData: map[string]string{driver.ExperimentPath: string(byteArray)}, - }, metav1.CreateOptions{}) - - err = rOpts.KubeRun() - assert.NoError(t, err) - // sanity check -- handler was called - assert.True(t, verifyHandlerCalled) - assert.True(t, metricsServerCalled) - - // check results - exp, err := base.BuildExperiment(rOpts.KubeDriver) - assert.NoError(t, err) - assert.True(t, exp.Completed()) - assert.True(t, exp.NoFailure()) - assert.Equal(t, 1, exp.Result.NumCompletedTasks) - -} +// import ( +// "context" +// "encoding/json" +// "fmt" +// "io" +// "net/http" +// "os" +// "testing" + +// "fortio.org/fortio/fhttp" +// "github.com/iter8-tools/iter8/base" +// "github.com/iter8-tools/iter8/driver" +// "github.com/stretchr/testify/assert" +// "helm.sh/helm/v3/pkg/cli" +// corev1 "k8s.io/api/core/v1" +// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +// ) + +// const ( +// myName = "myName" +// myNamespace = "myNamespace" +// ) + +// func TestKubeRun(t *testing.T) { +// // define METRICS_SERVER_URL +// metricsServerURL := "http://iter8.default:8080" +// err := os.Setenv(base.MetricsServerURL, metricsServerURL) +// assert.NoError(t, err) + +// // create and configure HTTP endpoint for testing +// mux, addr := fhttp.DynamicHTTPServer(false) +// url := fmt.Sprintf("http://127.0.0.1:%d/get", addr.Port) +// var verifyHandlerCalled bool +// mux.HandleFunc("/get", base.GetTrackingHandler(&verifyHandlerCalled)) + +// // mock metrics server +// base.StartHTTPMock(t) +// metricsServerCalled := false +// base.MockMetricsServer(base.MockMetricsServerInput{ +// MetricsServerURL: metricsServerURL, +// PerformanceResultCallback: func(req *http.Request) { +// metricsServerCalled = true + +// // check query parameters +// assert.Equal(t, myName, req.URL.Query().Get("experiment")) +// assert.Equal(t, myNamespace, req.URL.Query().Get("namespace")) + +// // check payload +// body, err := io.ReadAll(req.Body) +// assert.NoError(t, err) +// assert.NotNil(t, body) + +// // check payload content +// bodyFortioResult := base.HTTPResult{} +// err = json.Unmarshal(body, &bodyFortioResult) +// assert.NoError(t, err) +// assert.NotNil(t, body) + +// if _, ok := bodyFortioResult[url]; !ok { +// assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", url)) +// } +// }, +// }) + +// _ = os.Chdir(t.TempDir()) + +// // create experiment.yaml +// base.CreateExperimentYaml(t, base.CompletePath("../testdata", "experiment.tpl"), url, driver.ExperimentPath) + +// // fix rOpts +// rOpts := NewRunOpts(driver.NewFakeKubeDriver(cli.New())) + +// // read experiment from file created above +// byteArray, _ := os.ReadFile(driver.ExperimentPath) +// _, _ = rOpts.Clientset.CoreV1().Secrets("default").Create(context.TODO(), &corev1.Secret{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "default", +// Namespace: "default", +// }, +// StringData: map[string]string{driver.ExperimentPath: string(byteArray)}, +// }, metav1.CreateOptions{}) + 
+// err = rOpts.KubeRun() +// assert.NoError(t, err) +// // sanity check -- handler was called +// assert.True(t, verifyHandlerCalled) +// assert.True(t, metricsServerCalled) + +// // check results +// exp, err := base.BuildExperiment(rOpts.KubeDriver) +// assert.NoError(t, err) +// assert.True(t, exp.Completed()) +// assert.True(t, exp.NoFailure()) +// assert.Equal(t, 1, exp.Result.NumCompletedTasks) + +// } diff --git a/base/collect_grpc.go b/base/collect_grpc.go index 8f63243a0..619665d23 100644 --- a/base/collect_grpc.go +++ b/base/collect_grpc.go @@ -2,7 +2,6 @@ package base import ( "fmt" - "os" "time" "github.com/bojand/ghz/runner" @@ -67,11 +66,11 @@ func (t *collectGRPCTask) validateInputs() error { } // resultForVersion collects gRPC test result for a given version -func (t *collectGRPCTask) resultForVersion() (map[string]*runner.Report, error) { +func (t *collectGRPCTask) resultForVersion() (GHZResult, error) { // the main idea is to run ghz with proper options var err error - results := map[string]*runner.Report{} + results := GHZResult{} if len(t.With.Endpoints) > 0 { log.Logger.Trace("multiple endpoints") @@ -151,22 +150,13 @@ func (t *collectGRPCTask) run(exp *Experiment) error { return nil } - // 3. Init insights with num versions: always 1 in this task + // 3. init insights with num versions: always 1 in this task if err = exp.Result.initInsightsWithNumVersions(1); err != nil { return err } - // get URL of metrics server from environment variable - metricsServerURL, ok := os.LookupEnv(MetricsServerURL) - if !ok { - errorMessage := "could not look up METRICS_SERVER_URL environment variable" - log.Logger.Error(errorMessage) - return fmt.Errorf(errorMessage) - } - - if err = putPerformanceResultToMetricsService(metricsServerURL, exp.Metadata.Namespace, exp.Metadata.Name, data); err != nil { - return err - } + // 4. write data to Insights + exp.Result.Insights.TaskData[CollectGRPCTaskName] = data return nil } diff --git a/base/collect_http.go b/base/collect_http.go index 340dca439..f538b2f9e 100644 --- a/base/collect_http.go +++ b/base/collect_http.go @@ -1,7 +1,6 @@ package base import ( - "encoding/json" "fmt" "io" "os" @@ -82,31 +81,6 @@ var ( defaultPercentiles = [...]float64{50.0, 75.0, 90.0, 95.0, 99.0, 99.9} ) -// errorCode checks if a given code is an error code -func (t *collectHTTPTask) errorCode(code int) bool { - // connection failure - if code == -1 { - return true - } - - // HTTP errors - for _, lims := range t.With.ErrorRanges { - // if no lower limit (check upper) - if lims.Lower == nil && code <= *lims.Upper { - return true - } - // if no upper limit (check lower) - if lims.Upper == nil && code >= *lims.Lower { - return true - } - // if both limits are present (check both) - if lims.Upper != nil && lims.Lower != nil && code <= *lims.Upper && code >= *lims.Lower { - return true - } - } - return false -} - // collectHTTPTask enables load testing of HTTP services. 
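Just above, the gRPC collect task stops pushing its own performance result and instead records it under its task name in Insights.TaskData; the HTTP task below gets the same treatment. For orientation, a minimal sketch of that data path with abridged stand-in types (the real ones live in base/experiment.go), under the assumption that the driver later ships the whole ExperimentResult to the metrics service, as the driver hunks further down in this patch do:

package main

import "fmt"

// abridged stand-ins for base.Insights / base.ExperimentResult
type insights struct {
	TaskData map[string]interface{} // task name -> raw task output
}

type experimentResult struct {
	Insights *insights
}

func main() {
	res := experimentResult{Insights: &insights{TaskData: map[string]interface{}{}}}

	// a collect task records its result under its own task name, keyed per endpoint or call
	res.Insights.TaskData["grpc"] = map[string]int{"helloworld.Greeter.SayHello": 200}

	// the driver's Write() then PUTs the whole ExperimentResult to the metrics service,
	// replacing the per-task PUT of performance results that this patch removes
	fmt.Println(res.Insights.TaskData["grpc"])
}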
type collectHTTPTask struct { // TaskMeta has fields common to all tasks @@ -214,11 +188,11 @@ func getFortioOptions(c endpoint) (*fhttp.HTTPRunnerOptions, error) { // func (t *collectHTTPTask) getFortioResults() (*fhttp.HTTPRunnerResults, error) { // key is the metric prefix // key is the endpoint -func (t *collectHTTPTask) getFortioResults() (map[string]*fhttp.HTTPRunnerResults, error) { +func (t *collectHTTPTask) getFortioResults() (HTTPResult, error) { // the main idea is to run Fortio with proper options var err error - results := map[string]*fhttp.HTTPRunnerResults{} + results := HTTPResult{} if len(t.With.Endpoints) > 0 { log.Logger.Trace("multiple endpoints") for endpointID, endpoint := range t.With.Endpoints { @@ -300,20 +274,8 @@ func (t *collectHTTPTask) run(exp *Experiment) error { return err } - result, _ := json.Marshal(exp.Result) - log.Logger.Trace("before fortioResult", string(result)) - - // get URL of metrics server from environment variable - metricsServerURL, ok := os.LookupEnv(MetricsServerURL) - if !ok { - errorMessage := "could not look up METRICS_SERVER_URL environment variable" - log.Logger.Error(errorMessage) - return fmt.Errorf(errorMessage) - } - - if err = putPerformanceResultToMetricsService(metricsServerURL, exp.Metadata.Namespace, exp.Metadata.Name, data); err != nil { - return err - } + // write data to Insights + exp.Result.Insights.TaskData[CollectHTTPTaskName] = data return nil } diff --git a/base/experiment.go b/base/experiment.go index c8919f8b5..a8589f49d 100644 --- a/base/experiment.go +++ b/base/experiment.go @@ -4,7 +4,6 @@ import ( "encoding/json" "errors" "fmt" - "os" "github.com/antonmedv/expr" log "github.com/iter8-tools/iter8/base/log" @@ -86,6 +85,9 @@ type Insights struct { // VersionNames is list of version identifiers if known VersionNames []VersionInfo `json:"versionNames" yaml:"versionNames"` + + // TaskData is a map of task names to the data produced by said task + TaskData map[string]interface{} `json:"taskData" yaml:"taskData"` } // VersionInfo is basic information about a version @@ -283,15 +285,6 @@ func (exp *Experiment) NoFailure() bool { func (exp *Experiment) run(driver Driver) error { var err error - // TODO: reduce repetition, create package local variable and do validation - // get URL of metrics server from environment variable - metricsServerURL, ok := os.LookupEnv(MetricsServerURL) - if !ok { - errorMessage := "could not look up METRICS_SERVER_URL environment variable" - log.Logger.Error(errorMessage) - return fmt.Errorf(errorMessage) - } - exp.driver = driver if exp.Result == nil { err = errors.New("experiment with nil result section cannot be run") @@ -328,16 +321,10 @@ func (exp *Experiment) run(driver Driver) error { log.Logger.Error("task " + fmt.Sprintf("%v: %v", i+1, *getName(t)) + ": " + "failure") exp.failExperiment() - // TODO: remove err = driver.Write(exp) if err != nil { return err } - err = putExperimentResultToMetricsService(metricsServerURL, exp.Metadata.Namespace, exp.Metadata.Name, exp.Result) - if err != nil { - return err - } - return err } log.Logger.Info("task " + fmt.Sprintf("%v: %v", i+1, *getName(t)) + ": " + "completed") } else { @@ -346,15 +333,10 @@ func (exp *Experiment) run(driver Driver) error { exp.incrementNumCompletedTasks() - // TODO: remove err = driver.Write(exp) if err != nil { return err } - err = putExperimentResultToMetricsService(metricsServerURL, exp.Metadata.Namespace, exp.Metadata.Name, exp.Result) - if err != nil { - return err - } } return nil } diff --git a/base/metrics.go 
b/base/metrics.go index d7c990ee4..eb25396b1 100644 --- a/base/metrics.go +++ b/base/metrics.go @@ -17,8 +17,6 @@ const ( // MetricsPath is the path to the GET /metrics endpoint MetricsPath = "/metrics" - // PerformanceResultPath is the path to the PUT /performanceResult endpoint - PerformanceResultPath = "/performanceResult" // ExperimentResultPath is the path to the PUT /experimentResult endpoint ExperimentResultPath = "/experimentResult" // HTTPDashboardPath is the path to the GET /httpDashboard endpoint @@ -80,14 +78,7 @@ func callMetricsService(method, metricsServerURL, path string, queryParams map[s return nil } -func putPerformanceResultToMetricsService(metricsServerURL, namespace, experiment string, data interface{}) error { - return callMetricsService(http.MethodPut, metricsServerURL, PerformanceResultPath, map[string]string{ - "namespace": namespace, - "experiment": experiment, - }, data) -} - -func putExperimentResultToMetricsService(metricsServerURL, namespace, experiment string, experimentResult *ExperimentResult) error { +func PutExperimentResultToMetricsService(metricsServerURL, namespace, experiment string, experimentResult *ExperimentResult) error { return callMetricsService(http.MethodPut, metricsServerURL, ExperimentResultPath, map[string]string{ "namespace": namespace, "experiment": experiment, diff --git a/base/test_helpers.go b/base/test_helpers.go index f855a039d..97233a9b7 100644 --- a/base/test_helpers.go +++ b/base/test_helpers.go @@ -81,8 +81,6 @@ type MetricsServerCallback func(req *http.Request) type MockMetricsServerInput struct { MetricsServerURL string - // PUT /performanceResult - PerformanceResultCallback MetricsServerCallback // PUT /experimentResult ExperimentResultCallback MetricsServerCallback // GET /grpcDashboard @@ -94,18 +92,6 @@ type MockMetricsServerInput struct { // MockMetricsServer is a mock metrics server // use the callback functions in the MockMetricsServerInput to test if those endpoints are called func MockMetricsServer(input MockMetricsServerInput) { - // PUT /performanceResult - httpmock.RegisterResponder( - http.MethodPut, - input.MetricsServerURL+PerformanceResultPath, - func(req *http.Request) (*http.Response, error) { - if input.PerformanceResultCallback != nil { - input.PerformanceResultCallback(req) - } - return httpmock.NewStringResponse(200, "success"), nil - }, - ) - // PUT /experimentResult httpmock.RegisterResponder( http.MethodPut, diff --git a/driver/filedriver.go b/driver/filedriver.go index 0ab011b20..739f59f75 100644 --- a/driver/filedriver.go +++ b/driver/filedriver.go @@ -2,12 +2,12 @@ package driver import ( "errors" + "fmt" "os" "path" "github.com/iter8-tools/iter8/base" "github.com/iter8-tools/iter8/base/log" - "sigs.k8s.io/yaml" ) // FileDriver enables reading and writing experiment spec and result files @@ -28,11 +28,17 @@ func (f *FileDriver) Read() (*base.Experiment, error) { // Write the experiment func (f *FileDriver) Write(exp *base.Experiment) error { - b, _ := yaml.Marshal(exp) - err := os.WriteFile(path.Join(f.RunDir, ExperimentPath), b, 0600) + // get URL of metrics server from environment variable + metricsServerURL, ok := os.LookupEnv(base.MetricsServerURL) + if !ok { + errorMessage := "could not look up METRICS_SERVER_URL environment variable" + log.Logger.Error(errorMessage) + return fmt.Errorf(errorMessage) + } + + err := base.PutExperimentResultToMetricsService(metricsServerURL, exp.Metadata.Namespace, exp.Metadata.Name, exp.Result) if err != nil { - 
log.Logger.WithStackTrace(err.Error()).Error("unable to write experiment") - return errors.New("unable to write experiment") + return err } return nil } diff --git a/driver/kubedriver.go b/driver/kubedriver.go index 4debb6be4..02a0ac409 100644 --- a/driver/kubedriver.go +++ b/driver/kubedriver.go @@ -250,10 +250,18 @@ func (kd *KubeDriver) updateExperimentSecret(e *base.Experiment) error { } // Write writes a Kubernetes experiment -func (kd *KubeDriver) Write(e *base.Experiment) error { - if err := kd.updateExperimentSecret(e); err != nil { - log.Logger.WithStackTrace(err.Error()).Error("unable to write experiment") - return errors.New("unable to write experiment") +func (kd *KubeDriver) Write(exp *base.Experiment) error { + // get URL of metrics server from environment variable + metricsServerURL, ok := os.LookupEnv(base.MetricsServerURL) + if !ok { + errorMessage := "could not look up METRICS_SERVER_URL environment variable" + log.Logger.Error(errorMessage) + return fmt.Errorf(errorMessage) + } + + err := base.PutExperimentResultToMetricsService(metricsServerURL, exp.Metadata.Namespace, exp.Metadata.Name, exp.Result) + if err != nil { + return err } return nil } diff --git a/metrics/server.go b/metrics/server.go index cb09458cd..c9e3aaad6 100644 --- a/metrics/server.go +++ b/metrics/server.go @@ -150,7 +150,6 @@ func Start(stopCh <-chan struct{}) error { // configure endpoints http.HandleFunc(util.MetricsPath, getMetrics) - http.HandleFunc(util.PerformanceResultPath, putPerformanceResult) http.HandleFunc(util.ExperimentResultPath, putExperimentResult) http.HandleFunc(util.HTTPDashboardPath, getHTTPDashboard) http.HandleFunc(util.GRPCDashboardPath, getGRPCDashboard) @@ -491,148 +490,39 @@ func getHTTPEndpointRow(httpRunnerResults *fhttp.HTTPRunnerResults) httpEndpoint return row } -func getHTTPDashboardHelper(fortioResult util.HTTPResult, experimentResult base.ExperimentResult) httpDashboard { +func getHTTPDashboardHelper(experimentResult *base.ExperimentResult) httpDashboard { dashboard := httpDashboard{ Endpoints: map[string]httpEndpointRow{}, + ExperimentResult: dashboardExperimentResult{ + Name: experimentResult.Name, + Namespace: experimentResult.Namespace, + Revision: experimentResult.Revision, + StartTime: experimentResult.StartTime.Time.Format(timeFormat), + NumCompletedTasks: experimentResult.NumCompletedTasks, + Failure: experimentResult.Failure, + Iter8Version: experimentResult.Iter8Version, + }, } - for endpoint, endpointResult := range fortioResult { - endpointResult := endpointResult - dashboard.Endpoints[endpoint] = getHTTPEndpointRow(endpointResult) - } - - experimentResultJSON, _ := json.Marshal(experimentResult) - fmt.Println(string(experimentResultJSON)) - - dashboard.ExperimentResult = dashboardExperimentResult{ - Name: experimentResult.Name, - Namespace: experimentResult.Namespace, - Revision: experimentResult.Revision, - StartTime: experimentResult.StartTime.Time.Format(timeFormat), - NumCompletedTasks: experimentResult.NumCompletedTasks, - Failure: experimentResult.Failure, - Insights: experimentResult.Insights, - Iter8Version: experimentResult.Iter8Version, - } - - return dashboard -} - -// putPerformanceResult handles PUT /performanceResult with query parameter application=namespace/name -func putPerformanceResult(w http.ResponseWriter, r *http.Request) { - log.Logger.Trace("putPerformanceResult called") - defer log.Logger.Trace("putPerformanceResult completed") - - // verify method - if r.Method != http.MethodPut { - http.Error(w, "expected PUT", 
http.StatusMethodNotAllowed) - return - } - - // verify request (query parameters) - namespace := r.URL.Query().Get("namespace") - if namespace == "" { - http.Error(w, "no namespace specified", http.StatusBadRequest) - return - } - - experiment := r.URL.Query().Get("experiment") - if experiment == "" { - http.Error(w, "no experiment specified", http.StatusBadRequest) - return - } - - log.Logger.Tracef("putPerformanceResult called for namespace %s and experiment %s", namespace, experiment) - - defer func() { - err := r.Body.Close() - if err != nil { - errorMessage := fmt.Sprintf("cannot close request body: %e", err) - log.Logger.Error(errorMessage) - http.Error(w, errorMessage, http.StatusBadRequest) - return - } - }() - body, err := io.ReadAll(r.Body) - if err != nil { - errorMessage := fmt.Sprintf("cannot read request body: %e", err) - log.Logger.Error(errorMessage) - http.Error(w, errorMessage, http.StatusBadRequest) - return - } - - if abn.MetricsClient == nil { - http.Error(w, "no metrics client", http.StatusInternalServerError) - return - } - err = abn.MetricsClient.SetData(namespace, experiment, body) - if err != nil { - errorMessage := fmt.Sprintf("cannot store result in storage client: %s: %e", string(body), err) - log.Logger.Error(errorMessage) - http.Error(w, errorMessage, http.StatusInternalServerError) - return - } - - // TODO: 201 for new resource, 200 for update -} - -// putExperimentResult handles PUT /experimentResult with query parameter application=namespace/name -func putExperimentResult(w http.ResponseWriter, r *http.Request) { - log.Logger.Trace("putResult called") - defer log.Logger.Trace("putResult completed") - - // verify method - if r.Method != http.MethodPut { - http.Error(w, "expected PUT", http.StatusMethodNotAllowed) - return - } - - // verify request (query parameters) - namespace := r.URL.Query().Get("namespace") - if namespace == "" { - http.Error(w, "no namespace specified", http.StatusBadRequest) - return - } - - experiment := r.URL.Query().Get("experiment") - if experiment == "" { - http.Error(w, "no experiment specified", http.StatusBadRequest) - return - } - - log.Logger.Tracef("putResult called for namespace %s and experiment %s", namespace, experiment) - - defer func() { - err := r.Body.Close() - if err != nil { - errorMessage := fmt.Sprintf("cannot close request body: %e", err) - log.Logger.Error(errorMessage) - http.Error(w, errorMessage, http.StatusBadRequest) - return - } - }() - body, err := io.ReadAll(r.Body) - if err != nil { - errorMessage := fmt.Sprintf("cannot read request body: %e", err) - log.Logger.Error(errorMessage) - http.Error(w, errorMessage, http.StatusBadRequest) - return + // get raw data from ExperimentResult + httpTaskData := experimentResult.Insights.TaskData[util.CollectHTTPTaskName] + if httpTaskData == nil { + return dashboard } - if abn.MetricsClient == nil { - http.Error(w, "no metrics client", http.StatusInternalServerError) - return + // cast the raw data (task data) into HTTPResult + httpResult, ok := httpTaskData.(util.HTTPResult) + if !ok { + return dashboard } - err = abn.MetricsClient.SetExperimentResult(namespace, experiment, body) - if err != nil { - errorMessage := fmt.Sprintf("cannot store result in storage client: %s: %e", string(body), err) - log.Logger.Error(errorMessage) - http.Error(w, errorMessage, http.StatusInternalServerError) - return + // form rows of dashboard + for endpoint, endpointResult := range httpResult { + endpointResult := endpointResult + dashboard.Endpoints[endpoint] = 
getHTTPEndpointRow(endpointResult) } - // TODO: 201 for new resource, 200 for update + return dashboard } // getHTTPDashboard handles GET /getHTTPDashboard with query parameter application=namespace/name @@ -666,25 +556,9 @@ func getHTTPDashboard(w http.ResponseWriter, r *http.Request) { http.Error(w, "no metrics client", http.StatusInternalServerError) return } - fortioResultsBytes, err := abn.MetricsClient.GetData(namespace, experiment) - if err != nil { - errorMessage := fmt.Sprintf("cannot get Fortio result with namespace %s, experiment %s", namespace, experiment) - log.Logger.Error(errorMessage) - http.Error(w, errorMessage, http.StatusBadRequest) - return - } - - fortioResult := util.HTTPResult{} - err = json.Unmarshal(fortioResultsBytes, &fortioResult) - if err != nil { - errorMessage := fmt.Sprintf("cannot JSON unmarshal FortioResult: \"%s\"", string(fortioResultsBytes)) - log.Logger.Error(errorMessage) - http.Error(w, errorMessage, http.StatusInternalServerError) - return - } // get experimentResult from metrics client - experimentResultBytes, err := abn.MetricsClient.GetExperimentResult(namespace, experiment) + experimentResult, err := abn.MetricsClient.GetExperimentResult(namespace, experiment) if err != nil { errorMessage := fmt.Sprintf("cannot get experiment result with namespace %s, experiment %s", namespace, experiment) log.Logger.Error(errorMessage) @@ -692,17 +566,8 @@ func getHTTPDashboard(w http.ResponseWriter, r *http.Request) { return } - experimentResult := util.ExperimentResult{} - err = json.Unmarshal(experimentResultBytes, &experimentResult) - if err != nil { - errorMessage := fmt.Sprintf("cannot JSON unmarshal ExperimentResult: \"%s\"", string(experimentResultBytes)) - log.Logger.Error(errorMessage) - http.Error(w, errorMessage, http.StatusInternalServerError) - return - } - // JSON marshal the dashboard - dashboardBytes, err := json.Marshal(getHTTPDashboardHelper(fortioResult, experimentResult)) + dashboardBytes, err := json.Marshal(getHTTPDashboardHelper(experimentResult)) if err != nil { errorMessage := "cannot JSON marshal HTTP dashboard" log.Logger.Error(errorMessage) @@ -755,27 +620,38 @@ func getGRPCEndpointRow(ghzRunnerReport *runner.Report) ghzEndpointRow { return row } -func getGRPCDashboardHelper(ghzResult util.GHZResult, experimentResult base.ExperimentResult) ghzDashboard { +func getGRPCDashboardHelper(experimentResult *base.ExperimentResult) ghzDashboard { dashboard := ghzDashboard{ Endpoints: map[string]ghzEndpointRow{}, + ExperimentResult: dashboardExperimentResult{ + Name: experimentResult.Name, + Namespace: experimentResult.Namespace, + Revision: experimentResult.Revision, + StartTime: experimentResult.StartTime.Time.Format(timeFormat), + NumCompletedTasks: experimentResult.NumCompletedTasks, + Failure: experimentResult.Failure, + Iter8Version: experimentResult.Iter8Version, + }, } + // get raw data from ExperimentResult + httpTaskData := experimentResult.Insights.TaskData[util.CollectHTTPTaskName] + if httpTaskData == nil { + return dashboard + } + + // cast the raw data (task data) into HTTPResult + ghzResult, ok := httpTaskData.(util.GHZResult) + if !ok { + return dashboard + } + + // form rows of dashboard for endpoint, endpointResult := range ghzResult { endpointResult := endpointResult dashboard.Endpoints[endpoint] = getGRPCEndpointRow(endpointResult) } - dashboard.ExperimentResult = dashboardExperimentResult{ - Name: experimentResult.Name, - Namespace: experimentResult.Namespace, - Revision: experimentResult.Revision, - StartTime: 
experimentResult.StartTime.Time.Format(timeFormat), - NumCompletedTasks: experimentResult.NumCompletedTasks, - Failure: experimentResult.Failure, - Insights: experimentResult.Insights, - Iter8Version: experimentResult.Iter8Version, - } - return dashboard } @@ -809,51 +685,94 @@ func getGRPCDashboard(w http.ResponseWriter, r *http.Request) { http.Error(w, "no metrics client", http.StatusInternalServerError) return } - ghzResultBytes, err := abn.MetricsClient.GetData(namespace, experiment) + + // get experimentResult from metrics client + experimentResult, err := abn.MetricsClient.GetExperimentResult(namespace, experiment) if err != nil { - errorMessage := fmt.Sprintf("cannot get ghz result with namespace %s, experiment %s", namespace, experiment) + errorMessage := fmt.Sprintf("cannot get experiment result with namespace %s, experiment %s", namespace, experiment) log.Logger.Error(errorMessage) http.Error(w, errorMessage, http.StatusBadRequest) return } - ghzResult := util.GHZResult{} - err = json.Unmarshal(ghzResultBytes, &ghzResult) + // JSON marshal the dashboard + dashboardBytes, err := json.Marshal(getGRPCDashboardHelper(experimentResult)) if err != nil { - errorMessage := fmt.Sprintf("cannot JSON unmarshal GHZResult: \"%s\"", string(ghzResultBytes)) + errorMessage := "cannot JSON marshal gRPC dashboard" log.Logger.Error(errorMessage) http.Error(w, errorMessage, http.StatusInternalServerError) return } - // get experimentResult from metrics client - experimentResultBytes, err := abn.MetricsClient.GetExperimentResult(namespace, experiment) + // finally, send response + w.Header().Add("Content-Type", "application/json") + _, _ = w.Write(dashboardBytes) +} + +// putExperimentResult handles PUT /experimentResult with query parameter application=namespace/name +func putExperimentResult(w http.ResponseWriter, r *http.Request) { + log.Logger.Trace("putResult called") + defer log.Logger.Trace("putResult completed") + + // verify method + if r.Method != http.MethodPut { + http.Error(w, "expected PUT", http.StatusMethodNotAllowed) + return + } + + // verify request (query parameters) + namespace := r.URL.Query().Get("namespace") + if namespace == "" { + http.Error(w, "no namespace specified", http.StatusBadRequest) + return + } + + experiment := r.URL.Query().Get("experiment") + if experiment == "" { + http.Error(w, "no experiment specified", http.StatusBadRequest) + return + } + + log.Logger.Tracef("putResult called for namespace %s and experiment %s", namespace, experiment) + + defer func() { + err := r.Body.Close() + if err != nil { + errorMessage := fmt.Sprintf("cannot close request body: %e", err) + log.Logger.Error(errorMessage) + http.Error(w, errorMessage, http.StatusBadRequest) + return + } + }() + body, err := io.ReadAll(r.Body) if err != nil { - errorMessage := fmt.Sprintf("cannot get experiment result with namespace %s, experiment %s", namespace, experiment) + errorMessage := fmt.Sprintf("cannot read request body: %e", err) log.Logger.Error(errorMessage) http.Error(w, errorMessage, http.StatusBadRequest) return } experimentResult := util.ExperimentResult{} - err = json.Unmarshal(experimentResultBytes, &experimentResult) + err = json.Unmarshal(body, &experimentResult) if err != nil { - errorMessage := fmt.Sprintf("cannot JSON unmarshal ExperimentResult: \"%s\"", string(experimentResultBytes)) + errorMessage := fmt.Sprintf("cannot unmarshal body into ExperimentResult: %s: %e", string(body), err) log.Logger.Error(errorMessage) - http.Error(w, errorMessage, http.StatusInternalServerError) 
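The handlers being reshuffled here define a small REST surface: PUT /experimentResult stores a JSON-encoded ExperimentResult for a namespace/experiment pair, and GET /httpDashboard or GET /grpcDashboard renders the stored result as dashboard JSON. A rough client-side sketch of that protocol (the base URL is an assumption, and the empty body stands in for a real ExperimentResult document):

package main

import (
	"bytes"
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	base := "http://iter8.default:8080" // assumed value of METRICS_SERVER_URL
	params := url.Values{"namespace": {"default"}, "experiment": {"default"}}

	// PUT /experimentResult with the ExperimentResult as the JSON body
	req, _ := http.NewRequest(http.MethodPut, base+"/experimentResult?"+params.Encode(), bytes.NewBuffer([]byte(`{}`)))
	req.Header.Set("Content-Type", "application/json")
	if resp, err := http.DefaultClient.Do(req); err == nil {
		fmt.Println("PUT /experimentResult:", resp.StatusCode)
		_ = resp.Body.Close()
	}

	// GET /httpDashboard (or /grpcDashboard) returns the dashboard built from the stored result
	if resp, err := http.Get(base + "/httpDashboard?" + params.Encode()); err == nil {
		fmt.Println("GET /httpDashboard:", resp.StatusCode)
		_ = resp.Body.Close()
	}
}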
+ http.Error(w, errorMessage, http.StatusBadRequest) return } - // JSON marshal the dashboard - dashboardBytes, err := json.Marshal(getGRPCDashboardHelper(ghzResult, experimentResult)) + if abn.MetricsClient == nil { + http.Error(w, "no metrics client", http.StatusInternalServerError) + return + } + + err = abn.MetricsClient.SetExperimentResult(namespace, experiment, &experimentResult) if err != nil { - errorMessage := "cannot JSON marshal gRPC dashboard" + errorMessage := fmt.Sprintf("cannot store result in storage client: %s: %e", string(body), err) log.Logger.Error(errorMessage) http.Error(w, errorMessage, http.StatusInternalServerError) return } - // finally, send response - w.Header().Add("Content-Type", "application/json") - _, _ = w.Write(dashboardBytes) + // TODO: 201 for new resource, 200 for update } diff --git a/metrics/server_test.go b/metrics/server_test.go index 618156f3b..9cb650015 100644 --- a/metrics/server_test.go +++ b/metrics/server_test.go @@ -842,9 +842,14 @@ func TestGetHTTPDashboardHelper(t *testing.T) { Name: myName, Namespace: myNamespace, NumCompletedTasks: 5, + Insights: &util.Insights{ + TaskData: map[string]interface{}{ + util.CollectHTTPTaskName: fortioResult, + }, + }, } - dashboard := getHTTPDashboardHelper(fortioResult, experimentResult) + dashboard := getHTTPDashboardHelper(&experimentResult) assert.NotNil(t, dashboard) dashboardBytes, err := json.Marshal(dashboard) assert.NoError(t, err) @@ -865,9 +870,14 @@ func TestGetGRPCDashboardHelper(t *testing.T) { Name: myName, Namespace: myNamespace, NumCompletedTasks: 5, + Insights: &util.Insights{ + TaskData: map[string]interface{}{ + util.CollectHTTPTaskName: ghzResult, + }, + }, } - dashboard := getGRPCDashboardHelper(ghzResult, experimentResult) + dashboard := getGRPCDashboardHelper(&experimentResult) assert.NotNil(t, dashboard) dashboardBytes, err := json.Marshal(dashboard) @@ -879,10 +889,10 @@ func TestGetGRPCDashboardHelper(t *testing.T) { ) } -func TestPutPerformanceResultInvalidMethod(t *testing.T) { +func TestPutExperimentResultInvalidMethod(t *testing.T) { w := httptest.NewRecorder() - req := httptest.NewRequest(http.MethodGet, util.PerformanceResultPath, nil) - putPerformanceResult(w, req) + req := httptest.NewRequest(http.MethodGet, util.ExperimentResultPath, nil) + putExperimentResult(w, req) res := w.Result() defer func() { err := res.Body.Close() @@ -891,7 +901,7 @@ func TestPutPerformanceResultInvalidMethod(t *testing.T) { assert.Equal(t, http.StatusMethodNotAllowed, res.StatusCode) } -func TestPutPerformanceResultMissingParameter(t *testing.T) { +func TestPutExperimentResultMissingParameter(t *testing.T) { tests := []struct { queryParams url.Values expectedStatusCode int @@ -916,14 +926,14 @@ func TestPutPerformanceResultMissingParameter(t *testing.T) { for _, test := range tests { w := httptest.NewRecorder() - u, err := url.ParseRequestURI(util.PerformanceResultPath) + u, err := url.ParseRequestURI(util.ExperimentResultPath) assert.NoError(t, err) u.RawQuery = test.queryParams.Encode() urlStr := fmt.Sprintf("%v", u) req := httptest.NewRequest(http.MethodPut, urlStr, nil) - putPerformanceResult(w, req) + putExperimentResult(w, req) res := w.Result() defer func() { err := res.Body.Close() @@ -934,7 +944,7 @@ func TestPutPerformanceResultMissingParameter(t *testing.T) { } } -func TestPutPerformanceResult(t *testing.T) { +func TestPutExperimentResult(t *testing.T) { // instantiate metrics client tempDirPath := t.TempDir() client, err := badgerdb.GetClient(badger.DefaultOptions(tempDirPath), 
badgerdb.AdditionalOptions{}) @@ -943,8 +953,8 @@ func TestPutPerformanceResult(t *testing.T) { w := httptest.NewRecorder() - // construct inputs to putPerformanceResult - u, err := url.ParseRequestURI(util.PerformanceResultPath) + // construct inputs to putExperimentResult + u, err := url.ParseRequestURI(util.ExperimentResultPath) assert.NoError(t, err) params := url.Values{ "namespace": {"default"}, @@ -953,11 +963,24 @@ func TestPutPerformanceResult(t *testing.T) { u.RawQuery = params.Encode() urlStr := fmt.Sprintf("%v", u) - payload := `{"hello":"world"}` - req := httptest.NewRequest(http.MethodPut, urlStr, bytes.NewBuffer([]byte(payload))) + experimentResult := util.ExperimentResult{ + Name: myName, + Namespace: myNamespace, + NumCompletedTasks: 5, + Insights: &util.Insights{ + TaskData: map[string]interface{}{ + util.CollectHTTPTaskName: "hello world", + }, + }, + } + + experimentResultBytes, err := json.Marshal(experimentResult) + assert.NoError(t, err) + + req := httptest.NewRequest(http.MethodPut, urlStr, bytes.NewBuffer(experimentResultBytes)) // put result into the metrics client - putPerformanceResult(w, req) + putExperimentResult(w, req) res := w.Result() defer func() { err := res.Body.Close() @@ -965,9 +988,9 @@ func TestPutPerformanceResult(t *testing.T) { }() // check to see if the result is stored in the metrics client - result, err := abn.MetricsClient.GetData("default", "default") + result, err := abn.MetricsClient.GetExperimentResult("default", "default") assert.NoError(t, err) - assert.Equal(t, payload, string(result)) + assert.Equal(t, &experimentResult, result) } func TestGetHTTPDashboardInvalidMethod(t *testing.T) { @@ -1007,7 +1030,7 @@ func TestGetHTTPDashboardMissingParameter(t *testing.T) { for _, test := range tests { w := httptest.NewRecorder() - u, err := url.ParseRequestURI(util.PerformanceResultPath) + u, err := url.ParseRequestURI(util.HTTPDashboardPath) assert.NoError(t, err) u.RawQuery = test.queryParams.Encode() urlStr := fmt.Sprintf("%v", u) @@ -1031,25 +1054,29 @@ func TestGetHTTPDashboard(t *testing.T) { assert.NoError(t, err) abn.MetricsClient = client - // preload metric client with fortio result - err = abn.MetricsClient.SetData("default", "default", []byte(fortioResultJSON)) + // preload metric client with experiment result + fortioResult := util.HTTPResult{} + err = json.Unmarshal([]byte(fortioResultJSON), &fortioResult) assert.NoError(t, err) - // preload metric client with experiment result experimentResult := util.ExperimentResult{ Name: myName, Namespace: myNamespace, NumCompletedTasks: 5, + Insights: &util.Insights{ + TaskData: map[string]interface{}{ + util.CollectHTTPTaskName: fortioResult, + }, + }, } - experimentResultBytes, err := json.Marshal(experimentResult) - assert.NoError(t, err) - err = abn.MetricsClient.SetExperimentResult("default", "default", []byte(experimentResultBytes)) + + err = abn.MetricsClient.SetExperimentResult("default", "default", &experimentResult) assert.NoError(t, err) w := httptest.NewRecorder() // construct inputs to getHTTPDashboard - u, err := url.ParseRequestURI(util.PerformanceResultPath) + u, err := url.ParseRequestURI(util.HTTPDashboardPath) assert.NoError(t, err) params := url.Values{ "namespace": {"default"}, @@ -1078,111 +1105,111 @@ func TestGetHTTPDashboard(t *testing.T) { ) } -func TestGetGRPCDashboardInvalidMethod(t *testing.T) { - w := httptest.NewRecorder() - req := httptest.NewRequest(http.MethodPost, util.PerformanceResultPath, nil) - putPerformanceResult(w, req) - res := w.Result() - defer 
func() { - err := res.Body.Close() - assert.NoError(t, err) - }() - assert.Equal(t, http.StatusMethodNotAllowed, res.StatusCode) -} - -func TestGetGRPCDashboardMissingParameter(t *testing.T) { - tests := []struct { - queryParams url.Values - expectedStatusCode int - }{ - { - expectedStatusCode: http.StatusBadRequest, - }, - { - queryParams: url.Values{ - "namespace": {"default"}, - }, - expectedStatusCode: http.StatusBadRequest, - }, - { - queryParams: url.Values{ - "experiment": {"default"}, - }, - expectedStatusCode: http.StatusBadRequest, - }, - } - - for _, test := range tests { - w := httptest.NewRecorder() - - u, err := url.ParseRequestURI(util.PerformanceResultPath) - assert.NoError(t, err) - u.RawQuery = test.queryParams.Encode() - urlStr := fmt.Sprintf("%v", u) - - req := httptest.NewRequest(http.MethodPut, urlStr, nil) - - putPerformanceResult(w, req) - res := w.Result() - defer func() { - err := res.Body.Close() - assert.NoError(t, err) - }() - - assert.Equal(t, test.expectedStatusCode, res.StatusCode) - } -} - -func TestGetGRPCDashboard(t *testing.T) { - // instantiate metrics client - tempDirPath := t.TempDir() - client, err := badgerdb.GetClient(badger.DefaultOptions(tempDirPath), badgerdb.AdditionalOptions{}) - assert.NoError(t, err) - abn.MetricsClient = client - - // preload metric client with ghz result - err = abn.MetricsClient.SetData("default", "default", []byte(ghzResultJSON)) - assert.NoError(t, err) - - // preload metric client with experiment result - experimentResult := util.ExperimentResult{ - Name: myName, - Namespace: myNamespace, - NumCompletedTasks: 5, - } - experimentResultBytes, err := json.Marshal(experimentResult) - assert.NoError(t, err) - err = abn.MetricsClient.SetExperimentResult("default", "default", []byte(experimentResultBytes)) - assert.NoError(t, err) - - w := httptest.NewRecorder() - - // construct inputs to getGRPCDashboard - u, err := url.ParseRequestURI(util.PerformanceResultPath) - assert.NoError(t, err) - params := url.Values{ - "namespace": {"default"}, - "experiment": {"default"}, - } - u.RawQuery = params.Encode() - urlStr := fmt.Sprintf("%v", u) - - req := httptest.NewRequest(http.MethodGet, urlStr, nil) - - // get ghz dashboard based on result in metrics client - getGRPCDashboard(w, req) - res := w.Result() - defer func() { - err := res.Body.Close() - assert.NoError(t, err) - }() - - // check the ghz dashboard - body, err := io.ReadAll(res.Body) - assert.NoError(t, err) - assert.Equal( - t, - ghzDashboardJSON, - string(body), - ) -} +// func TestGetGRPCDashboardInvalidMethod(t *testing.T) { +// w := httptest.NewRecorder() +// req := httptest.NewRequest(http.MethodPost, util.PerformanceResultPath, nil) +// putPerformanceResult(w, req) +// res := w.Result() +// defer func() { +// err := res.Body.Close() +// assert.NoError(t, err) +// }() +// assert.Equal(t, http.StatusMethodNotAllowed, res.StatusCode) +// } + +// func TestGetGRPCDashboardMissingParameter(t *testing.T) { +// tests := []struct { +// queryParams url.Values +// expectedStatusCode int +// }{ +// { +// expectedStatusCode: http.StatusBadRequest, +// }, +// { +// queryParams: url.Values{ +// "namespace": {"default"}, +// }, +// expectedStatusCode: http.StatusBadRequest, +// }, +// { +// queryParams: url.Values{ +// "experiment": {"default"}, +// }, +// expectedStatusCode: http.StatusBadRequest, +// }, +// } + +// for _, test := range tests { +// w := httptest.NewRecorder() + +// u, err := url.ParseRequestURI(util.PerformanceResultPath) +// assert.NoError(t, err) +// u.RawQuery = 
test.queryParams.Encode() +// urlStr := fmt.Sprintf("%v", u) + +// req := httptest.NewRequest(http.MethodPut, urlStr, nil) + +// putPerformanceResult(w, req) +// res := w.Result() +// defer func() { +// err := res.Body.Close() +// assert.NoError(t, err) +// }() + +// assert.Equal(t, test.expectedStatusCode, res.StatusCode) +// } +// } + +// func TestGetGRPCDashboard(t *testing.T) { +// // instantiate metrics client +// tempDirPath := t.TempDir() +// client, err := badgerdb.GetClient(badger.DefaultOptions(tempDirPath), badgerdb.AdditionalOptions{}) +// assert.NoError(t, err) +// abn.MetricsClient = client + +// // preload metric client with ghz result +// err = abn.MetricsClient.SetData("default", "default", []byte(ghzResultJSON)) +// assert.NoError(t, err) + +// // preload metric client with experiment result +// experimentResult := util.ExperimentResult{ +// Name: myName, +// Namespace: myNamespace, +// NumCompletedTasks: 5, +// } +// experimentResultBytes, err := json.Marshal(experimentResult) +// assert.NoError(t, err) +// err = abn.MetricsClient.SetExperimentResult("default", "default", []byte(experimentResultBytes)) +// assert.NoError(t, err) + +// w := httptest.NewRecorder() + +// // construct inputs to getGRPCDashboard +// u, err := url.ParseRequestURI(util.PerformanceResultPath) +// assert.NoError(t, err) +// params := url.Values{ +// "namespace": {"default"}, +// "experiment": {"default"}, +// } +// u.RawQuery = params.Encode() +// urlStr := fmt.Sprintf("%v", u) + +// req := httptest.NewRequest(http.MethodGet, urlStr, nil) + +// // get ghz dashboard based on result in metrics client +// getGRPCDashboard(w, req) +// res := w.Result() +// defer func() { +// err := res.Body.Close() +// assert.NoError(t, err) +// }() + +// // check the ghz dashboard +// body, err := io.ReadAll(res.Body) +// assert.NoError(t, err) +// assert.Equal( +// t, +// ghzDashboardJSON, +// string(body), +// ) +// } diff --git a/storage/badgerdb/simple.go b/storage/badgerdb/simple.go index 274773caf..d88223ba2 100644 --- a/storage/badgerdb/simple.go +++ b/storage/badgerdb/simple.go @@ -315,152 +315,53 @@ func (cl Client) GetMetrics(applicationName string, version int, signature strin return &metrics, nil } -func getDataKey(namespace, experiment string) string { - // getDataKey() is just getUserPrefix() with the user appended at the end - return fmt.Sprintf("kt-data::%s::%s", namespace, experiment) -} - -// SetData sets arbitrary data (such as HTTP/gRPC results) for a particular namespace and experiment name -// the data is []byte in order to make this function reusable for different tasks -func (cl Client) SetData(namespace, experiment string, data []byte) error { - key := getDataKey(namespace, experiment) - - return cl.db.Update(func(txn *badger.Txn) error { - e := badger.NewEntry([]byte(key), data).WithTTL(cl.additionalOptions.TTL) - err := txn.SetEntry(e) - return err - }) -} - -// GetData returns arbitrary data (such as HTTP/gRPC results) for a particular namespace and experiment name -// the data is []byte in order to make this function reusable for different tasks -func (cl Client) GetData(namespace, experiment string) ([]byte, error) { - var valCopy []byte - err := cl.db.View(func(txn *badger.Txn) error { - item, err := txn.Get([]byte(getDataKey(namespace, experiment))) - if err != nil { - return err - } - - valCopy, err = item.ValueCopy(nil) - if err != nil { - return err - } - - return nil - }) - - return valCopy, err -} - func getExperimentResultKey(namespace, experiment string) string { // 
getExperimentResultKey() is just getUserPrefix() with the user appended at the end return fmt.Sprintf("kt-result::%s::%s", namespace, experiment) } -// SetResult sets the experiment result for a particular namespace and experiment name -func (cl Client) SetResult(namespace, experiment string, experimentResult *base.ExperimentResult) error { - key := getExperimentResultKey(namespace, experiment) - - experimentResultJSON, err := json.Marshal(experimentResult) +// SetExperimentResult sets the experiment result for a particular namespace and experiment name +// the data is []byte in order to make this function reusable for different tasks +func (cl Client) SetExperimentResult(namespace, experiment string, data *base.ExperimentResult) error { + dataBytes, err := json.Marshal(data) if err != nil { - return err + return fmt.Errorf("cannot JSON marshal ExperimentResult: %e", err) } - return cl.db.Update(func(txn *badger.Txn) error { - e := badger.NewEntry([]byte(key), []byte(experimentResultJSON)).WithTTL(cl.additionalOptions.TTL) - err := txn.SetEntry(e) - return err - }) -} - -// // GetData returns the experiment result for a particular namespace and experiment name -// func (cl Client) GetExperimentResult(namespace, experiment string) (*base.ExperimentResult, error) { -// var valCopy []byte -// err := cl.db.View(func(txn *badger.Txn) error { -// item, err := txn.Get([]byte(getExperimentResultKey(namespace, experiment))) -// if err != nil { -// return err -// } - -// valCopy, err = item.ValueCopy(nil) -// if err != nil { -// return err -// } - -// return nil -// }) -// if err != nil { -// return nil, err -// } - -// experimentResult := base.ExperimentResult{} -// err = json.Unmarshal(valCopy, &experimentResult) -// if err != nil { -// return nil, err -// } - -// return &experimentResult, err -// } - -// // GetData returns the experiment result for a particular namespace and experiment name -// func (cl Client) GetExperimentResult(namespace, experiment string) ([]byte, error) { -// var valCopy []byte -// err := cl.db.View(func(txn *badger.Txn) error { -// item, err := txn.Get([]byte(getExperimentResultKey(namespace, experiment))) -// if err != nil { -// return err -// } - -// valCopy, err = item.ValueCopy(nil) -// if err != nil { -// return err -// } - -// return nil -// }) -// if err != nil { -// return nil, err -// } - -// experimentResult := base.ExperimentResult{} -// err = json.Unmarshal(valCopy, &experimentResult) -// if err != nil { -// return nil, err -// } - -// return &experimentResult, err -// } - -// SetExperimentResult sets arbitrary data (such as HTTP/gRPC results) for a particular namespace and experiment name -// the data is []byte in order to make this function reusable for different tasks -func (cl Client) SetExperimentResult(namespace, experiment string, data []byte) error { key := getExperimentResultKey(namespace, experiment) - return cl.db.Update(func(txn *badger.Txn) error { - e := badger.NewEntry([]byte(key), data).WithTTL(cl.additionalOptions.TTL) + e := badger.NewEntry([]byte(key), dataBytes).WithTTL(cl.additionalOptions.TTL) err := txn.SetEntry(e) return err }) } -// GetExperimentResult returns arbitrary data (such as HTTP/gRPC results) for a particular namespace and experiment name +// GetExperimentResult sets the experiment result for a particular namespace and experiment name // the data is []byte in order to make this function reusable for different tasks -func (cl Client) GetExperimentResult(namespace, experiment string) ([]byte, error) { +func (cl Client) 
GetExperimentResult(namespace, experiment string) (*base.ExperimentResult, error) { var valCopy []byte err := cl.db.View(func(txn *badger.Txn) error { item, err := txn.Get([]byte(getExperimentResultKey(namespace, experiment))) if err != nil { - return err + return fmt.Errorf("cannot get ExperimentResult with name: \"%s\" and namespace: %s: %e", experiment, namespace, err) } valCopy, err = item.ValueCopy(nil) if err != nil { - return err + return fmt.Errorf("cannot copy value of ExperimentResult with name: \"%s\" and namespace: %s: %e", experiment, namespace, err) } return nil }) + if err != nil { + return nil, err + } + + experimentResult := base.ExperimentResult{} + err = json.Unmarshal(valCopy, &experimentResult) + if err != nil { + return nil, fmt.Errorf("cannot JSON unmarshal ExperimentResult: \"%s\": %e", string(valCopy), err) + } - return valCopy, err + return &experimentResult, err } diff --git a/storage/badgerdb/simple_test.go b/storage/badgerdb/simple_test.go index 148b4faee..8b737ecdf 100644 --- a/storage/badgerdb/simple_test.go +++ b/storage/badgerdb/simple_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/dgraph-io/badger/v4" + "github.com/iter8-tools/iter8/base" "github.com/stretchr/testify/assert" ) @@ -273,7 +274,7 @@ func TestGetMetrics(t *testing.T) { assert.Equal(t, "{}", string(jsonMetrics)) } -func TestSetResult(t *testing.T) { +func TestGetExperimentResult(t *testing.T) { tempDirPath := t.TempDir() client, err := GetClient(badger.DefaultOptions(tempDirPath), AdditionalOptions{}) @@ -281,43 +282,16 @@ func TestSetResult(t *testing.T) { namespace := "my-namespace" experiment := "my-experiment" - data := "hello world" - err = client.SetData(namespace, experiment, []byte(data)) - assert.NoError(t, err) - - // get result - err = client.db.View(func(txn *badger.Txn) error { - key := getDataKey(namespace, experiment) - item, err := txn.Get([]byte(key)) - assert.NoError(t, err) - assert.NotNil(t, item) - - err = item.Value(func(val []byte) error { - assert.Equal(t, data, string(val)) - return nil - }) - assert.NoError(t, err) - - return nil - }) - assert.NoError(t, err) -} - -func TestGetResult(t *testing.T) { - tempDirPath := t.TempDir() - - client, err := GetClient(badger.DefaultOptions(tempDirPath), AdditionalOptions{}) - assert.NoError(t, err) - - namespace := "my-namespace" - experiment := "my-experiment" - data := "hello world" + experimentResult := base.ExperimentResult{ + Name: experiment, + Namespace: namespace, + } - err = client.SetData(namespace, experiment, []byte(data)) + err = client.SetExperimentResult(namespace, experiment, &experimentResult) assert.NoError(t, err) - result, err := client.GetData(namespace, experiment) + result, err := client.GetExperimentResult(namespace, experiment) assert.NoError(t, err) - assert.Equal(t, data, string(result)) + assert.Equal(t, &experimentResult, result) } diff --git a/storage/interface.go b/storage/interface.go index b0578515a..e75669196 100644 --- a/storage/interface.go +++ b/storage/interface.go @@ -1,6 +1,8 @@ // Package storage provides the storage client for the controllers package package storage +import "github.com/iter8-tools/iter8/base" + // SummarizedMetric is a metric summary type SummarizedMetric struct { Count uint64 @@ -64,23 +66,10 @@ type Interface interface { // Example key: kt-users::my-app::0::my-signature::my-user -> true SetUser(applicationName string, version int, signature, user string) error - // returns arbitrary data (such as HTTP/gRPC results) for a particular namespace and experiment - 
GetData(namespace, experiment string) ([]byte, error) - - // Example key: kt-data::my-namespace::my-experiment-name -> per endpoint JSON data + summary - SetData(namespace, experiment string, data []byte) error - - // // get ExperimentResult for a particular namespace and experiment - // GetResults(namespace, experiment string) (*base.ExperimentResult, error) - - // // called by the A/B/n SDK gRPC API implementation (SDK for application clients) - // // Example key: kt-metric::my-app::0::my-signature::my-metric::my-user::my-transaction-id -> my-metric-value (get the metric value with all the provided information) - // SetResult(namespace, experiment string, experimentResult *base.ExperimentResult) error - // get ExperimentResult for a particular namespace and experiment - GetExperimentResult(namespace, experiment string) ([]byte, error) + GetExperimentResult(namespace, experiment string) (*base.ExperimentResult, error) // called by the A/B/n SDK gRPC API implementation (SDK for application clients) // Example key: kt-metric::my-app::0::my-signature::my-metric::my-user::my-transaction-id -> my-metric-value (get the metric value with all the provided information) - SetExperimentResult(namespace, experiment string, data []byte) error + SetExperimentResult(namespace, experiment string, data *base.ExperimentResult) error } From 57332515125adee13f3a34b821ce73aaf6a75178 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Sun, 20 Aug 2023 11:09:54 -0400 Subject: [PATCH 068/121] Only write experimentResult to metrics store Signed-off-by: Alan Cha --- base/collect_grpc_test.go | 191 ++++++++----------------- base/collect_http.go | 24 ++++ base/collect_http_test.go | 283 ++++++++------------------------------ base/experiment.go | 1 + base/experiment_test.go | 40 +----- base/test_helpers.go | 19 ++- cmd/kassert_test.go | 17 +-- cmd/krun_test.go | 22 --- driver/filedriver.go | 14 +- driver/filedriver_test.go | 12 +- driver/kubedriver.go | 11 +- driver/kubedriver_test.go | 15 +- metrics/server.go | 31 +++-- metrics/server_test.go | 223 +++++++++++++++--------------- 14 files changed, 328 insertions(+), 575 deletions(-) diff --git a/base/collect_grpc_test.go b/base/collect_grpc_test.go index 64170c422..ad77293db 100644 --- a/base/collect_grpc_test.go +++ b/base/collect_grpc_test.go @@ -2,9 +2,6 @@ package base import ( "encoding/json" - "fmt" - "io" - "net/http" "os" "strings" "testing" @@ -34,35 +31,6 @@ func TestRunCollectGRPCUnary(t *testing.T) { call := "helloworld.Greeter.SayHello" - // mock metrics server - StartHTTPMock(t) - metricsServerCalled := false - MockMetricsServer(MockMetricsServerInput{ - MetricsServerURL: metricsServerURL, - PerformanceResultCallback: func(req *http.Request) { - metricsServerCalled = true - - // check query parameters - assert.Equal(t, myName, req.URL.Query().Get("experiment")) - assert.Equal(t, myNamespace, req.URL.Query().Get("namespace")) - - // check payload - body, err := io.ReadAll(req.Body) - assert.NoError(t, err) - assert.NotNil(t, body) - - // check payload content - bodyFortioResult := HTTPResult{} - err = json.Unmarshal(body, &bodyFortioResult) - assert.NoError(t, err) - assert.NotNil(t, body) - - if _, ok := bodyFortioResult[call]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", call)) - } - }, - }) - _ = os.Chdir(t.TempDir()) callType := helloworld.Unary gs, s, err := internal.StartServer(false) @@ -102,10 +70,21 @@ func TestRunCollectGRPCUnary(t *testing.T) { assert.NoError(t, err) assert.Equal(t, 
exp.Result.Insights.NumVersions, 1) - assert.True(t, metricsServerCalled) count := gs.GetCount(callType) assert.Equal(t, 200, count) + + taskData := exp.Result.Insights.TaskData[CollectGRPCTaskName] + assert.NotNil(t, taskData) + + taskDataBytes, err := json.Marshal(taskData) + assert.NoError(t, err) + ghzResult := GHZResult{} + err = json.Unmarshal(taskDataBytes, &ghzResult) + assert.NoError(t, err) + + assert.Equal(t, 1, len(ghzResult)) + assert.NotNil(t, ghzResult[call]) } // If the endpoint does not exist, fail gracefully @@ -147,47 +126,6 @@ func TestRunCollectGRPCMultipleEndpoints(t *testing.T) { err := os.Setenv(MetricsServerURL, metricsServerURL) assert.NoError(t, err) - // mock metrics server - StartHTTPMock(t) - metricsServerCalled := false - MockMetricsServer(MockMetricsServerInput{ - MetricsServerURL: metricsServerURL, - PerformanceResultCallback: func(req *http.Request) { - metricsServerCalled = true - - // check query parameters - assert.Equal(t, myName, req.URL.Query().Get("experiment")) - assert.Equal(t, myNamespace, req.URL.Query().Get("namespace")) - - // check payload - body, err := io.ReadAll(req.Body) - assert.NoError(t, err) - assert.NotNil(t, body) - - // check payload content - bodyFortioResult := HTTPResult{} - err = json.Unmarshal(body, &bodyFortioResult) - assert.NoError(t, err) - assert.NotNil(t, body) - - if _, ok := bodyFortioResult[unary]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", unary)) - } - - if _, ok := bodyFortioResult[server]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", server)) - } - - if _, ok := bodyFortioResult[client]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", client)) - } - - if _, ok := bodyFortioResult[bidirectional]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", bidirectional)) - } - }, - }) - _ = os.Chdir(t.TempDir()) callType := helloworld.Unary gs, s, err := internal.StartServer(false) @@ -243,12 +181,29 @@ func TestRunCollectGRPCMultipleEndpoints(t *testing.T) { assert.NoError(t, err) assert.Equal(t, exp.Result.Insights.NumVersions, 1) - assert.True(t, metricsServerCalled) count := gs.GetCount(callType) assert.Equal(t, 200, count) + + taskData := exp.Result.Insights.TaskData[CollectGRPCTaskName] + assert.NotNil(t, taskData) + + taskDataBytes, err := json.Marshal(taskData) + assert.NoError(t, err) + ghzResult := GHZResult{} + err = json.Unmarshal(taskDataBytes, &ghzResult) + assert.NoError(t, err) + + assert.Equal(t, 4, len(ghzResult)) + assert.NotNil(t, ghzResult[unary]) + assert.NotNil(t, ghzResult[server]) + assert.NotNil(t, ghzResult[client]) + assert.NotNil(t, ghzResult[bidirectional]) } +// TODO: should this still return insights even though the endpoints cannot be reached? 
+// This would mean no Grafana dashboard would be produced +// // If the endpoints cannot be reached, then do not throw an error // Should not return an nil pointer dereference error (see #1451) func TestRunCollectGRPCMultipleNoEndpoints(t *testing.T) { @@ -262,31 +217,6 @@ func TestRunCollectGRPCMultipleNoEndpoints(t *testing.T) { clientCall := "helloworld.Greeter.SayHellos" bidirectionalCall := "helloworld.Greeter.SayHelloBidi" - // mock metrics server - StartHTTPMock(t) - metricsServerCalled := false - MockMetricsServer(MockMetricsServerInput{ - MetricsServerURL: metricsServerURL, - PerformanceResultCallback: func(req *http.Request) { - metricsServerCalled = true - - // check query parameters - assert.Equal(t, myName, req.URL.Query().Get("experiment")) - assert.Equal(t, myNamespace, req.URL.Query().Get("namespace")) - - // check payload - body, err := io.ReadAll(req.Body) - assert.NoError(t, err) - assert.NotNil(t, body) - - // check payload content - bodyFortioResult := HTTPResult{} - err = json.Unmarshal(body, &bodyFortioResult) - assert.NoError(t, err) - assert.Equal(t, `{}`, string(body)) - }, - }) - // valid collect GRPC task... should succeed ct := &collectGRPCTask{ TaskMeta: TaskMeta{ @@ -330,7 +260,17 @@ func TestRunCollectGRPCMultipleNoEndpoints(t *testing.T) { exp.initResults(1) err = ct.run(exp) assert.NoError(t, err) - assert.True(t, metricsServerCalled) + + taskData := exp.Result.Insights.TaskData[CollectGRPCTaskName] + assert.NotNil(t, taskData) + + taskDataBytes, err := json.Marshal(taskData) + assert.NoError(t, err) + ghzResult := GHZResult{} + err = json.Unmarshal(taskDataBytes, &ghzResult) + assert.NoError(t, err) + + assert.Equal(t, 0, len(ghzResult)) } func TestRunCollectGRPCSingleEndpointMultipleCalls(t *testing.T) { @@ -339,41 +279,6 @@ func TestRunCollectGRPCSingleEndpointMultipleCalls(t *testing.T) { err := os.Setenv(MetricsServerURL, metricsServerURL) assert.NoError(t, err) - // mock metrics server - StartHTTPMock(t) - metricsServerCalled := false - MockMetricsServer(MockMetricsServerInput{ - MetricsServerURL: metricsServerURL, - PerformanceResultCallback: func(req *http.Request) { - metricsServerCalled = true - - // check query parameters - assert.Equal(t, myName, req.URL.Query().Get("experiment")) - assert.Equal(t, myNamespace, req.URL.Query().Get("namespace")) - - // check payload - body, err := io.ReadAll(req.Body) - assert.NoError(t, err) - assert.NotNil(t, body) - - // check payload content - bodyFortioResult := HTTPResult{} - err = json.Unmarshal(body, &bodyFortioResult) - assert.NoError(t, err) - assert.NotNil(t, body) - - fmt.Println(string(body)) - - if _, ok := bodyFortioResult[unary]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", unary)) - } - - if _, ok := bodyFortioResult[unary2]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", unary2)) - } - }, - }) - _ = os.Chdir(t.TempDir()) callType := helloworld.Unary gs, s, err := internal.StartServer(false) @@ -420,8 +325,20 @@ func TestRunCollectGRPCSingleEndpointMultipleCalls(t *testing.T) { assert.NoError(t, err) assert.Equal(t, exp.Result.Insights.NumVersions, 1) - assert.True(t, metricsServerCalled) count := gs.GetCount(callType) assert.Equal(t, 400, count) + + taskData := exp.Result.Insights.TaskData[CollectGRPCTaskName] + assert.NotNil(t, taskData) + + taskDataBytes, err := json.Marshal(taskData) + assert.NoError(t, err) + ghzResult := GHZResult{} + err = json.Unmarshal(taskDataBytes, &ghzResult) + assert.NoError(t, 
err) + + assert.Equal(t, 2, len(ghzResult)) + assert.NotNil(t, ghzResult[unary]) + assert.NotNil(t, ghzResult[unary2]) } diff --git a/base/collect_http.go b/base/collect_http.go index f538b2f9e..02394fa35 100644 --- a/base/collect_http.go +++ b/base/collect_http.go @@ -81,6 +81,30 @@ var ( defaultPercentiles = [...]float64{50.0, 75.0, 90.0, 95.0, 99.0, 99.9} ) +// errorCode checks if a given code is an error code +func (t *collectHTTPTask) errorCode(code int) bool { + // connection failure + if code == -1 { + return true + } + // HTTP errors + for _, lims := range t.With.ErrorRanges { + // if no lower limit (check upper) + if lims.Lower == nil && code <= *lims.Upper { + return true + } + // if no upper limit (check lower) + if lims.Upper == nil && code >= *lims.Lower { + return true + } + // if both limits are present (check both) + if lims.Upper != nil && lims.Lower != nil && code <= *lims.Upper && code >= *lims.Lower { + return true + } + } + return false +} + // collectHTTPTask enables load testing of HTTP services. type collectHTTPTask struct { // TaskMeta has fields common to all tasks diff --git a/base/collect_http_test.go b/base/collect_http_test.go index 91cb96435..1c2900809 100644 --- a/base/collect_http_test.go +++ b/base/collect_http_test.go @@ -10,7 +10,6 @@ import ( "testing" "fortio.org/fortio/fhttp" - "github.com/jarcoal/httpmock" "github.com/stretchr/testify/assert" ) @@ -50,37 +49,6 @@ func TestRunCollectHTTP(t *testing.T) { url := fmt.Sprintf("http://localhost:%d/", addr.Port) + foo - // mock metrics server - StartHTTPMock(t) - metricsServerCalled := false - MockMetricsServer(MockMetricsServerInput{ - MetricsServerURL: metricsServerURL, - PerformanceResultCallback: func(req *http.Request) { - metricsServerCalled = true - - // check query parameters - assert.Equal(t, myName, req.URL.Query().Get("experiment")) - assert.Equal(t, myNamespace, req.URL.Query().Get("namespace")) - - // check payload - body, err := io.ReadAll(req.Body) - assert.NoError(t, err) - assert.NotNil(t, body) - - // check payload content - bodyFortioResult := HTTPResult{} - err = json.Unmarshal(body, &bodyFortioResult) - assert.NoError(t, err) - assert.NotNil(t, body) - - fmt.Println(string(body)) - - if _, ok := bodyFortioResult[url]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", url)) - } - }, - }) - // valid collect HTTP task... 
should succeed ct := &collectHTTPTask{ TaskMeta: TaskMeta{ @@ -107,9 +75,20 @@ func TestRunCollectHTTP(t *testing.T) { exp.initResults(1) err = ct.run(exp) assert.NoError(t, err) - assert.True(t, metricsServerCalled) // ensure that the metrics server is called - assert.True(t, called) // ensure that the /foo/ handler is called + assert.True(t, called) // ensure that the /foo/ handler is called assert.Equal(t, exp.Result.Insights.NumVersions, 1) + + taskData := exp.Result.Insights.TaskData[CollectHTTPTaskName] + assert.NotNil(t, taskData) + + taskDataBytes, err := json.Marshal(taskData) + assert.NoError(t, err) + httpResult := HTTPResult{} + err = json.Unmarshal(taskDataBytes, &httpResult) + assert.NoError(t, err) + + assert.Equal(t, 1, len(httpResult)) + assert.NotNil(t, httpResult[url]) } // If the endpoint does not exist, fail gracefully @@ -185,39 +164,6 @@ func TestRunCollectHTTPMultipleEndpoints(t *testing.T) { endpoint1URL := baseURL + foo endpoint2URL := baseURL + bar - // mock metrics server - StartHTTPMock(t) - metricsServerCalled := false - MockMetricsServer(MockMetricsServerInput{ - MetricsServerURL: metricsServerURL, - PerformanceResultCallback: func(req *http.Request) { - metricsServerCalled = true - - // check query parameters - assert.Equal(t, myName, req.URL.Query().Get("experiment")) - assert.Equal(t, myNamespace, req.URL.Query().Get("namespace")) - - // check payload - body, err := io.ReadAll(req.Body) - assert.NoError(t, err) - assert.NotNil(t, body) - - // check payload content - bodyFortioResult := HTTPResult{} - err = json.Unmarshal(body, &bodyFortioResult) - assert.NoError(t, err) - assert.NotNil(t, body) - - if _, ok := bodyFortioResult[endpoint1]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", endpoint1)) - } - - if _, ok := bodyFortioResult[endpoint2]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", endpoint2)) - } - }, - }) - // valid collect HTTP task... 
should succeed ct := &collectHTTPTask{ TaskMeta: TaskMeta{ @@ -255,10 +201,22 @@ func TestRunCollectHTTPMultipleEndpoints(t *testing.T) { exp.initResults(1) err = ct.run(exp) assert.NoError(t, err) - assert.True(t, metricsServerCalled) // ensure that the metrics server is called - assert.True(t, fooCalled) // ensure that the /foo/ handler is called - assert.True(t, barCalled) // ensure that the /bar/ handler is called + assert.True(t, fooCalled) // ensure that the /foo/ handler is called + assert.True(t, barCalled) // ensure that the /bar/ handler is called assert.Equal(t, exp.Result.Insights.NumVersions, 1) + + taskData := exp.Result.Insights.TaskData[CollectHTTPTaskName] + assert.NotNil(t, taskData) + + taskDataBytes, err := json.Marshal(taskData) + assert.NoError(t, err) + httpResult := HTTPResult{} + err = json.Unmarshal(taskDataBytes, &httpResult) + assert.NoError(t, err) + + assert.Equal(t, 2, len(httpResult)) + assert.NotNil(t, httpResult[endpoint1]) + assert.NotNil(t, httpResult[endpoint2]) } // Multiple endpoints are provided but they share one URL @@ -291,39 +249,6 @@ func TestRunCollectHTTPSingleEndpointMultipleCalls(t *testing.T) { endpoint1 := "endpoint1" endpoint2 := "endpoint2" - // mock metrics server - StartHTTPMock(t) - metricsServerCalled := false - MockMetricsServer(MockMetricsServerInput{ - MetricsServerURL: metricsServerURL, - PerformanceResultCallback: func(req *http.Request) { - metricsServerCalled = true - - // check query parameters - assert.Equal(t, myName, req.URL.Query().Get("experiment")) - assert.Equal(t, myNamespace, req.URL.Query().Get("namespace")) - - // check payload - body, err := io.ReadAll(req.Body) - assert.NoError(t, err) - assert.NotNil(t, body) - - // check payload content - bodyFortioResult := HTTPResult{} - err = json.Unmarshal(body, &bodyFortioResult) - assert.NoError(t, err) - assert.NotNil(t, body) - - if _, ok := bodyFortioResult[endpoint1]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", endpoint1)) - } - - if _, ok := bodyFortioResult[endpoint2]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", endpoint2)) - } - }, - }) - // valid collect HTTP task... should succeed ct := &collectHTTPTask{ TaskMeta: TaskMeta{ @@ -362,10 +287,25 @@ func TestRunCollectHTTPSingleEndpointMultipleCalls(t *testing.T) { assert.NoError(t, err) assert.True(t, fooCalled) // ensure that the /foo/ handler is called assert.True(t, barCalled) // ensure that the /bar/ handler is called - assert.True(t, metricsServerCalled) assert.Equal(t, exp.Result.Insights.NumVersions, 1) + + taskData := exp.Result.Insights.TaskData[CollectHTTPTaskName] + assert.NotNil(t, taskData) + + taskDataBytes, err := json.Marshal(taskData) + assert.NoError(t, err) + httpResult := HTTPResult{} + err = json.Unmarshal(taskDataBytes, &httpResult) + assert.NoError(t, err) + + assert.Equal(t, 2, len(httpResult)) + assert.NotNil(t, httpResult[endpoint1]) + assert.NotNil(t, httpResult[endpoint2]) } +// TODO: should this still return insights even though the endpoints cannot be reached? 
+// This would mean no Grafana dashboard would be produced +// // If the endpoints cannot be reached, then do not throw an error // Should not return an nil pointer dereference error (see #1451) func TestRunCollectHTTPMultipleNoEndpoints(t *testing.T) { @@ -380,33 +320,6 @@ func TestRunCollectHTTPMultipleNoEndpoints(t *testing.T) { endpoint1URL := baseURL + foo endpoint2URL := baseURL + bar - // mock metrics server - StartHTTPMock(t) - metricsServerCalled := false - MockMetricsServer(MockMetricsServerInput{ - MetricsServerURL: metricsServerURL, - PerformanceResultCallback: func(req *http.Request) { - metricsServerCalled = true - - // check query parameters - assert.Equal(t, myName, req.URL.Query().Get("experiment")) - assert.Equal(t, myNamespace, req.URL.Query().Get("namespace")) - - // check payload - body, err := io.ReadAll(req.Body) - assert.NoError(t, err) - assert.NotNil(t, body) - - // check payload content - bodyFortioResult := HTTPResult{} - err = json.Unmarshal(body, &bodyFortioResult) - assert.NoError(t, err) - - // no EndpointResults because endpoints cannot be reached - assert.Equal(t, `{}`, string(body)) - }, - }) - // valid collect HTTP task... should succeed ct := &collectHTTPTask{ TaskMeta: TaskMeta{ @@ -444,7 +357,17 @@ func TestRunCollectHTTPMultipleNoEndpoints(t *testing.T) { exp.initResults(1) err = ct.run(exp) assert.NoError(t, err) - assert.True(t, metricsServerCalled) + + taskData := exp.Result.Insights.TaskData[CollectHTTPTaskName] + assert.NotNil(t, taskData) + + taskDataBytes, err := json.Marshal(taskData) + assert.NoError(t, err) + httpResult := HTTPResult{} + err = json.Unmarshal(taskDataBytes, &httpResult) + assert.NoError(t, err) + + assert.Equal(t, 0, len(httpResult)) } func TestErrorCode(t *testing.T) { @@ -474,99 +397,3 @@ func TestErrorCode(t *testing.T) { }) assert.True(t, task.errorCode(5)) } - -func TestPutPerformanceResultToMetricsService(t *testing.T) { - StartHTTPMock(t) - - metricsServerURL := "http://my-server.com" - namespace := "my-namespace" - experiment := "my-experiment" - data := map[string]string{ - "hello": "world", - } - - called := false - httpmock.RegisterResponder(http.MethodPut, metricsServerURL+PerformanceResultPath, - func(req *http.Request) (*http.Response, error) { - called = true - - assert.Equal(t, namespace, req.URL.Query().Get("namespace")) - assert.Equal(t, experiment, req.URL.Query().Get("experiment")) - - body, err := io.ReadAll(req.Body) - assert.NoError(t, err) - assert.Equal(t, "{\"hello\":\"world\"}", string(body)) - - return httpmock.NewStringResponse(200, "success"), nil - }) - - err := putPerformanceResultToMetricsService( - metricsServerURL, - namespace, - experiment, - data, - ) - assert.NoError(t, err) - assert.True(t, called) -} - -func TestRunCollectHTTPGrafana(t *testing.T) { - // METRICS_SERVER_URL must be provided - metricsServerURL := "http://iter8.default:8080" - err := os.Setenv("METRICS_SERVER_URL", metricsServerURL) - assert.NoError(t, err) - - // mock metrics server - metricsServerCalled := false - namespace := "default" - experiment := "default" - StartHTTPMock(t) - httpmock.RegisterResponder(http.MethodPut, metricsServerURL+PerformanceResultPath, - func(req *http.Request) (*http.Response, error) { - metricsServerCalled = true - - assert.Equal(t, namespace, req.URL.Query().Get("namespace")) - assert.Equal(t, experiment, req.URL.Query().Get("experiment")) - - return httpmock.NewStringResponse(200, "success"), nil - }) - - mux, addr := fhttp.DynamicHTTPServer(false) - - // mock endpoint - endpointCalled 
:= false - handler := func(w http.ResponseWriter, r *http.Request) { - endpointCalled = true - - w.WriteHeader(200) - } - mux.HandleFunc("/"+foo, handler) - - baseURL := fmt.Sprintf("http://localhost:%d/", addr.Port) - - // valid collect HTTP task... should succeed - ct := &collectHTTPTask{ - TaskMeta: TaskMeta{ - Task: StringPointer(CollectHTTPTaskName), - }, - With: collectHTTPInputs{ - endpoint: endpoint{ - URL: baseURL + foo, - }, - }, - } - - exp := &Experiment{ - Spec: []Task{ct}, - Result: &ExperimentResult{}, - Metadata: ExperimentMetadata{ - Namespace: "default", - Name: "default", - }, - } - exp.initResults(1) - err = ct.run(exp) - assert.NoError(t, err) - assert.True(t, metricsServerCalled) - assert.True(t, endpointCalled) -} diff --git a/base/experiment.go b/base/experiment.go index a8589f49d..cbfa2dbb3 100644 --- a/base/experiment.go +++ b/base/experiment.go @@ -239,6 +239,7 @@ func (r *ExperimentResult) initInsightsWithNumVersions(n int) error { if r.Insights == nil { r.Insights = &Insights{ NumVersions: n, + TaskData: map[string]interface{}{}, } } else { if r.Insights.NumVersions != n { diff --git a/base/experiment_test.go b/base/experiment_test.go index ce28df03a..2c3b94d69 100644 --- a/base/experiment_test.go +++ b/base/experiment_test.go @@ -43,35 +43,6 @@ func TestRunningTasks(t *testing.T) { var verifyHandlerCalled bool mux.HandleFunc("/get", GetTrackingHandler(&verifyHandlerCalled)) - // mock metrics server - StartHTTPMock(t) - metricsServerCalled := false - MockMetricsServer(MockMetricsServerInput{ - MetricsServerURL: metricsServerURL, - PerformanceResultCallback: func(req *http.Request) { - metricsServerCalled = true - - // check query parameters - assert.Equal(t, myName, req.URL.Query().Get("experiment")) - assert.Equal(t, myNamespace, req.URL.Query().Get("namespace")) - - // check payload - body, err := io.ReadAll(req.Body) - assert.NoError(t, err) - assert.NotNil(t, body) - - // check payload content - bodyFortioResult := HTTPResult{} - err = json.Unmarshal(body, &bodyFortioResult) - assert.NoError(t, err) - assert.NotNil(t, body) - - if _, ok := bodyFortioResult[url]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", url)) - } - }, - }) - _ = os.Chdir(t.TempDir()) // valid collect task... 
should succeed @@ -100,7 +71,6 @@ func TestRunningTasks(t *testing.T) { err = ct.run(exp) assert.NoError(t, err) assert.Equal(t, exp.Result.Insights.NumVersions, 1) - assert.True(t, metricsServerCalled) // sanity check -- handler was called assert.True(t, verifyHandlerCalled) } @@ -122,7 +92,7 @@ func TestRunExperiment(t *testing.T) { metricsServerCalled := false MockMetricsServer(MockMetricsServerInput{ MetricsServerURL: metricsServerURL, - PerformanceResultCallback: func(req *http.Request) { + ExperimentResultCallback: func(req *http.Request) { metricsServerCalled = true // check query parameters @@ -135,14 +105,10 @@ func TestRunExperiment(t *testing.T) { assert.NotNil(t, body) // check payload content - bodyFortioResult := HTTPResult{} - err = json.Unmarshal(body, &bodyFortioResult) + bodyExperimentResult := ExperimentResult{} + err = json.Unmarshal(body, &bodyExperimentResult) assert.NoError(t, err) assert.NotNil(t, body) - - if _, ok := bodyFortioResult[url]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", url)) - } }, }) diff --git a/base/test_helpers.go b/base/test_helpers.go index 97233a9b7..111fedcd6 100644 --- a/base/test_helpers.go +++ b/base/test_helpers.go @@ -2,12 +2,14 @@ package base import ( "bytes" + "fmt" "net/http" "os" "path/filepath" "testing" "time" + log "github.com/iter8-tools/iter8/base/log" "github.com/jarcoal/httpmock" "github.com/stretchr/testify/assert" ) @@ -23,8 +25,21 @@ func (m *mockDriver) Read() (*Experiment, error) { } // Write an experiment -func (m *mockDriver) Write(e *Experiment) error { - m.Experiment = e +func (m *mockDriver) Write(exp *Experiment) error { + m.Experiment = exp + + // get URL of metrics server from environment variable + metricsServerURL, ok := os.LookupEnv(MetricsServerURL) + if !ok { + errorMessage := "could not look up METRICS_SERVER_URL environment variable" + log.Logger.Error(errorMessage) + return fmt.Errorf(errorMessage) + } + + err := PutExperimentResultToMetricsService(metricsServerURL, exp.Metadata.Namespace, exp.Metadata.Name, exp.Result) + if err != nil { + return err + } return nil } diff --git a/cmd/kassert_test.go b/cmd/kassert_test.go index 5af259c2a..bef6419e7 100644 --- a/cmd/kassert_test.go +++ b/cmd/kassert_test.go @@ -41,7 +41,7 @@ func TestKAssert(t *testing.T) { metricsServerCalled := false base.MockMetricsServer(base.MockMetricsServerInput{ MetricsServerURL: metricsServerURL, - PerformanceResultCallback: func(req *http.Request) { + ExperimentResultCallback: func(req *http.Request) { metricsServerCalled = true // check query parameters @@ -54,14 +54,10 @@ func TestKAssert(t *testing.T) { assert.NotNil(t, body) // check payload content - bodyFortioResult := base.HTTPResult{} - err = json.Unmarshal(body, &bodyFortioResult) + bodyExperimentResult := base.ExperimentResult{} + err = json.Unmarshal(body, &bodyExperimentResult) assert.NoError(t, err) assert.NotNil(t, body) - - if _, ok := bodyFortioResult[url]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", url)) - } }, }) @@ -90,13 +86,6 @@ func testAssert(t *testing.T, experiment string, url string, expectedOutputFile name: "k run", cmd: "k run -g default --namespace default", }, - // k assert - { - name: "k assert", - cmd: "k assert -c completed -c nofailure", - golden: base.CompletePath("../testdata", expectedOutputFile), - wantError: expectError, - }, } // fake kube cluster diff --git a/cmd/krun_test.go b/cmd/krun_test.go index 74ed976e2..91ecf26e5 100644 --- a/cmd/krun_test.go 
+++ b/cmd/krun_test.go @@ -34,28 +34,6 @@ func TestKRun(t *testing.T) { metricsServerCalled := false base.MockMetricsServer(base.MockMetricsServerInput{ MetricsServerURL: metricsServerURL, - PerformanceResultCallback: func(req *http.Request) { - metricsServerCalled = true - - // check query parameters - assert.Equal(t, myName, req.URL.Query().Get("experiment")) - assert.Equal(t, myNamespace, req.URL.Query().Get("namespace")) - - // check payload - body, err := io.ReadAll(req.Body) - assert.NoError(t, err) - assert.NotNil(t, body) - - // check payload content - bodyFortioResult := base.HTTPResult{} - err = json.Unmarshal(body, &bodyFortioResult) - assert.NoError(t, err) - assert.NotNil(t, body) - - if _, ok := bodyFortioResult[url]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", url)) - } - }, ExperimentResultCallback: func(req *http.Request) { metricsServerCalled = true diff --git a/driver/filedriver.go b/driver/filedriver.go index 739f59f75..adcf4861c 100644 --- a/driver/filedriver.go +++ b/driver/filedriver.go @@ -8,6 +8,7 @@ import ( "github.com/iter8-tools/iter8/base" "github.com/iter8-tools/iter8/base/log" + "sigs.k8s.io/yaml" ) // FileDriver enables reading and writing experiment spec and result files @@ -28,6 +29,7 @@ func (f *FileDriver) Read() (*base.Experiment, error) { // Write the experiment func (f *FileDriver) Write(exp *base.Experiment) error { + // write to metrics server // get URL of metrics server from environment variable metricsServerURL, ok := os.LookupEnv(base.MetricsServerURL) if !ok { @@ -38,7 +40,17 @@ func (f *FileDriver) Write(exp *base.Experiment) error { err := base.PutExperimentResultToMetricsService(metricsServerURL, exp.Metadata.Namespace, exp.Metadata.Name, exp.Result) if err != nil { - return err + errorMessage := "could not write experiment result to metrics service" + log.Logger.Error(errorMessage) + return fmt.Errorf(errorMessage) + } + + // write to file + b, _ := yaml.Marshal(exp) + err = os.WriteFile(path.Join(f.RunDir, ExperimentPath), b, 0600) + if err != nil { + log.Logger.WithStackTrace(err.Error()).Error("unable to write experiment") + return errors.New("unable to write experiment") } return nil } diff --git a/driver/filedriver_test.go b/driver/filedriver_test.go index 94f300492..c0bb2805f 100644 --- a/driver/filedriver_test.go +++ b/driver/filedriver_test.go @@ -35,7 +35,7 @@ func TestLocalRun(t *testing.T) { metricsServerCalled := false base.MockMetricsServer(base.MockMetricsServerInput{ MetricsServerURL: metricsServerURL, - PerformanceResultCallback: func(req *http.Request) { + ExperimentResultCallback: func(req *http.Request) { metricsServerCalled = true // check query parameters @@ -48,14 +48,10 @@ func TestLocalRun(t *testing.T) { assert.NotNil(t, body) // check payload content - bodyFortioResult := base.HTTPResult{} - err = json.Unmarshal(body, &bodyFortioResult) + bodyExperimentResult := base.ExperimentResult{} + err = json.Unmarshal(body, &bodyExperimentResult) assert.NoError(t, err) assert.NotNil(t, body) - - if _, ok := bodyFortioResult[url]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", url)) - } }, }) @@ -71,6 +67,7 @@ func TestLocalRun(t *testing.T) { assert.NoError(t, err) // sanity check -- handler was called assert.True(t, verifyHandlerCalled) + assert.True(t, metricsServerCalled) // check results exp, err := base.BuildExperiment(&fd) @@ -81,7 +78,6 @@ func TestLocalRun(t *testing.T) { fmt.Println(err, exp.Completed(), exp.NoFailure()) 
assert.True(t, exp.Completed() && exp.NoFailure()) - assert.True(t, metricsServerCalled) } func TestFileDriverReadError(t *testing.T) { diff --git a/driver/kubedriver.go b/driver/kubedriver.go index 02a0ac409..57efddca6 100644 --- a/driver/kubedriver.go +++ b/driver/kubedriver.go @@ -251,6 +251,7 @@ func (kd *KubeDriver) updateExperimentSecret(e *base.Experiment) error { // Write writes a Kubernetes experiment func (kd *KubeDriver) Write(exp *base.Experiment) error { + // write to metrics server // get URL of metrics server from environment variable metricsServerURL, ok := os.LookupEnv(base.MetricsServerURL) if !ok { @@ -261,7 +262,15 @@ func (kd *KubeDriver) Write(exp *base.Experiment) error { err := base.PutExperimentResultToMetricsService(metricsServerURL, exp.Metadata.Namespace, exp.Metadata.Name, exp.Result) if err != nil { - return err + errorMessage := "could not write experiment result to metrics service" + log.Logger.Error(errorMessage) + return fmt.Errorf(errorMessage) + } + + // write to secret + if err := kd.updateExperimentSecret(exp); err != nil { + log.Logger.WithStackTrace(err.Error()).Error("unable to write experiment") + return errors.New("unable to write experiment") } return nil } diff --git a/driver/kubedriver_test.go b/driver/kubedriver_test.go index c6f2a2f8b..164480f10 100644 --- a/driver/kubedriver_test.go +++ b/driver/kubedriver_test.go @@ -84,7 +84,7 @@ func TestKubeRun(t *testing.T) { metricsServerCalled := false base.MockMetricsServer(base.MockMetricsServerInput{ MetricsServerURL: metricsServerURL, - PerformanceResultCallback: func(req *http.Request) { + ExperimentResultCallback: func(req *http.Request) { metricsServerCalled = true // check query parameters @@ -97,14 +97,10 @@ func TestKubeRun(t *testing.T) { assert.NotNil(t, body) // check payload content - bodyFortioResult := base.HTTPResult{} - err = json.Unmarshal(body, &bodyFortioResult) + bodyExperimentResult := base.ExperimentResult{} + err = json.Unmarshal(body, &bodyExperimentResult) assert.NoError(t, err) assert.NotNil(t, body) - - if _, ok := bodyFortioResult[url]; !ok { - assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", url)) - } }, }) @@ -145,6 +141,11 @@ func TestKubeRun(t *testing.T) { // check results exp, err := base.BuildExperiment(kd) assert.NoError(t, err) + + x, _ := json.Marshal(exp) + fmt.Println(string(x)) + fmt.Println(err, exp.Completed(), exp.NoFailure()) + assert.True(t, exp.Completed() && exp.NoFailure()) } diff --git a/metrics/server.go b/metrics/server.go index c9e3aaad6..7281c975c 100644 --- a/metrics/server.go +++ b/metrics/server.go @@ -507,12 +507,20 @@ func getHTTPDashboardHelper(experimentResult *base.ExperimentResult) httpDashboa // get raw data from ExperimentResult httpTaskData := experimentResult.Insights.TaskData[util.CollectHTTPTaskName] if httpTaskData == nil { + log.Logger.Error("cannot get http task data from Insights") return dashboard } - // cast the raw data (task data) into HTTPResult - httpResult, ok := httpTaskData.(util.HTTPResult) - if !ok { + httpTaskDataBytes, err := json.Marshal(httpTaskData) + if err != nil { + log.Logger.Error("cannot marshal http task data") + return dashboard + } + + httpResult := base.HTTPResult{} + err = json.Unmarshal(httpTaskDataBytes, &httpResult) + if err != nil { + log.Logger.Error("cannot unmarshal http task data into HTTPResult") return dashboard } @@ -635,14 +643,21 @@ func getGRPCDashboardHelper(experimentResult *base.ExperimentResult) ghzDashboar } // get raw data from ExperimentResult - 
httpTaskData := experimentResult.Insights.TaskData[util.CollectHTTPTaskName] - if httpTaskData == nil { + ghzTaskData := experimentResult.Insights.TaskData[util.CollectGRPCTaskName] + if ghzTaskData == nil { + return dashboard + } + + ghzTaskDataBytes, err := json.Marshal(ghzTaskData) + if err != nil { + log.Logger.Error("cannot marshal ghz task data") return dashboard } - // cast the raw data (task data) into HTTPResult - ghzResult, ok := httpTaskData.(util.GHZResult) - if !ok { + ghzResult := base.GHZResult{} + err = json.Unmarshal(ghzTaskDataBytes, &ghzResult) + if err != nil { + log.Logger.Error("cannot unmarshal ghz task data into GHZResult") return dashboard } diff --git a/metrics/server_test.go b/metrics/server_test.go index 9cb650015..f818860c6 100644 --- a/metrics/server_test.go +++ b/metrics/server_test.go @@ -872,7 +872,7 @@ func TestGetGRPCDashboardHelper(t *testing.T) { NumCompletedTasks: 5, Insights: &util.Insights{ TaskData: map[string]interface{}{ - util.CollectHTTPTaskName: ghzResult, + util.CollectGRPCTaskName: ghzResult, }, }, } @@ -995,7 +995,7 @@ func TestPutExperimentResult(t *testing.T) { func TestGetHTTPDashboardInvalidMethod(t *testing.T) { w := httptest.NewRecorder() - req := httptest.NewRequest(http.MethodPost, util.GRPCDashboardPath, nil) + req := httptest.NewRequest(http.MethodPost, util.HTTPDashboardPath, nil) getHTTPDashboard(w, req) res := w.Result() defer func() { @@ -1105,111 +1105,114 @@ func TestGetHTTPDashboard(t *testing.T) { ) } -// func TestGetGRPCDashboardInvalidMethod(t *testing.T) { -// w := httptest.NewRecorder() -// req := httptest.NewRequest(http.MethodPost, util.PerformanceResultPath, nil) -// putPerformanceResult(w, req) -// res := w.Result() -// defer func() { -// err := res.Body.Close() -// assert.NoError(t, err) -// }() -// assert.Equal(t, http.StatusMethodNotAllowed, res.StatusCode) -// } - -// func TestGetGRPCDashboardMissingParameter(t *testing.T) { -// tests := []struct { -// queryParams url.Values -// expectedStatusCode int -// }{ -// { -// expectedStatusCode: http.StatusBadRequest, -// }, -// { -// queryParams: url.Values{ -// "namespace": {"default"}, -// }, -// expectedStatusCode: http.StatusBadRequest, -// }, -// { -// queryParams: url.Values{ -// "experiment": {"default"}, -// }, -// expectedStatusCode: http.StatusBadRequest, -// }, -// } - -// for _, test := range tests { -// w := httptest.NewRecorder() - -// u, err := url.ParseRequestURI(util.PerformanceResultPath) -// assert.NoError(t, err) -// u.RawQuery = test.queryParams.Encode() -// urlStr := fmt.Sprintf("%v", u) - -// req := httptest.NewRequest(http.MethodPut, urlStr, nil) - -// putPerformanceResult(w, req) -// res := w.Result() -// defer func() { -// err := res.Body.Close() -// assert.NoError(t, err) -// }() - -// assert.Equal(t, test.expectedStatusCode, res.StatusCode) -// } -// } - -// func TestGetGRPCDashboard(t *testing.T) { -// // instantiate metrics client -// tempDirPath := t.TempDir() -// client, err := badgerdb.GetClient(badger.DefaultOptions(tempDirPath), badgerdb.AdditionalOptions{}) -// assert.NoError(t, err) -// abn.MetricsClient = client - -// // preload metric client with ghz result -// err = abn.MetricsClient.SetData("default", "default", []byte(ghzResultJSON)) -// assert.NoError(t, err) - -// // preload metric client with experiment result -// experimentResult := util.ExperimentResult{ -// Name: myName, -// Namespace: myNamespace, -// NumCompletedTasks: 5, -// } -// experimentResultBytes, err := json.Marshal(experimentResult) -// assert.NoError(t, err) 
-// err = abn.MetricsClient.SetExperimentResult("default", "default", []byte(experimentResultBytes)) -// assert.NoError(t, err) - -// w := httptest.NewRecorder() - -// // construct inputs to getGRPCDashboard -// u, err := url.ParseRequestURI(util.PerformanceResultPath) -// assert.NoError(t, err) -// params := url.Values{ -// "namespace": {"default"}, -// "experiment": {"default"}, -// } -// u.RawQuery = params.Encode() -// urlStr := fmt.Sprintf("%v", u) - -// req := httptest.NewRequest(http.MethodGet, urlStr, nil) - -// // get ghz dashboard based on result in metrics client -// getGRPCDashboard(w, req) -// res := w.Result() -// defer func() { -// err := res.Body.Close() -// assert.NoError(t, err) -// }() - -// // check the ghz dashboard -// body, err := io.ReadAll(res.Body) -// assert.NoError(t, err) -// assert.Equal( -// t, -// ghzDashboardJSON, -// string(body), -// ) -// } +func TestGetGRPCDashboardInvalidMethod(t *testing.T) { + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, util.GRPCDashboardPath, nil) + getGRPCDashboard(w, req) + res := w.Result() + defer func() { + err := res.Body.Close() + assert.NoError(t, err) + }() + assert.Equal(t, http.StatusMethodNotAllowed, res.StatusCode) +} + +func TestGetGRPCDashboardMissingParameter(t *testing.T) { + tests := []struct { + queryParams url.Values + expectedStatusCode int + }{ + { + expectedStatusCode: http.StatusBadRequest, + }, + { + queryParams: url.Values{ + "namespace": {"default"}, + }, + expectedStatusCode: http.StatusBadRequest, + }, + { + queryParams: url.Values{ + "experiment": {"default"}, + }, + expectedStatusCode: http.StatusBadRequest, + }, + } + + for _, test := range tests { + w := httptest.NewRecorder() + + u, err := url.ParseRequestURI(util.GRPCDashboardPath) + assert.NoError(t, err) + u.RawQuery = test.queryParams.Encode() + urlStr := fmt.Sprintf("%v", u) + + req := httptest.NewRequest(http.MethodGet, urlStr, nil) + + getGRPCDashboard(w, req) + res := w.Result() + defer func() { + err := res.Body.Close() + assert.NoError(t, err) + }() + + assert.Equal(t, test.expectedStatusCode, res.StatusCode) + } +} + +func TestGetGRPCDashboard(t *testing.T) { + // instantiate metrics client + tempDirPath := t.TempDir() + client, err := badgerdb.GetClient(badger.DefaultOptions(tempDirPath), badgerdb.AdditionalOptions{}) + assert.NoError(t, err) + abn.MetricsClient = client + + // preload metric client with experiment result + ghzResult := util.GHZResult{} + err = json.Unmarshal([]byte(ghzResultJSON), &ghzResult) + assert.NoError(t, err) + + experimentResult := util.ExperimentResult{ + Name: myName, + Namespace: myNamespace, + NumCompletedTasks: 5, + Insights: &util.Insights{ + TaskData: map[string]interface{}{ + util.CollectGRPCTaskName: ghzResult, + }, + }, + } + + err = abn.MetricsClient.SetExperimentResult("default", "default", &experimentResult) + assert.NoError(t, err) + w := httptest.NewRecorder() + + // construct inputs to getGRPCDashboard + u, err := url.ParseRequestURI(util.GRPCDashboardPath) + assert.NoError(t, err) + params := url.Values{ + "namespace": {"default"}, + "experiment": {"default"}, + } + u.RawQuery = params.Encode() + urlStr := fmt.Sprintf("%v", u) + + req := httptest.NewRequest(http.MethodGet, urlStr, nil) + + // get ghz dashboard based on result in metrics client + getGRPCDashboard(w, req) + res := w.Result() + defer func() { + err := res.Body.Close() + assert.NoError(t, err) + }() + + // check the ghz dashboard + body, err := io.ReadAll(res.Body) + assert.NoError(t, err) + 
assert.Equal( + t, + ghzDashboardJSON, + string(body), + ) +} From a4c30e29521041856d554ea2ff2eef2a730c451c Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Sun, 20 Aug 2023 11:15:18 -0400 Subject: [PATCH 069/121] Remove assert command Signed-off-by: Alan Cha --- cmd/k.go | 3 -- cmd/kassert.go | 71 ---------------------------- cmd/kassert_test.go | 112 -------------------------------------------- cmd/krun_test.go | 5 ++ 4 files changed, 5 insertions(+), 186 deletions(-) delete mode 100644 cmd/kassert.go delete mode 100644 cmd/kassert_test.go diff --git a/cmd/k.go b/cmd/k.go index 5e704d654..63450eba7 100644 --- a/cmd/k.go +++ b/cmd/k.go @@ -40,9 +40,6 @@ func init() { os.Exit(1) } - // add k assert - kcmd.AddCommand(newKAssertCmd(kd)) - // add k delete kcmd.AddCommand(newKDeleteCmd(kd, os.Stdout)) diff --git a/cmd/kassert.go b/cmd/kassert.go deleted file mode 100644 index d59f6d79c..000000000 --- a/cmd/kassert.go +++ /dev/null @@ -1,71 +0,0 @@ -package cmd - -import ( - "errors" - "fmt" - "time" - - ia "github.com/iter8-tools/iter8/action" - "github.com/iter8-tools/iter8/base/log" - "github.com/iter8-tools/iter8/driver" - "github.com/spf13/cobra" -) - -// kassertDesc is the description of the k assert cmd -const kassertDesc = ` -Assert if the result of a Kubernetes experiment satisfies the specified conditions. If all conditions are satisfied, the command exits with code 0. Else, the command exits with code 1. - -Assertions are especially useful for automation inside CI/CD/GitOps pipelines. - -Supported conditions are 'completed' and 'nofailure' which indicate that the experiment has completed and none of the tasks have failed. - - iter8 k assert -c completed -c nofailure - # same as iter8 k assert -c completed,nofailure - -You can optionally specify a timeout, which is the maximum amount of time to wait for the conditions to be satisfied: - - iter8 k assert -c completed,nofailure -t 5s -` - -// newAssertCmd creates the Kubernetes assert command -func newKAssertCmd(kd *driver.KubeDriver) *cobra.Command { - actor := ia.NewAssertOpts(kd) - - cmd := &cobra.Command{ - Use: "assert", - Short: "Assert if Kubernetes experiment result satisfies conditions", - Long: kassertDesc, - SilenceUsage: true, - RunE: func(_ *cobra.Command, _ []string) error { - allGood, err := actor.KubeRun() - if err != nil { - return err - } - if !allGood { - e := errors.New("assert conditions failed") - log.Logger.Error(e) - return e - } - return nil - }, - } - // options specific to k assert - addExperimentGroupFlag(cmd, &actor.Group) - actor.EnvSettings = settings - - // options shared with assert - addConditionFlag(cmd, &actor.Conditions) - addTimeoutFlag(cmd, &actor.Timeout) - return cmd -} - -// addConditionFlag adds the condition flag to command -func addConditionFlag(cmd *cobra.Command, conditionPtr *[]string) { - cmd.Flags().StringSliceVarP(conditionPtr, "condition", "c", nil, fmt.Sprintf("%v | %v; can specify multiple or separate conditions with commas;", ia.Completed, ia.NoFailure)) - _ = cmd.MarkFlagRequired("condition") -} - -// addTimeoutFlag adds timeout flag to command -func addTimeoutFlag(cmd *cobra.Command, timeoutPtr *time.Duration) { - cmd.Flags().DurationVar(timeoutPtr, "timeout", 0, "timeout duration (e.g., 5s)") -} diff --git a/cmd/kassert_test.go b/cmd/kassert_test.go deleted file mode 100644 index bef6419e7..000000000 --- a/cmd/kassert_test.go +++ /dev/null @@ -1,112 +0,0 @@ -package cmd - -import ( - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "os" - "path/filepath" - "testing" - - 
"fortio.org/fortio/fhttp" - "github.com/iter8-tools/iter8/base" - id "github.com/iter8-tools/iter8/driver" - "github.com/stretchr/testify/assert" - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - myName = "myName" - myNamespace = "myNamespace" -) - -func TestKAssert(t *testing.T) { - // define METRICS_SERVER_URL - metricsServerURL := "http://iter8.default:8080" - err := os.Setenv(base.MetricsServerURL, metricsServerURL) - assert.NoError(t, err) - - // create and configure HTTP endpoint for testing - mux, addr := fhttp.DynamicHTTPServer(false) - url := fmt.Sprintf("http://127.0.0.1:%d/get", addr.Port) - var verifyHandlerCalled bool - mux.HandleFunc("/get", base.GetTrackingHandler(&verifyHandlerCalled)) - - // mock metrics server - base.StartHTTPMock(t) - metricsServerCalled := false - base.MockMetricsServer(base.MockMetricsServerInput{ - MetricsServerURL: metricsServerURL, - ExperimentResultCallback: func(req *http.Request) { - metricsServerCalled = true - - // check query parameters - assert.Equal(t, myName, req.URL.Query().Get("experiment")) - assert.Equal(t, myNamespace, req.URL.Query().Get("namespace")) - - // check payload - body, err := io.ReadAll(req.Body) - assert.NoError(t, err) - assert.NotNil(t, body) - - // check payload content - bodyExperimentResult := base.ExperimentResult{} - err = json.Unmarshal(body, &bodyExperimentResult) - assert.NoError(t, err) - assert.NotNil(t, body) - }, - }) - - _ = os.Chdir(t.TempDir()) - - // create experiment.yaml - base.CreateExperimentYaml(t, base.CompletePath("../testdata", "experiment.tpl"), url, id.ExperimentPath) - - // run test - testAssert(t, id.ExperimentPath, url, "output/kassert.txt", false) - assert.True(t, metricsServerCalled) - // sanity check -- handler was called - assert.True(t, verifyHandlerCalled) -} - -func testAssert(t *testing.T, experiment string, url string, expectedOutputFile string, expectError bool) { - tests := []cmdTestCase{ - // k launch - { - name: "k launch", - cmd: fmt.Sprintf("k launch -c %v --localChart --set tasks={http} --set http.url=%s --set http.duration=2s", base.CompletePath("../charts", "iter8"), url), - golden: base.CompletePath("../testdata", "output/klaunch.txt"), - }, - // k run - { - name: "k run", - cmd: "k run -g default --namespace default", - }, - } - - // fake kube cluster - *kd = *id.NewFakeKubeDriver(settings) - - // read experiment from file created by caller - byteArray, _ := os.ReadFile(filepath.Clean(experiment)) - _, _ = kd.Clientset.CoreV1().Secrets("default").Create(context.TODO(), &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "default", - Namespace: "default", - }, - StringData: map[string]string{id.ExperimentPath: string(byteArray)}, - }, metav1.CreateOptions{}) - - _, _ = kd.Clientset.BatchV1().Jobs("default").Create(context.TODO(), &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - Name: "default-1-job", - Namespace: "default", - }, - }, metav1.CreateOptions{}) - - runTestActionCmd(t, tests) -} diff --git a/cmd/krun_test.go b/cmd/krun_test.go index 91ecf26e5..6fb450b7a 100644 --- a/cmd/krun_test.go +++ b/cmd/krun_test.go @@ -17,6 +17,11 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +const ( + myName = "myName" + myNamespace = "myNamespace" +) + func TestKRun(t *testing.T) { // define METRICS_SERVER_URL metricsServerURL := "http://iter8.default:8080" From 7237046ed0eedf1b7d18f4d47327f8db33e4db94 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Sun, 20 Aug 2023 12:27:20 -0400 Subject: 
[PATCH 070/121] Uncomment test Signed-off-by: Alan Cha --- action/run_test.go | 188 ++++++++++++++++++++++----------------------- 1 file changed, 92 insertions(+), 96 deletions(-) diff --git a/action/run_test.go b/action/run_test.go index 551a44260..a1e63069b 100644 --- a/action/run_test.go +++ b/action/run_test.go @@ -1,98 +1,94 @@ package action -// import ( -// "context" -// "encoding/json" -// "fmt" -// "io" -// "net/http" -// "os" -// "testing" - -// "fortio.org/fortio/fhttp" -// "github.com/iter8-tools/iter8/base" -// "github.com/iter8-tools/iter8/driver" -// "github.com/stretchr/testify/assert" -// "helm.sh/helm/v3/pkg/cli" -// corev1 "k8s.io/api/core/v1" -// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -// ) - -// const ( -// myName = "myName" -// myNamespace = "myNamespace" -// ) - -// func TestKubeRun(t *testing.T) { -// // define METRICS_SERVER_URL -// metricsServerURL := "http://iter8.default:8080" -// err := os.Setenv(base.MetricsServerURL, metricsServerURL) -// assert.NoError(t, err) - -// // create and configure HTTP endpoint for testing -// mux, addr := fhttp.DynamicHTTPServer(false) -// url := fmt.Sprintf("http://127.0.0.1:%d/get", addr.Port) -// var verifyHandlerCalled bool -// mux.HandleFunc("/get", base.GetTrackingHandler(&verifyHandlerCalled)) - -// // mock metrics server -// base.StartHTTPMock(t) -// metricsServerCalled := false -// base.MockMetricsServer(base.MockMetricsServerInput{ -// MetricsServerURL: metricsServerURL, -// PerformanceResultCallback: func(req *http.Request) { -// metricsServerCalled = true - -// // check query parameters -// assert.Equal(t, myName, req.URL.Query().Get("experiment")) -// assert.Equal(t, myNamespace, req.URL.Query().Get("namespace")) - -// // check payload -// body, err := io.ReadAll(req.Body) -// assert.NoError(t, err) -// assert.NotNil(t, body) - -// // check payload content -// bodyFortioResult := base.HTTPResult{} -// err = json.Unmarshal(body, &bodyFortioResult) -// assert.NoError(t, err) -// assert.NotNil(t, body) - -// if _, ok := bodyFortioResult[url]; !ok { -// assert.Fail(t, fmt.Sprintf("payload FortioResult does not contain endpoint: %s", url)) -// } -// }, -// }) - -// _ = os.Chdir(t.TempDir()) - -// // create experiment.yaml -// base.CreateExperimentYaml(t, base.CompletePath("../testdata", "experiment.tpl"), url, driver.ExperimentPath) - -// // fix rOpts -// rOpts := NewRunOpts(driver.NewFakeKubeDriver(cli.New())) - -// // read experiment from file created above -// byteArray, _ := os.ReadFile(driver.ExperimentPath) -// _, _ = rOpts.Clientset.CoreV1().Secrets("default").Create(context.TODO(), &corev1.Secret{ -// ObjectMeta: metav1.ObjectMeta{ -// Name: "default", -// Namespace: "default", -// }, -// StringData: map[string]string{driver.ExperimentPath: string(byteArray)}, -// }, metav1.CreateOptions{}) - -// err = rOpts.KubeRun() -// assert.NoError(t, err) -// // sanity check -- handler was called -// assert.True(t, verifyHandlerCalled) -// assert.True(t, metricsServerCalled) - -// // check results -// exp, err := base.BuildExperiment(rOpts.KubeDriver) -// assert.NoError(t, err) -// assert.True(t, exp.Completed()) -// assert.True(t, exp.NoFailure()) -// assert.Equal(t, 1, exp.Result.NumCompletedTasks) - -// } +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "testing" + + "fortio.org/fortio/fhttp" + "github.com/iter8-tools/iter8/base" + "github.com/iter8-tools/iter8/driver" + "github.com/stretchr/testify/assert" + "helm.sh/helm/v3/pkg/cli" + corev1 "k8s.io/api/core/v1" + metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + myName = "myName" + myNamespace = "myNamespace" +) + +func TestKubeRun(t *testing.T) { + // define METRICS_SERVER_URL + metricsServerURL := "http://iter8.default:8080" + err := os.Setenv(base.MetricsServerURL, metricsServerURL) + assert.NoError(t, err) + + // create and configure HTTP endpoint for testing + mux, addr := fhttp.DynamicHTTPServer(false) + url := fmt.Sprintf("http://127.0.0.1:%d/get", addr.Port) + var verifyHandlerCalled bool + mux.HandleFunc("/get", base.GetTrackingHandler(&verifyHandlerCalled)) + + // mock metrics server + base.StartHTTPMock(t) + metricsServerCalled := false + base.MockMetricsServer(base.MockMetricsServerInput{ + MetricsServerURL: metricsServerURL, + ExperimentResultCallback: func(req *http.Request) { + metricsServerCalled = true + + // check query parameters + assert.Equal(t, myName, req.URL.Query().Get("experiment")) + assert.Equal(t, myNamespace, req.URL.Query().Get("namespace")) + + // check payload + body, err := io.ReadAll(req.Body) + assert.NoError(t, err) + assert.NotNil(t, body) + + // check payload content + bodyExperimentResult := base.ExperimentResult{} + err = json.Unmarshal(body, &bodyExperimentResult) + assert.NoError(t, err) + assert.NotNil(t, body) + }, + }) + + _ = os.Chdir(t.TempDir()) + + // create experiment.yaml + base.CreateExperimentYaml(t, base.CompletePath("../testdata", "experiment.tpl"), url, driver.ExperimentPath) + + // fix rOpts + rOpts := NewRunOpts(driver.NewFakeKubeDriver(cli.New())) + + // read experiment from file created above + byteArray, _ := os.ReadFile(driver.ExperimentPath) + _, _ = rOpts.Clientset.CoreV1().Secrets("default").Create(context.TODO(), &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: "default", + }, + StringData: map[string]string{driver.ExperimentPath: string(byteArray)}, + }, metav1.CreateOptions{}) + + err = rOpts.KubeRun() + assert.NoError(t, err) + // sanity check -- handler was called + assert.True(t, verifyHandlerCalled) + assert.True(t, metricsServerCalled) + + // check results + exp, err := base.BuildExperiment(rOpts.KubeDriver) + assert.NoError(t, err) + assert.True(t, exp.Completed()) + assert.True(t, exp.NoFailure()) + assert.Equal(t, 1, exp.Result.NumCompletedTasks) + +} From 603d44c66793c61c649afed3a7487d913f14dd41 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Sun, 20 Aug 2023 13:22:27 -0400 Subject: [PATCH 071/121] Remove assert Signed-off-by: Alan Cha --- action/assert.go | 111 ------------------------------------------ action/assert_test.go | 34 ------------- 2 files changed, 145 deletions(-) delete mode 100644 action/assert.go delete mode 100644 action/assert_test.go diff --git a/action/assert.go b/action/assert.go deleted file mode 100644 index 8258cb28f..000000000 --- a/action/assert.go +++ /dev/null @@ -1,111 +0,0 @@ -package action - -import ( - "fmt" - "strings" - "time" - - "github.com/iter8-tools/iter8/base" - "github.com/iter8-tools/iter8/base/log" - "github.com/iter8-tools/iter8/driver" -) - -const ( - // Completed states that the experiment is complete - Completed = "completed" - // NoFailure states that none of the tasks in the experiment have failed - NoFailure = "nofailure" -) - -// AssertOpts are the options used for asserting experiment results -type AssertOpts struct { - // Timeout is the duration to wait for conditions to be satisfied - Timeout time.Duration - // Conditions are checked by assert - Conditions []string - // RunOpts provides options relating to experiment resources - 
RunOpts -} - -// NewAssertOpts initializes and returns assert opts -func NewAssertOpts(kd *driver.KubeDriver) *AssertOpts { - return &AssertOpts{ - RunOpts: *NewRunOpts(kd), - } -} - -// KubeRun asserts conditions for a Kubernetes experiment -func (aOpts *AssertOpts) KubeRun() (bool, error) { - if err := aOpts.KubeDriver.Init(); err != nil { - return false, err - } - - return aOpts.Run(aOpts.KubeDriver) -} - -// Run builds the experiment and verifies assert conditions -func (aOpts *AssertOpts) Run(eio base.Driver) (bool, error) { - allGood, err := aOpts.verify(eio) - if err != nil { - return false, err - } - if !allGood { - log.Logger.Error("assert conditions failed") - return false, nil - } - return true, nil -} - -// verify implements the core logic of assert -func (aOpts *AssertOpts) verify(eio base.Driver) (bool, error) { - // timeSpent tracks how much time has been spent so far in assert attempts - var timeSpent, _ = time.ParseDuration("0s") - - // sleepTime specifies how long to sleep in between retries of asserts - var sleepTime, _ = time.ParseDuration("3s") - - // check assert conditions - for { - exp, err := base.BuildExperiment(eio) - if err != nil { - return false, err - } - - allGood := true - - for _, cond := range aOpts.Conditions { - if strings.ToLower(cond) == Completed { - c := exp.Completed() - allGood = allGood && c - if c { - log.Logger.Info("experiment completed") - } else { - log.Logger.Info("experiment did not complete") - } - } else if strings.ToLower(cond) == NoFailure { - nf := exp.NoFailure() - allGood = allGood && nf - if nf { - log.Logger.Info("experiment has no failure") - } else { - log.Logger.Info("experiment failed") - } - } else { - log.Logger.Error("unsupported assert condition detected; ", cond) - return false, fmt.Errorf("unsupported assert condition detected; %v", cond) - } - } - - if allGood { - log.Logger.Info("all conditions were satisfied") - return true, nil - } - if timeSpent >= aOpts.Timeout { - log.Logger.Info("not all conditions were satisfied") - return false, nil - } - log.Logger.Infof("sleeping %v ................................", sleepTime) - time.Sleep(sleepTime) - timeSpent += sleepTime - } -} diff --git a/action/assert_test.go b/action/assert_test.go deleted file mode 100644 index 6a0d44efb..000000000 --- a/action/assert_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package action - -import ( - "context" - "os" - "testing" - - "github.com/iter8-tools/iter8/base" - "github.com/iter8-tools/iter8/driver" - "github.com/stretchr/testify/assert" - "helm.sh/helm/v3/pkg/cli" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestKubeAssert(t *testing.T) { - _ = os.Chdir(t.TempDir()) - // fix aOpts - aOpts := NewAssertOpts(driver.NewFakeKubeDriver(cli.New())) - aOpts.Conditions = []string{Completed, NoFailure} - - byteArray, _ := os.ReadFile(base.CompletePath("../testdata/assertinputs", driver.ExperimentPath)) - _, _ = aOpts.Clientset.CoreV1().Secrets("default").Create(context.TODO(), &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "default", - Namespace: "default", - }, - StringData: map[string]string{driver.ExperimentPath: string(byteArray)}, - }, metav1.CreateOptions{}) - - ok, err := aOpts.KubeRun() - assert.True(t, ok) - assert.NoError(t, err) -} From cc6557b53a90ab3871d3da1562d00fead082aa57 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Mon, 21 Aug 2023 08:32:25 -0400 Subject: [PATCH 072/121] Remove original Write() implmentation Signed-off-by: Alan Cha --- driver/filedriver.go | 8 ------- 
 driver/filedriver_test.go | 13 +++--------
 driver/kubedriver.go | 45 ---------------------------------------
 driver/kubedriver_test.go | 24 +++------------------
 4 files changed, 6 insertions(+), 84 deletions(-)

diff --git a/driver/filedriver.go b/driver/filedriver.go
index adcf4861c..b3e3d7161 100644
--- a/driver/filedriver.go
+++ b/driver/filedriver.go
@@ -8,7 +8,6 @@ import (
 
 	"github.com/iter8-tools/iter8/base"
 	"github.com/iter8-tools/iter8/base/log"
-	"sigs.k8s.io/yaml"
 )
 
 // FileDriver enables reading and writing experiment spec and result files
@@ -45,13 +44,6 @@ func (f *FileDriver) Write(exp *base.Experiment) error {
 		return fmt.Errorf(errorMessage)
 	}
 
-	// write to file
-	b, _ := yaml.Marshal(exp)
-	err = os.WriteFile(path.Join(f.RunDir, ExperimentPath), b, 0600)
-	if err != nil {
-		log.Logger.WithStackTrace(err.Error()).Error("unable to write experiment")
-		return errors.New("unable to write experiment")
-	}
 	return nil
 }
 
diff --git a/driver/filedriver_test.go b/driver/filedriver_test.go
index c0bb2805f..695a5ca16 100644
--- a/driver/filedriver_test.go
+++ b/driver/filedriver_test.go
@@ -52,6 +52,9 @@ func TestLocalRun(t *testing.T) {
 			err = json.Unmarshal(body, &bodyExperimentResult)
 			assert.NoError(t, err)
 			assert.NotNil(t, body)
+
+			// no experiment failure
+			assert.False(t, bodyExperimentResult.Failure)
 		},
 	})
 
@@ -68,16 +71,6 @@ func TestLocalRun(t *testing.T) {
 	// sanity check -- handler was called
 	assert.True(t, verifyHandlerCalled)
 	assert.True(t, metricsServerCalled)
-
-	// check results
-	exp, err := base.BuildExperiment(&fd)
-	assert.NoError(t, err)
-
-	x, _ := json.Marshal(exp)
-	fmt.Println(string(x))
-	fmt.Println(err, exp.Completed(), exp.NoFailure())
-
-	assert.True(t, exp.Completed() && exp.NoFailure())
 }
 
 func TestFileDriverReadError(t *testing.T) {
diff --git a/driver/kubedriver.go b/driver/kubedriver.go
index 57efddca6..d0db248bc 100644
--- a/driver/kubedriver.go
+++ b/driver/kubedriver.go
@@ -31,7 +31,6 @@ import (
 	"helm.sh/helm/v3/pkg/getter"
 	"helm.sh/helm/v3/pkg/release"
 	"k8s.io/client-go/kubernetes"
-	"sigs.k8s.io/yaml"
 
 	corev1 "k8s.io/api/core/v1"
 	kerrors "k8s.io/apimachinery/pkg/api/errors"
@@ -210,45 +209,6 @@ func (kd *KubeDriver) Read() (*base.Experiment, error) {
 	return ExperimentFromBytes(b)
 }
 
-// formExperimentSecret creates the experiment secret using the experiment
-func (kd *KubeDriver) formExperimentSecret(e *base.Experiment) (*corev1.Secret, error) {
-	byteArray, err := yaml.Marshal(e)
-	if err != nil {
-		return nil, err
-	}
-	// log.Logger.Debug(string(byteArray))
-	sec := corev1.Secret{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: kd.getExperimentSecretName(),
-			Annotations: map[string]string{
-				"iter8.tools/group": kd.Group,
-			},
-		},
-		StringData: map[string]string{ExperimentPath: string(byteArray)},
-	}
-	// formed experiment secret ...
-	return &sec, nil
-}
-
-// updateExperimentSecret updates the experiment secret
-// as opposed to patch, update is an atomic operation
-func (kd *KubeDriver) updateExperimentSecret(e *base.Experiment) error {
-	if sec, err := kd.formExperimentSecret(e); err == nil {
-		secretsClient := kd.Clientset.CoreV1().Secrets(kd.Namespace())
-		_, err1 := secretsClient.Update(context.Background(), sec, metav1.UpdateOptions{})
-		// TODO: Evaluate if result secret update requires retries.
-		// Probably not. Conflicts will be avoided if cronjob avoids parallel jobs.
- if err1 != nil { - err2 := fmt.Errorf("unable to update secret %v", sec.Name) - log.Logger.WithStackTrace(err1.Error()).Error(err2) - return err2 - } - } else { - return err - } - return nil -} - // Write writes a Kubernetes experiment func (kd *KubeDriver) Write(exp *base.Experiment) error { // write to metrics server @@ -267,11 +227,6 @@ func (kd *KubeDriver) Write(exp *base.Experiment) error { return fmt.Errorf(errorMessage) } - // write to secret - if err := kd.updateExperimentSecret(exp); err != nil { - log.Logger.WithStackTrace(err.Error()).Error("unable to write experiment") - return errors.New("unable to write experiment") - } return nil } diff --git a/driver/kubedriver_test.go b/driver/kubedriver_test.go index 164480f10..93e1cecf4 100644 --- a/driver/kubedriver_test.go +++ b/driver/kubedriver_test.go @@ -101,6 +101,9 @@ func TestKubeRun(t *testing.T) { err = json.Unmarshal(body, &bodyExperimentResult) assert.NoError(t, err) assert.NotNil(t, body) + + // no experiment failure + assert.False(t, bodyExperimentResult.Failure) }, }) @@ -121,32 +124,11 @@ func TestKubeRun(t *testing.T) { StringData: map[string]string{ExperimentPath: string(byteArray)}, }, metav1.CreateOptions{}) - // _, _ = kd.Clientset.BatchV1().Jobs("default").Create(context.TODO(), &batchv1.Job{ - // ObjectMeta: metav1.ObjectMeta{ - // Name: "default-1-job", - // Namespace: "default", - // Annotations: map[string]string{ - // "iter8.tools/group": "default", - // "iter8.tools/revision": "1", - // }, - // }, - // }, metav1.CreateOptions{}) - err = base.RunExperiment(kd) assert.NoError(t, err) // sanity check -- handler was called assert.True(t, verifyHandlerCalled) assert.True(t, metricsServerCalled) - - // check results - exp, err := base.BuildExperiment(kd) - assert.NoError(t, err) - - x, _ := json.Marshal(exp) - fmt.Println(string(x)) - fmt.Println(err, exp.Completed(), exp.NoFailure()) - - assert.True(t, exp.Completed() && exp.NoFailure()) } func TestLogs(t *testing.T) { From 28595348b17daff6fb699b61ad964d69ca9dcdaa Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Mon, 21 Aug 2023 08:48:57 -0400 Subject: [PATCH 073/121] Update tests Signed-off-by: Alan Cha --- .github/workflows/assets.yaml | 13 +- .github/workflows/testcharts.yaml | 225 +------------------------ .github/workflows/testperformance.yaml | 65 +------ 3 files changed, 15 insertions(+), 288 deletions(-) diff --git a/.github/workflows/assets.yaml b/.github/workflows/assets.yaml index c4bc476f4..9cae0ef33 100644 --- a/.github/workflows/assets.yaml +++ b/.github/workflows/assets.yaml @@ -109,7 +109,6 @@ jobs: --set http.url="http://httpbin.default/get" - name: try other iter8 k commands run: | - iter8 k assert -c completed -c nofailure --timeout 60s iter8 k log iter8 k delete @@ -146,11 +145,8 @@ jobs: --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ --set grpc.call=routeguide.RouteGuide.GetFeature \ --set grpc.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ - --set runner=job - name: try other iter8 k commands run: | - iter8 k assert -c completed -c nofailure --timeout 60s - iter8 k report iter8 k log iter8 k delete @@ -179,7 +175,6 @@ jobs: --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" - name: try other iter8 k commands run: | - iter8 k assert -c completed -c nofailure --timeout 60s iter8 k log iter8 k delete @@ -206,9 +201,6 @@ jobs: --set 
ready.service="httpbin" \ --set ready.timeout=60s \ --set http.url=http://httpbin.default - - name: k assert experiment completed without failures - run: | - iter8 k assert -c completed -c nofailure --timeout 60s readiness-with-namespace: name: Kubernetes readiness test with namespace @@ -234,7 +226,4 @@ jobs: --set ready.service="httpbin" \ --set ready.timeout=60s \ --set ready.namespace=default \ - --set http.url=http://httpbin.default/get - - name: k assert experiment completed without failures - run: | - iter8 k assert -n experiments -c completed -c nofailure --timeout 60s \ No newline at end of file + --set http.url=http://httpbin.default/get \ No newline at end of file diff --git a/.github/workflows/testcharts.yaml b/.github/workflows/testcharts.yaml index 67c090057..16a995fc0 100644 --- a/.github/workflows/testcharts.yaml +++ b/.github/workflows/testcharts.yaml @@ -79,18 +79,12 @@ jobs: iter8 k launch \ --localChart \ --chartName charts/iter8 \ - --set "tasks={http,assess}" \ + --set "tasks={http}" \ --set http.url="http://httpbin.default/get" \ - --set assess.SLOs.upper.http/latency-p50=5 \ - --set assess.SLOs.upper.http/latency-p90=10 \ - --set assess.SLOs.upper.http/error-count=0 \ - --set runner=job - name: Try other iter8 k commands if: steps.modified-files.outputs.any_modified == 'true' run: | - iter8 k assert -c completed -c nofailure -c slos --timeout 300s - iter8 k report iter8 k log iter8 k delete @@ -136,19 +130,13 @@ jobs: iter8 k launch \ --localChart \ --chartName charts/iter8 \ - --set "tasks={http,assess}" \ + --set "tasks={http}" \ --set http.url="http://httpbin.default/post" \ --set http.payloadStr=hello \ - --set assess.SLOs.upper.http/latency-p50=5 \ - --set assess.SLOs.upper.http/latency-p90=10 \ - --set assess.SLOs.upper.http/error-count=0 \ - --set runner=job - name: Try other iter8 k commands if: steps.modified-files.outputs.any_modified == 'true' run: | - iter8 k assert -c completed -c nofailure -c slos --timeout 300s - iter8 k report iter8 k log iter8 k delete @@ -194,82 +182,15 @@ jobs: iter8 k launch \ --localChart \ --chartName charts/iter8 \ - --set "tasks={http,assess}" \ + --set "tasks={http}" \ --set http.endpoints.get.url=http://httpbin.default/get \ --set http.endpoints.getAnything.url=http://httpbin.default/anything \ --set http.endpoints.post.url=http://httpbin.default/post \ --set http.endpoints.post.payloadStr=hello \ - --set assess.SLOs.upper.http-get/error-count=0 \ - --set assess.SLOs.upper.http-get/latency-mean=50 \ - --set assess.SLOs.upper.http-getAnything/error-count=0 \ - --set assess.SLOs.upper.http-getAnything/latency-mean=100 \ - --set assess.SLOs.upper.http-post/error-count=0 \ - --set assess.SLOs.upper.http-post/latency-mean=150 \ - --set runner=job - name: Try other iter8 k commands if: steps.modified-files.outputs.any_modified == 'true' run: | - iter8 k assert -c completed -c nofailure -c slos --timeout 300s - iter8 k report - iter8 k log - iter8 k delete - - http-looped-experiment: - name: HTTP looped load test - needs: get_versions - runs-on: ubuntu-latest - strategy: - matrix: - version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - - steps: - - name: Check out code - uses: actions/checkout@v3 - - - name: Get modified files in the charts/iter8 folder - id: modified-files - uses: tj-actions/changed-files@v35 - with: - files: charts/iter8 - - - name: Install Iter8 - run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 - if: steps.modified-files.outputs.any_modified == 'true' - - - name: Start kind 
cluster ${{ matrix.version }} - uses: helm/kind-action@v1.5.0 - if: steps.modified-files.outputs.any_modified == 'true' - with: - wait: 300s - node_image: ${{ matrix.version }} - - - name: Create app - if: steps.modified-files.outputs.any_modified == 'true' - run: | - kubectl create deployment httpbin --image=kennethreitz/httpbin - kubectl expose deployment httpbin --type=ClusterIP --port=80 - kubectl wait --for=condition=available --timeout=60s deploy/httpbin - - - name: iter8 k launch - if: steps.modified-files.outputs.any_modified == 'true' - run: | - iter8 k launch \ - --localChart \ - --chartName charts/iter8 \ - --set "tasks={http,assess}" \ - --set http.url="http://httpbin.default/get" \ - --set assess.SLOs.upper.http/latency-p50=5 \ - --set assess.SLOs.upper.http/latency-p90=10 \ - --set assess.SLOs.upper.http/error-count=0 \ - --set runner=cronjob \ - --set cronjobSchedule="*/1 * * * *" - - - name: Try other iter8 k commands - if: steps.modified-files.outputs.any_modified == 'true' - run: | - iter8 k assert -c nofailure --timeout 300s - iter8 k report iter8 k log iter8 k delete @@ -324,7 +245,7 @@ jobs: iter8 k launch \ --localChart \ --chartName charts/iter8 \ - --set "tasks={ready,grpc,assess}" \ + --set "tasks={ready,grpc}" \ --set ready.deploy=routeguide \ --set ready.service=routeguide \ --set ready.timeout=60s \ @@ -332,17 +253,10 @@ jobs: --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ --set grpc.call=routeguide.RouteGuide.GetFeature \ --set grpc.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ - --set assess.SLOs.upper.grpc/error-rate=0 \ - --set assess.SLOs.upper.grpc/latency/mean=200 \ - --set assess.SLOs.upper.grpc/latency/p90=500 \ - --set assess.SLOs.upper.grpc/latency/p'97\.5'=600 \ - --set runner=job - name: Try other iter8 k commands if: steps.modified-files.outputs.any_modified == 'true' run: | - iter8 k assert -c completed -c nofailure -c slos --timeout 300s - iter8 k report iter8 k log iter8 k delete @@ -397,7 +311,7 @@ jobs: iter8 k launch \ --localChart \ --chartName charts/iter8 \ - --set "tasks={ready,grpc,assess}" \ + --set "tasks={ready,grpc}" \ --set ready.deploy=routeguide \ --set ready.service=routeguide \ --set ready.timeout=60s \ @@ -407,17 +321,10 @@ jobs: --set grpc.endpoints.getFeature.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ --set grpc.endpoints.listFeatures.call=routeguide.RouteGuide.ListFeatures \ --set grpc.endpoints.listFeatures.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/server.json \ - --set assess.SLOs.upper.grpc-getFeature/error-rate=0 \ - --set assess.SLOs.upper.grpc-getFeature/latency/mean=50 \ - --set assess.SLOs.upper.grpc-listFeatures/error-rate=0 \ - --set assess.SLOs.upper.grpc-listFeatures/latency/mean=100 \ - --set runner=job - name: Try other iter8 k commands if: steps.modified-files.outputs.any_modified == 'true' run: | - iter8 k assert -c completed -c nofailure -c slos --timeout 300s - iter8 k report iter8 k log iter8 k delete @@ -463,137 +370,17 @@ jobs: iter8 k launch \ --localChart \ --chartName charts/iter8 \ - --set "tasks={grpc,assess}" \ - --set grpc.host="hello.default:50051" \ - --set grpc.call="helloworld.Greeter.SayHello" \ - --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ - --set 
assess.SLOs.upper.grpc/error-rate=0 \ - --set assess.SLOs.upper.grpc/latency/mean=200 \ - --set assess.SLOs.upper.grpc/latency/p90=500 \ - --set assess.SLOs.upper.grpc/latency/p'97\.5'=600 \ - --set runner=job - - - name: Try other iter8 k commands - if: steps.modified-files.outputs.any_modified == 'true' - run: | - iter8 k assert -c completed -c nofailure -c slos --timeout 300s - iter8 k report - iter8 k log - iter8 k delete - - grpc-looped-experiment: - name: gRPC looped load test - needs: get_versions - runs-on: ubuntu-latest - strategy: - matrix: - version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - - steps: - - name: Check out code - uses: actions/checkout@v3 - - - name: Get modified files in the charts/iter8 folder - id: modified-files - uses: tj-actions/changed-files@v35 - with: - files: charts/iter8 - - - name: Install Iter8 - run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 - if: steps.modified-files.outputs.any_modified == 'true' - - - name: Start kind cluster ${{ matrix.version }} - uses: helm/kind-action@v1.5.0 - if: steps.modified-files.outputs.any_modified == 'true' - with: - wait: 300s - node_image: ${{ matrix.version }} - - - name: Create app - if: steps.modified-files.outputs.any_modified == 'true' - run: | - kubectl create deploy hello --image=docker.io/grpc/java-example-hostname:latest --port=50051 - kubectl expose deploy hello --port=50051 - kubectl wait --for=condition=available --timeout=60s deploy/hello - - - name: iter8 k launch - if: steps.modified-files.outputs.any_modified == 'true' - run: | - iter8 k launch \ - --localChart \ - --chartName charts/iter8 \ - --set "tasks={grpc,assess}" \ + --set "tasks={grpc}" \ --set grpc.host="hello.default:50051" \ --set grpc.call="helloworld.Greeter.SayHello" \ --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ - --set assess.SLOs.upper.grpc/error-rate=0 \ - --set assess.SLOs.upper.grpc/latency/mean=200 \ - --set assess.SLOs.upper.grpc/latency/p90=500 \ - --set assess.SLOs.upper.grpc/latency/p'97\.5'=600 \ - --set runner=cronjob \ - --set cronjobSchedule="*/1 * * * *" - name: Try other iter8 k commands if: steps.modified-files.outputs.any_modified == 'true' run: | - iter8 k assert -c nofailure -c slos --timeout 300s - iter8 k report iter8 k log iter8 k delete - autox: - name: AutoX test - needs: get_versions - runs-on: ubuntu-latest - strategy: - matrix: - version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - - steps: - - name: Check out code - uses: actions/checkout@v3 - - - name: Get modified files in the charts/autox folder - id: modified-files - uses: tj-actions/changed-files@v35 - with: - files: charts/autox - - - name: Start kind cluster ${{ matrix.version }} - uses: helm/kind-action@v1.5.0 - if: steps.modified-files.outputs.any_modified == 'true' - with: - wait: 300s - node_image: ${{ matrix.version }} - - - name: Create namespace - if: steps.modified-files.outputs.any_modified == 'true' - run: | - kubectl create namespace argocd - - - name: Start AutoX controller - if: steps.modified-files.outputs.any_modified == 'true' - run: | - helm install autox charts/autox \ - --set 'groups.httpbin.trigger.name=httpbin' \ - --set 'groups.httpbin.trigger.namespace=default' \ - --set 'groups.httpbin.trigger.group=apps' \ - --set 'groups.httpbin.trigger.version=v1' \ - --set 'groups.httpbin.trigger.resource=deployments' \ - --set 'groups.httpbin.specs.iter8.name=iter8' \ - --set 
'groups.httpbin.specs.iter8.values.tasks={ready,http}' \ - --set 'groups.httpbin.specs.iter8.values.ready.deploy=httpbin' \ - --set 'groups.httpbin.specs.iter8.values.ready.service=httpbin' \ - --set 'groups.httpbin.specs.iter8.values.ready.timeout=60s' \ - --set 'groups.httpbin.specs.iter8.values.http.url=http://httpbin.default/get' \ - --set 'groups.httpbin.specs.iter8.version=0.15.0' \ - --set 'groups.httpbin.specs.iter8.values.runner=job' - - - name: Check AutoX controller - if: steps.modified-files.outputs.any_modified == 'true' - run: kubectl wait --for=condition=available --timeout=60s deploy/autox -n argocd - traffic: name: Traffic test needs: get_versions diff --git a/.github/workflows/testperformance.yaml b/.github/workflows/testperformance.yaml index 5fd9f9d6d..bd489ade7 100644 --- a/.github/workflows/testperformance.yaml +++ b/.github/workflows/testperformance.yaml @@ -52,44 +52,32 @@ jobs: - name: load-test-http in Kubernetes run: | iter8 k launch --localChart --chartName charts/iter8 \ - --set "tasks={ready,http,assess}" \ + --set "tasks={ready,http}" \ --set ready.deploy=httpbin \ --set ready.service=httpbin \ --set ready.timeout=60s \ --set http.url=http://httpbin.default/get \ --set http.duration="3s" \ - --set assess.SLOs.upper.http/latency-p50=5 \ - --set assess.SLOs.upper.http/latency-p90=10 \ - --set assess.SLOs.upper.http/error-count=0 \ - --set runner=job - iter8 k assert -c completed -c nofailure -c slos --timeout 60s - iter8 k report iter8 k log iter8 k delete - name: load-test-http with payload in Kubernetes run: | iter8 k launch --localChart --chartName charts/iter8 \ - --set "tasks={ready,http,assess}" \ + --set "tasks={ready,http}" \ --set ready.deploy=httpbin \ --set ready.service=httpbin \ --set ready.timeout=60s \ --set http.url=http://httpbin.default/post \ --set http.payloadStr=hello \ --set http.duration="3s" \ - --set assess.SLOs.upper.http/latency-p50=5 \ - --set assess.SLOs.upper.http/latency-p90=10 \ - --set assess.SLOs.upper.http/error-count=0 \ - --set runner=job - iter8 k assert -c completed -c nofailure -c slos --timeout 60s - iter8 k report iter8 k log iter8 k delete - name: load-test-http with multiple endpoints in Kubernetes run: | iter8 k launch --localChart --chartName charts/iter8 \ - --set "tasks={ready,http,assess}" \ + --set "tasks={ready,http}" \ --set ready.deploy=httpbin \ --set ready.service=httpbin \ --set ready.timeout=60s \ @@ -98,15 +86,6 @@ jobs: --set http.endpoints.post.url=http://httpbin.default/post \ --set http.endpoints.post.payloadStr=hello \ --set http.duration="3s" \ - --set assess.SLOs.upper.http-get/error-count=0 \ - --set assess.SLOs.upper.http-get/latency-mean=50 \ - --set assess.SLOs.upper.http-getAnything/error-count=0 \ - --set assess.SLOs.upper.http-getAnything/latency-mean=100 \ - --set assess.SLOs.upper.http-post/error-count=0 \ - --set assess.SLOs.upper.http-post/latency-mean=150 \ - --set runner=job - iter8 k assert -c completed -c nofailure -c slos --timeout 300s - iter8 k report iter8 k log iter8 k delete @@ -145,7 +124,7 @@ jobs: - name: load test grpc service run: | iter8 k launch --localChart --chartName charts/iter8 \ - --set "tasks={ready,grpc,assess}" \ + --set "tasks={ready,grpc}" \ --set ready.deploy=routeguide \ --set ready.service=routeguide \ --set ready.timeout=60s \ @@ -153,19 +132,13 @@ jobs: --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ --set grpc.call=routeguide.RouteGuide.GetFeature \ --set 
grpc.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ - --set assess.SLOs.upper.grpc/error-rate=0 \ - --set assess.SLOs.upper.grpc/latency/mean=200 \ - --set assess.SLOs.upper.grpc/latency/p'97\.5'=800 \ - --set runner=job - iter8 k assert -c completed -c nofailure -c slos --timeout 300s - iter8 k report iter8 k log iter8 k delete - name: load test grpc service with multiple endpoints run: | iter8 k launch --localChart --chartName charts/iter8 \ - --set "tasks={ready,grpc,assess}" \ + --set "tasks={ready,grpc}" \ --set ready.deploy=routeguide \ --set ready.service=routeguide \ --set ready.timeout=60s \ @@ -175,13 +148,6 @@ jobs: --set grpc.endpoints.getFeature.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ --set grpc.endpoints.listFeatures.call=routeguide.RouteGuide.ListFeatures \ --set grpc.endpoints.listFeatures.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/server.json \ - --set assess.SLOs.upper.grpc-getFeature/error-rate=0 \ - --set assess.SLOs.upper.grpc-getFeature/latency/mean=50 \ - --set assess.SLOs.upper.grpc-listFeatures/error-rate=0 \ - --set assess.SLOs.upper.grpc-listFeatures/latency/mean=100 \ - --set runner=job - iter8 k assert -c completed -c nofailure -c slos --timeout 300s - iter8 k report iter8 k log iter8 k delete @@ -211,7 +177,7 @@ jobs: - name: load test grpc service with protoURL run: | iter8 k launch --localChart --chartName charts/iter8 \ - --set "tasks={ready,grpc,assess}" \ + --set "tasks={ready,grpc}" \ --set ready.deploy=hello \ --set ready.service=hello \ --set ready.timeout=60s \ @@ -219,21 +185,13 @@ jobs: --set grpc.call="helloworld.Greeter.SayHello" \ --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ --set grpc.data.name="frodo" \ - --set assess.SLOs.upper.grpc/error-rate=0 \ - --set assess.SLOs.upper.grpc/latency/mean=200 \ - --set assess.SLOs.upper.grpc/latency/p90=500 \ - --set assess.SLOs.upper.grpc/latency/p'97\.5'=600 \ - --set runner=job - iter8 k assert -c completed -c nofailure -c slos --timeout 60s - iter8 k report iter8 k log iter8 k delete - name: load test grpc service with proto/data/metadata URLs run: | - iter8 k launch --localChart -l trace \ - --chartName charts/iter8 \ - --set "tasks={ready,grpc,assess}" \ + iter8 k launch --localChart --chartName charts/iter8 \ + --set "tasks={ready,grpc}" \ --set ready.deploy=hello \ --set ready.service=hello \ --set ready.timeout=60s \ @@ -242,12 +200,5 @@ jobs: --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ --set grpc.dataURL="https://gist.githubusercontent.com/sriumcp/3f3178f4b698af6696c925832e51b0ba/raw/d02aa698d34aa2067f7a2f6afb4ceb616b0db822/name.json" \ --set grpc.metadataURL="https://gist.githubusercontent.com/sriumcp/3f3178f4b698af6696c925832e51b0ba/raw/d02aa698d34aa2067f7a2f6afb4ceb616b0db822/name.json" \ - --set assess.SLOs.upper.grpc/error-rate=0 \ - --set assess.SLOs.upper.grpc/latency/mean=200 \ - --set assess.SLOs.upper.grpc/latency/p90=500 \ - --set assess.SLOs.upper.grpc/latency/p'97\.5'=600 \ - --set runner=job - iter8 k assert -c completed -c nofailure -c slos --timeout 60s - iter8 k report iter8 k log iter8 k delete From 6bff9e2be4806fd79b0869a6b602d0b10e26e4d0 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Mon, 21 Aug 2023 09:17:54 -0400 Subject: [PATCH 074/121] Fix tests 
Signed-off-by: Alan Cha --- .github/workflows/assets.yaml | 3 +++ .github/workflows/testcharts.yaml | 6 ++++++ .github/workflows/testperformance.yaml | 22 +++++++++++++++------- base/metrics.go | 2 ++ charts/iter8/Chart.yaml | 2 +- 5 files changed, 27 insertions(+), 8 deletions(-) diff --git a/.github/workflows/assets.yaml b/.github/workflows/assets.yaml index 9cae0ef33..07dd6c161 100644 --- a/.github/workflows/assets.yaml +++ b/.github/workflows/assets.yaml @@ -109,6 +109,7 @@ jobs: --set http.url="http://httpbin.default/get" - name: try other iter8 k commands run: | + sleep 60 iter8 k log iter8 k delete @@ -147,6 +148,7 @@ jobs: --set grpc.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ - name: try other iter8 k commands run: | + sleep 60 iter8 k log iter8 k delete @@ -175,6 +177,7 @@ jobs: --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" - name: try other iter8 k commands run: | + sleep 60 iter8 k log iter8 k delete diff --git a/.github/workflows/testcharts.yaml b/.github/workflows/testcharts.yaml index 16a995fc0..a019338ee 100644 --- a/.github/workflows/testcharts.yaml +++ b/.github/workflows/testcharts.yaml @@ -85,6 +85,7 @@ jobs: - name: Try other iter8 k commands if: steps.modified-files.outputs.any_modified == 'true' run: | + sleep 60 iter8 k log iter8 k delete @@ -137,6 +138,7 @@ jobs: - name: Try other iter8 k commands if: steps.modified-files.outputs.any_modified == 'true' run: | + sleep 60 iter8 k log iter8 k delete @@ -191,6 +193,7 @@ jobs: - name: Try other iter8 k commands if: steps.modified-files.outputs.any_modified == 'true' run: | + sleep 60 iter8 k log iter8 k delete @@ -257,6 +260,7 @@ jobs: - name: Try other iter8 k commands if: steps.modified-files.outputs.any_modified == 'true' run: | + sleep 60 iter8 k log iter8 k delete @@ -325,6 +329,7 @@ jobs: - name: Try other iter8 k commands if: steps.modified-files.outputs.any_modified == 'true' run: | + sleep 60 iter8 k log iter8 k delete @@ -378,6 +383,7 @@ jobs: - name: Try other iter8 k commands if: steps.modified-files.outputs.any_modified == 'true' run: | + sleep 60 iter8 k log iter8 k delete diff --git a/.github/workflows/testperformance.yaml b/.github/workflows/testperformance.yaml index bd489ade7..b390a4370 100644 --- a/.github/workflows/testperformance.yaml +++ b/.github/workflows/testperformance.yaml @@ -5,6 +5,7 @@ on: jobs: unit-test: + name: Unit test runs-on: ubuntu-latest steps: - name: Install Go @@ -57,7 +58,8 @@ jobs: --set ready.service=httpbin \ --set ready.timeout=60s \ --set http.url=http://httpbin.default/get \ - --set http.duration="3s" \ + --set http.duration="3s" + sleep 60 iter8 k log iter8 k delete @@ -70,7 +72,8 @@ jobs: --set ready.timeout=60s \ --set http.url=http://httpbin.default/post \ --set http.payloadStr=hello \ - --set http.duration="3s" \ + --set http.duration="3s" + sleep 60 iter8 k log iter8 k delete @@ -85,7 +88,8 @@ jobs: --set http.endpoints.getAnything.url=http://httpbin.default/anything \ --set http.endpoints.post.url=http://httpbin.default/post \ --set http.endpoints.post.payloadStr=hello \ - --set http.duration="3s" \ + --set http.duration="3s" + sleep 60 iter8 k log iter8 k delete @@ -131,7 +135,8 @@ jobs: --set grpc.host=routeguide.default:50051 \ --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ --set grpc.call=routeguide.RouteGuide.GetFeature \ - --set 
grpc.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ + --set grpc.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json + sleep 60 iter8 k log iter8 k delete @@ -147,7 +152,8 @@ jobs: --set grpc.endpoints.getFeature.call=routeguide.RouteGuide.GetFeature \ --set grpc.endpoints.getFeature.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ --set grpc.endpoints.listFeatures.call=routeguide.RouteGuide.ListFeatures \ - --set grpc.endpoints.listFeatures.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/server.json \ + --set grpc.endpoints.listFeatures.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/server.json + sleep 60 iter8 k log iter8 k delete @@ -184,7 +190,8 @@ jobs: --set grpc.host="hello.default:50051" \ --set grpc.call="helloworld.Greeter.SayHello" \ --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ - --set grpc.data.name="frodo" \ + --set grpc.data.name="frodo" + sleep 60 iter8 k log iter8 k delete @@ -199,6 +206,7 @@ jobs: --set grpc.call="helloworld.Greeter.SayHello" \ --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ --set grpc.dataURL="https://gist.githubusercontent.com/sriumcp/3f3178f4b698af6696c925832e51b0ba/raw/d02aa698d34aa2067f7a2f6afb4ceb616b0db822/name.json" \ - --set grpc.metadataURL="https://gist.githubusercontent.com/sriumcp/3f3178f4b698af6696c925832e51b0ba/raw/d02aa698d34aa2067f7a2f6afb4ceb616b0db822/name.json" \ + --set grpc.metadataURL="https://gist.githubusercontent.com/sriumcp/3f3178f4b698af6696c925832e51b0ba/raw/d02aa698d34aa2067f7a2f6afb4ceb616b0db822/name.json" + sleep 60 iter8 k log iter8 k delete diff --git a/base/metrics.go b/base/metrics.go index eb25396b1..396ff5dcf 100644 --- a/base/metrics.go +++ b/base/metrics.go @@ -25,6 +25,7 @@ const ( GRPCDashboardPath = "/grpcDashboard" ) +// callMetricsService is a general function that can be used to send data to the metrics service func callMetricsService(method, metricsServerURL, path string, queryParams map[string]string, payload interface{}) error { // handle URL and URL parameters u, err := url.ParseRequestURI(metricsServerURL + path) @@ -78,6 +79,7 @@ func callMetricsService(method, metricsServerURL, path string, queryParams map[s return nil } +// PutExperimentResultToMetricsService sends the experiment result to the metrics service func PutExperimentResultToMetricsService(metricsServerURL, namespace, experiment string, experimentResult *ExperimentResult) error { return callMetricsService(http.MethodPut, metricsServerURL, ExperimentResultPath, map[string]string{ "namespace": namespace, diff --git a/charts/iter8/Chart.yaml b/charts/iter8/Chart.yaml index ef75e238c..497ff0186 100644 --- a/charts/iter8/Chart.yaml +++ b/charts/iter8/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 name: iter8 -version: 0.15.1 +version: 0.15.2 description: Iter8 experiment chart type: application home: https://iter8.tools From 8d26440aca7294e8d77c12ded67e49f8d90f0ba1 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Mon, 21 Aug 2023 13:31:33 -0400 Subject: [PATCH 075/121] Rename traffic controller to controller Signed-off-by: Alan Cha --- .github/workflows/testcharts.yaml | 16 +++++----- .github/workflows/testkustomize.yaml | 32 +++++++++---------- charts/{traffic => 
controller}/.helmignore | 0 charts/{traffic => controller}/Chart.yaml | 6 ++-- .../templates/_helpers.tpl | 6 ++-- .../templates/configmap.yaml | 0 .../templates/persistentvolumeclaim.yaml | 2 +- .../templates/roles.yaml | 4 +-- .../templates/service.yaml | 2 +- .../templates/serviceaccount.yaml | 2 +- .../templates/statefulset.yaml | 8 ++--- .../testdata/values.yaml | 0 charts/{traffic => controller}/values.yaml | 0 .../iter8/namespaceScoped/kustomization.yaml | 2 +- kustomize/iter8/namespaceScoped/service.yaml | 2 +- .../iter8/namespaceScoped/statefulset.yaml | 6 ++-- 16 files changed, 44 insertions(+), 44 deletions(-) rename charts/{traffic => controller}/.helmignore (100%) rename charts/{traffic => controller}/Chart.yaml (83%) rename charts/{traffic => controller}/templates/_helpers.tpl (70%) rename charts/{traffic => controller}/templates/configmap.yaml (100%) rename charts/{traffic => controller}/templates/persistentvolumeclaim.yaml (83%) rename charts/{traffic => controller}/templates/roles.yaml (92%) rename charts/{traffic => controller}/templates/service.yaml (79%) rename charts/{traffic => controller}/templates/serviceaccount.yaml (62%) rename charts/{traffic => controller}/templates/statefulset.yaml (88%) rename charts/{traffic => controller}/testdata/values.yaml (100%) rename charts/{traffic => controller}/values.yaml (100%) diff --git a/.github/workflows/testcharts.yaml b/.github/workflows/testcharts.yaml index a019338ee..81264923d 100644 --- a/.github/workflows/testcharts.yaml +++ b/.github/workflows/testcharts.yaml @@ -387,8 +387,8 @@ jobs: iter8 k log iter8 k delete - traffic: - name: Traffic test + controller: + name: Controller test needs: get_versions runs-on: ubuntu-latest strategy: @@ -399,11 +399,11 @@ jobs: - name: Check out code uses: actions/checkout@v3 - - name: Get modified files in the charts/traffic folder + - name: Get modified files in the charts/controller folder id: modified-files uses: tj-actions/changed-files@v35 with: - files: charts/traffic + files: charts/controller - name: Start kind cluster ${{ matrix.version }} uses: helm/kind-action@v1.5.0 @@ -412,12 +412,12 @@ jobs: wait: 300s node_image: ${{ matrix.version }} - - name: Start traffic + - name: Start controller if: steps.modified-files.outputs.any_modified == 'true' run: | - helm install traffic charts/traffic -f charts/traffic/testdata/values.yaml + helm install controller charts/controller -f charts/controller/testdata/values.yaml - - name: Check traffic + - name: Check controller if: steps.modified-files.outputs.any_modified == 'true' run: | - kubectl rollout status --watch --timeout=60s statefulset.apps/traffic + kubectl rollout status --watch --timeout=60s statefulset.apps/controller diff --git a/.github/workflows/testkustomize.yaml b/.github/workflows/testkustomize.yaml index a7adac5ea..e2aef8a47 100644 --- a/.github/workflows/testkustomize.yaml +++ b/.github/workflows/testkustomize.yaml @@ -35,8 +35,8 @@ jobs: outputs: matrix: ${{ steps.set-matrix.outputs.matrix }} - traffic: - name: Traffic test + controller: + name: Controller test needs: get_versions runs-on: ubuntu-latest strategy: @@ -47,11 +47,11 @@ jobs: - name: Check out code uses: actions/checkout@v3 - - name: Get modified files in the charts/traffic folder + - name: Get modified files in the charts/controller folder id: modified-files uses: tj-actions/changed-files@v35 with: - files: charts/traffic + files: charts/controller - name: Start kind cluster ${{ matrix.version }} uses: helm/kind-action@v1.5.0 @@ -60,18 +60,18 @@ jobs: 
wait: 300s node_image: ${{ matrix.version }} - - name: Start traffic + - name: Start controller if: steps.modified-files.outputs.any_modified == 'true' run: | - kubectl apply -k kustomize/traffic/namespaceScoped + kubectl apply -k kustomize/controller/namespaceScoped - - name: Check traffic + - name: Check controller if: steps.modified-files.outputs.any_modified == 'true' run: | - kubectl rollout status --watch --timeout=60s statefulset.apps/iter8-traffic + kubectl rollout status --watch --timeout=60s statefulset.apps/iter8-controller - traffic-clusterScoped: - name: Traffic cluster scoped test + controller-clusterScoped: + name: Controller cluster scoped test needs: get_versions runs-on: ubuntu-latest strategy: @@ -82,11 +82,11 @@ jobs: - name: Check out code uses: actions/checkout@v3 - - name: Get modified files in the charts/traffic folder + - name: Get modified files in the charts/controller folder id: modified-files uses: tj-actions/changed-files@v35 with: - files: charts/traffic + files: charts/controller - name: Start kind cluster ${{ matrix.version }} uses: helm/kind-action@v1.5.0 @@ -95,12 +95,12 @@ jobs: wait: 300s node_image: ${{ matrix.version }} - - name: Start traffic + - name: Start controller if: steps.modified-files.outputs.any_modified == 'true' run: | - kubectl apply -k kustomize/traffic/clusterScoped + kubectl apply -k kustomize/controller/clusterScoped - - name: Check traffic + - name: Check controller if: steps.modified-files.outputs.any_modified == 'true' run: | - kubectl rollout status --watch --timeout=60s statefulset.apps/iter8-traffic \ No newline at end of file + kubectl rollout status --watch --timeout=60s statefulset.apps/iter8-controller \ No newline at end of file diff --git a/charts/traffic/.helmignore b/charts/controller/.helmignore similarity index 100% rename from charts/traffic/.helmignore rename to charts/controller/.helmignore diff --git a/charts/traffic/Chart.yaml b/charts/controller/Chart.yaml similarity index 83% rename from charts/traffic/Chart.yaml rename to charts/controller/Chart.yaml index c9c811a74..1737275e8 100644 --- a/charts/traffic/Chart.yaml +++ b/charts/controller/Chart.yaml @@ -1,11 +1,11 @@ apiVersion: v2 -name: traffic +name: controller version: 0.1.10 -description: Iter8 traffic controller +description: Iter8 controller controller type: application keywords: - Iter8 -- traffic +- controller - experiment home: https://iter8.tools sources: diff --git a/charts/traffic/templates/_helpers.tpl b/charts/controller/templates/_helpers.tpl similarity index 70% rename from charts/traffic/templates/_helpers.tpl rename to charts/controller/templates/_helpers.tpl index 382221e2d..3c50cdff6 100644 --- a/charts/traffic/templates/_helpers.tpl +++ b/charts/controller/templates/_helpers.tpl @@ -1,10 +1,10 @@ -{{- define "iter8-traffic.name" -}} +{{- define "iter8-controller.name" -}} {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} {{- end -}} -{{- define "iter8-traffic.labels" -}} +{{- define "iter8-controller.labels" -}} labels: - app.kubernetes.io/name: {{ template "iter8-traffic.name" . }} + app.kubernetes.io/name: {{ template "iter8-controller.name" . 
}} helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/instance: {{ .Release.Name }} diff --git a/charts/traffic/templates/configmap.yaml b/charts/controller/templates/configmap.yaml similarity index 100% rename from charts/traffic/templates/configmap.yaml rename to charts/controller/templates/configmap.yaml diff --git a/charts/traffic/templates/persistentvolumeclaim.yaml b/charts/controller/templates/persistentvolumeclaim.yaml similarity index 83% rename from charts/traffic/templates/persistentvolumeclaim.yaml rename to charts/controller/templates/persistentvolumeclaim.yaml index fbe540a77..34ef849b6 100644 --- a/charts/traffic/templates/persistentvolumeclaim.yaml +++ b/charts/controller/templates/persistentvolumeclaim.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: PersistentVolumeClaim metadata: name: {{ .Release.Name }} - {{ template "iter8-traffic.labels" . }} + {{ template "iter8-controller.labels" . }} spec: accessModes: - ReadWriteOnce diff --git a/charts/traffic/templates/roles.yaml b/charts/controller/templates/roles.yaml similarity index 92% rename from charts/traffic/templates/roles.yaml rename to charts/controller/templates/roles.yaml index f236a6133..ef8a63540 100644 --- a/charts/traffic/templates/roles.yaml +++ b/charts/controller/templates/roles.yaml @@ -6,7 +6,7 @@ kind: Role {{- end }} metadata: name: {{ $.Release.Name }} - {{ template "iter8-traffic.labels" $ }} + {{ template "iter8-controller.labels" $ }} rules: {{- range $typeName, $type := .Values.resourceTypes }} {{- if not $type.Resource }} @@ -31,7 +31,7 @@ kind: RoleBinding {{- end }} metadata: name: {{ $.Release.Name }} - {{ template "iter8-traffic.labels" $ }} + {{ template "iter8-controller.labels" $ }} subjects: - kind: ServiceAccount name: {{ $.Release.Name }} diff --git a/charts/traffic/templates/service.yaml b/charts/controller/templates/service.yaml similarity index 79% rename from charts/traffic/templates/service.yaml rename to charts/controller/templates/service.yaml index 3838ca9c4..ab8b8d2d1 100644 --- a/charts/traffic/templates/service.yaml +++ b/charts/controller/templates/service.yaml @@ -4,7 +4,7 @@ metadata: name: {{ .Release.Name }} spec: selector: - app.kubernetes.io/name: {{ template "iter8-traffic.name" . }} + app.kubernetes.io/name: {{ template "iter8-controller.name" . }} ports: - name: grpc port: {{ .Values.abn.port }} diff --git a/charts/traffic/templates/serviceaccount.yaml b/charts/controller/templates/serviceaccount.yaml similarity index 62% rename from charts/traffic/templates/serviceaccount.yaml rename to charts/controller/templates/serviceaccount.yaml index 81bc37728..28f4db508 100644 --- a/charts/traffic/templates/serviceaccount.yaml +++ b/charts/controller/templates/serviceaccount.yaml @@ -2,4 +2,4 @@ apiVersion: v1 kind: ServiceAccount metadata: name: {{ .Release.Name }} - {{ template "iter8-traffic.labels" . }} \ No newline at end of file + {{ template "iter8-controller.labels" . }} \ No newline at end of file diff --git a/charts/traffic/templates/statefulset.yaml b/charts/controller/templates/statefulset.yaml similarity index 88% rename from charts/traffic/templates/statefulset.yaml rename to charts/controller/templates/statefulset.yaml index e6a565e20..e2d1be856 100644 --- a/charts/traffic/templates/statefulset.yaml +++ b/charts/controller/templates/statefulset.yaml @@ -2,21 +2,21 @@ apiVersion: apps/v1 kind: StatefulSet metadata: name: {{ .Release.Name }} - {{ template "iter8-traffic.labels" . 
}} + {{ template "iter8-controller.labels" . }} spec: serviceName: {{ .Release.Name }} selector: matchLabels: - app.kubernetes.io/name: {{ template "iter8-traffic.name" . }} + app.kubernetes.io/name: {{ template "iter8-controller.name" . }} template: metadata: labels: - app.kubernetes.io/name: {{ template "iter8-traffic.name" . }} + app.kubernetes.io/name: {{ template "iter8-controller.name" . }} spec: terminationGracePeriodSeconds: 10 serviceAccountName: {{ .Release.Name }} containers: - - name: iter8-traffic + - name: iter8-controller image: {{ .Values.image }} imagePullPolicy: Always command: ["/bin/iter8"] diff --git a/charts/traffic/testdata/values.yaml b/charts/controller/testdata/values.yaml similarity index 100% rename from charts/traffic/testdata/values.yaml rename to charts/controller/testdata/values.yaml diff --git a/charts/traffic/values.yaml b/charts/controller/values.yaml similarity index 100% rename from charts/traffic/values.yaml rename to charts/controller/values.yaml diff --git a/kustomize/iter8/namespaceScoped/kustomization.yaml b/kustomize/iter8/namespaceScoped/kustomization.yaml index e356ac9ea..09f3fd598 100644 --- a/kustomize/iter8/namespaceScoped/kustomization.yaml +++ b/kustomize/iter8/namespaceScoped/kustomization.yaml @@ -8,5 +8,5 @@ resources: - statefulset.yaml commonLabels: - app.kubernetes.io/name: traffic + app.kubernetes.io/name: controller app.kubernetes.io/version: v0.15 diff --git a/kustomize/iter8/namespaceScoped/service.yaml b/kustomize/iter8/namespaceScoped/service.yaml index 3c1cc4698..19c66f0ff 100644 --- a/kustomize/iter8/namespaceScoped/service.yaml +++ b/kustomize/iter8/namespaceScoped/service.yaml @@ -5,7 +5,7 @@ metadata: spec: clusterIP: None selector: - app.kubernetes.io/name: traffic + app.kubernetes.io/name: controller ports: - name: grpc port: 50051 diff --git a/kustomize/iter8/namespaceScoped/statefulset.yaml b/kustomize/iter8/namespaceScoped/statefulset.yaml index 8b7bcc377..2ad1016f2 100644 --- a/kustomize/iter8/namespaceScoped/statefulset.yaml +++ b/kustomize/iter8/namespaceScoped/statefulset.yaml @@ -6,16 +6,16 @@ spec: serviceName: iter8 selector: matchLabels: - app.kubernetes.io/name: traffic + app.kubernetes.io/name: controller template: metadata: labels: - app.kubernetes.io/name: traffic + app.kubernetes.io/name: controller spec: terminationGracePeriodSeconds: 10 serviceAccountName: iter8 containers: - - name: iter8-traffic + - name: iter8-controller image: iter8/iter8:0.16 imagePullPolicy: Always command: ["/bin/iter8"] From 38b842bd0993a31ef6757dec465a8589aa64378f Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Mon, 21 Aug 2023 13:43:54 -0400 Subject: [PATCH 076/121] Fix test Signed-off-by: Alan Cha --- .github/workflows/testkustomize.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/testkustomize.yaml b/.github/workflows/testkustomize.yaml index e2aef8a47..c8d3c075e 100644 --- a/.github/workflows/testkustomize.yaml +++ b/.github/workflows/testkustomize.yaml @@ -63,7 +63,7 @@ jobs: - name: Start controller if: steps.modified-files.outputs.any_modified == 'true' run: | - kubectl apply -k kustomize/controller/namespaceScoped + kubectl apply -k kustomize/iter8/namespaceScoped - name: Check controller if: steps.modified-files.outputs.any_modified == 'true' @@ -98,7 +98,7 @@ jobs: - name: Start controller if: steps.modified-files.outputs.any_modified == 'true' run: | - kubectl apply -k kustomize/controller/clusterScoped + kubectl apply -k kustomize/iter8/clusterScoped - name: Check controller 
if: steps.modified-files.outputs.any_modified == 'true' From 92265f8f25afe30ee45d3408088df8191497a6f3 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Mon, 21 Aug 2023 14:53:45 -0400 Subject: [PATCH 077/121] Rename kustomize/iter8 to kustomize/controller Signed-off-by: Alan Cha --- .github/workflows/testkustomize.yaml | 8 ++++---- .../clusterScoped/kustomization.yaml | 0 .../{iter8 => controller}/namespaceScoped/configmap.yaml | 0 .../namespaceScoped/kustomization.yaml | 0 kustomize/{iter8 => controller}/namespaceScoped/pvc.yaml | 0 kustomize/{iter8 => controller}/namespaceScoped/role.yaml | 0 .../namespaceScoped/rolebinding.yaml | 0 .../{iter8 => controller}/namespaceScoped/service.yaml | 0 .../namespaceScoped/serviceaccount.yaml | 0 .../namespaceScoped/statefulset.yaml | 0 10 files changed, 4 insertions(+), 4 deletions(-) rename kustomize/{iter8 => controller}/clusterScoped/kustomization.yaml (100%) rename kustomize/{iter8 => controller}/namespaceScoped/configmap.yaml (100%) rename kustomize/{iter8 => controller}/namespaceScoped/kustomization.yaml (100%) rename kustomize/{iter8 => controller}/namespaceScoped/pvc.yaml (100%) rename kustomize/{iter8 => controller}/namespaceScoped/role.yaml (100%) rename kustomize/{iter8 => controller}/namespaceScoped/rolebinding.yaml (100%) rename kustomize/{iter8 => controller}/namespaceScoped/service.yaml (100%) rename kustomize/{iter8 => controller}/namespaceScoped/serviceaccount.yaml (100%) rename kustomize/{iter8 => controller}/namespaceScoped/statefulset.yaml (100%) diff --git a/.github/workflows/testkustomize.yaml b/.github/workflows/testkustomize.yaml index c8d3c075e..dc17ff7fa 100644 --- a/.github/workflows/testkustomize.yaml +++ b/.github/workflows/testkustomize.yaml @@ -63,12 +63,12 @@ jobs: - name: Start controller if: steps.modified-files.outputs.any_modified == 'true' run: | - kubectl apply -k kustomize/iter8/namespaceScoped + kubectl apply -k kustomize/controller/namespaceScoped - name: Check controller if: steps.modified-files.outputs.any_modified == 'true' run: | - kubectl rollout status --watch --timeout=60s statefulset.apps/iter8-controller + kubectl rollout status --watch --timeout=60s statefulset.apps/iter8 controller-clusterScoped: name: Controller cluster scoped test @@ -98,9 +98,9 @@ jobs: - name: Start controller if: steps.modified-files.outputs.any_modified == 'true' run: | - kubectl apply -k kustomize/iter8/clusterScoped + kubectl apply -k kustomize/controller/clusterScoped - name: Check controller if: steps.modified-files.outputs.any_modified == 'true' run: | - kubectl rollout status --watch --timeout=60s statefulset.apps/iter8-controller \ No newline at end of file + kubectl rollout status --watch --timeout=60s statefulset.apps/iter8 \ No newline at end of file diff --git a/kustomize/iter8/clusterScoped/kustomization.yaml b/kustomize/controller/clusterScoped/kustomization.yaml similarity index 100% rename from kustomize/iter8/clusterScoped/kustomization.yaml rename to kustomize/controller/clusterScoped/kustomization.yaml diff --git a/kustomize/iter8/namespaceScoped/configmap.yaml b/kustomize/controller/namespaceScoped/configmap.yaml similarity index 100% rename from kustomize/iter8/namespaceScoped/configmap.yaml rename to kustomize/controller/namespaceScoped/configmap.yaml diff --git a/kustomize/iter8/namespaceScoped/kustomization.yaml b/kustomize/controller/namespaceScoped/kustomization.yaml similarity index 100% rename from kustomize/iter8/namespaceScoped/kustomization.yaml rename to 
kustomize/controller/namespaceScoped/kustomization.yaml diff --git a/kustomize/iter8/namespaceScoped/pvc.yaml b/kustomize/controller/namespaceScoped/pvc.yaml similarity index 100% rename from kustomize/iter8/namespaceScoped/pvc.yaml rename to kustomize/controller/namespaceScoped/pvc.yaml diff --git a/kustomize/iter8/namespaceScoped/role.yaml b/kustomize/controller/namespaceScoped/role.yaml similarity index 100% rename from kustomize/iter8/namespaceScoped/role.yaml rename to kustomize/controller/namespaceScoped/role.yaml diff --git a/kustomize/iter8/namespaceScoped/rolebinding.yaml b/kustomize/controller/namespaceScoped/rolebinding.yaml similarity index 100% rename from kustomize/iter8/namespaceScoped/rolebinding.yaml rename to kustomize/controller/namespaceScoped/rolebinding.yaml diff --git a/kustomize/iter8/namespaceScoped/service.yaml b/kustomize/controller/namespaceScoped/service.yaml similarity index 100% rename from kustomize/iter8/namespaceScoped/service.yaml rename to kustomize/controller/namespaceScoped/service.yaml diff --git a/kustomize/iter8/namespaceScoped/serviceaccount.yaml b/kustomize/controller/namespaceScoped/serviceaccount.yaml similarity index 100% rename from kustomize/iter8/namespaceScoped/serviceaccount.yaml rename to kustomize/controller/namespaceScoped/serviceaccount.yaml diff --git a/kustomize/iter8/namespaceScoped/statefulset.yaml b/kustomize/controller/namespaceScoped/statefulset.yaml similarity index 100% rename from kustomize/iter8/namespaceScoped/statefulset.yaml rename to kustomize/controller/namespaceScoped/statefulset.yaml From f5d0e1bec9edebc580856a91ea45a5e3b48c213d Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Mon, 21 Aug 2023 15:26:16 -0400 Subject: [PATCH 078/121] Update Grafana dashboard to use ExperimentResult Signed-off-by: Alan Cha --- grafana/grpc.json | 8 ++++---- grafana/http.json | 14 +++++++------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/grafana/grpc.json b/grafana/grpc.json index ee945c3ef..e6e393ded 100644 --- a/grafana/grpc.json +++ b/grafana/grpc.json @@ -139,7 +139,7 @@ }, "fields": [ { - "jsonPath": "$.Summary.Failure" + "jsonPath": "$.ExperimentResult.Failure" } ], "method": "GET", @@ -209,7 +209,7 @@ }, "fields": [ { - "jsonPath": "$.Summary.Completed tasks" + "jsonPath": "$.ExperimentResult.Completed tasks" } ], "method": "GET", @@ -294,7 +294,7 @@ }, "fields": [ { - "jsonPath": "$.Summary" + "jsonPath": "$.ExperimentResult" } ], "method": "GET", @@ -309,7 +309,7 @@ "id": "extractFields", "options": { "replace": true, - "source": "Summary" + "source": "ExperimentResult" } }, { diff --git a/grafana/http.json b/grafana/http.json index adcab66a6..9975b9ad0 100644 --- a/grafana/http.json +++ b/grafana/http.json @@ -145,7 +145,7 @@ }, "fields": [ { - "jsonPath": "$.Summary.Failure" + "jsonPath": "$.ExperimentResult.Failure" } ], "method": "GET", @@ -215,7 +215,7 @@ }, "fields": [ { - "jsonPath": "$.Summary.Completed tasks" + "jsonPath": "$.ExperimentResult.Completed tasks" } ], "method": "GET", @@ -285,8 +285,8 @@ "values": false }, "text": { - "titleSize": 30, - "valueSize": 30 + "titleSize": 20, + "valueSize": 20 }, "textMode": "auto" }, @@ -300,7 +300,7 @@ }, "fields": [ { - "jsonPath": "$.Summary" + "jsonPath": "$.ExperimentResult" } ], "method": "GET", @@ -315,7 +315,7 @@ "id": "extractFields", "options": { "replace": true, - "source": "Summary" + "source": "ExperimentResult" } }, { @@ -847,6 +847,6 @@ "timezone": "", "title": "HTTP Performance", "uid": "e8758667-b4e1-41c2-9bf4-446dc7c1fd27", - "version": 8, + 
"version": 4, "weekStart": "" } \ No newline at end of file From 7875f080d4092b6744e0368973c0a5ca04b920b0 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Tue, 22 Aug 2023 17:39:00 -0400 Subject: [PATCH 079/121] Comment other workflows Signed-off-by: Alan Cha --- .github/workflows/assets.yaml | 444 +++++++------- .github/workflows/draftrelease.yaml | 32 +- .github/workflows/golangci-lint.yml | 80 +-- .github/workflows/linkcheck.yaml | 50 +- .github/workflows/lintcharts.yaml | 108 ++-- .github/workflows/lintcharts2.yaml | 128 ++--- .github/workflows/releasecharts.yaml | 58 +- .github/workflows/spellcheck.yaml | 36 +- .github/workflows/testcharts.yaml | 832 +++++++++++++-------------- .github/workflows/testkustomize.yaml | 208 +++---- .github/workflows/versionbump.yaml | 176 +++--- 11 files changed, 1076 insertions(+), 1076 deletions(-) diff --git a/.github/workflows/assets.yaml b/.github/workflows/assets.yaml index 07dd6c161..848744b6f 100644 --- a/.github/workflows/assets.yaml +++ b/.github/workflows/assets.yaml @@ -1,232 +1,232 @@ -name: Publish binaries and Docker image +# name: Publish binaries and Docker image -on: - release: - types: [published] +# on: +# release: +# types: [published] -jobs: - assets: - name: Publish binaries - runs-on: ubuntu-latest - steps: - - name: Install Go - uses: actions/setup-go@v4 - with: - go-version: 1.19 - - uses: actions/checkout@v3 - - name: Build binaries - run: | - VERSION=${GITHUB_REF#refs/*/} - echo "Version: ${VERSION}" - make dist - - name: Upload binaries to release - uses: svenstaro/upload-release-action@v2 - with: - repo_token: ${{ secrets.GITHUB_TOKEN }} - file: _dist/iter8-*.tar.gz - tag: ${{ github.ref }} - overwrite: true - file_glob: true - - name: Create checksum - run: | - VERSION=${GITHUB_REF#refs/*/} - echo "VERSION=$VERSION" >> $GITHUB_ENV - wget https://github.com/iter8-tools/iter8/archive/refs/tags/${VERSION}.zip - sha256sum ${VERSION}.zip > checksum.txt - wget https://github.com/iter8-tools/iter8/archive/refs/tags/${VERSION}.tar.gz - sha256sum ${VERSION}.tar.gz >> checksum.txt - cd _dist - for f in iter8-*.tar.gz - do - sha256sum ${f} >> ../checksum.txt - done - # pick up darwin checksum and export it - echo "SHAFORMAC=$(grep darwin ../checksum.txt | awk '{print $1}')" >> $GITHUB_ENV - - name: Upload checksum to release - uses: svenstaro/upload-release-action@v2 - with: - repo_token: ${{ secrets.GITHUB_TOKEN }} - asset_name: checksum.txt - file: checksum.txt - tag: ${{ github.ref }} - overwrite: true +# jobs: +# assets: +# name: Publish binaries +# runs-on: ubuntu-latest +# steps: +# - name: Install Go +# uses: actions/setup-go@v4 +# with: +# go-version: 1.19 +# - uses: actions/checkout@v3 +# - name: Build binaries +# run: | +# VERSION=${GITHUB_REF#refs/*/} +# echo "Version: ${VERSION}" +# make dist +# - name: Upload binaries to release +# uses: svenstaro/upload-release-action@v2 +# with: +# repo_token: ${{ secrets.GITHUB_TOKEN }} +# file: _dist/iter8-*.tar.gz +# tag: ${{ github.ref }} +# overwrite: true +# file_glob: true +# - name: Create checksum +# run: | +# VERSION=${GITHUB_REF#refs/*/} +# echo "VERSION=$VERSION" >> $GITHUB_ENV +# wget https://github.com/iter8-tools/iter8/archive/refs/tags/${VERSION}.zip +# sha256sum ${VERSION}.zip > checksum.txt +# wget https://github.com/iter8-tools/iter8/archive/refs/tags/${VERSION}.tar.gz +# sha256sum ${VERSION}.tar.gz >> checksum.txt +# cd _dist +# for f in iter8-*.tar.gz +# do +# sha256sum ${f} >> ../checksum.txt +# done +# # pick up darwin checksum and export it +# echo "SHAFORMAC=$(grep 
darwin ../checksum.txt | awk '{print $1}')" >> $GITHUB_ENV +# - name: Upload checksum to release +# uses: svenstaro/upload-release-action@v2 +# with: +# repo_token: ${{ secrets.GITHUB_TOKEN }} +# asset_name: checksum.txt +# file: checksum.txt +# tag: ${{ github.ref }} +# overwrite: true - build-and-push: - name: Push Iter8 image to Docker Hub - needs: assets - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - name: Get version - run: | - tagref=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,') - # Strip "v" prefix from tagref - echo "VERSION=$(echo $tagref | sed -e 's/^v//')" >> $GITHUB_ENV - echo "MAJOR_MINOR_VERSION=$(echo $tagref | sed -e 's/^v//' -e 's,\([0-9]*\.[0-9]*\)\.\([0-9]*\),\1,')" >> $GITHUB_ENV - - name: Get owner - run: | - ownerrepo=${{ github.repository }} - owner=$(echo $ownerrepo | cut -f1 -d/) - if [[ "$owner" == "iter8-tools" ]]; then - owner=iter8 - fi - echo "OWNER=$owner" >> $GITHUB_ENV - - uses: docker/setup-buildx-action@v2 - - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_SECRET }} - - uses: docker/build-push-action@v4 - with: - platforms: linux/amd64,linux/arm64 - tags: ${{ env.OWNER }}/iter8:${{ env.VERSION }},${{ env.OWNER }}/iter8:${{ env.MAJOR_MINOR_VERSION }},${{ env.OWNER }}/iter8:latest - push: true - build-args: | - TAG=v${{ env.VERSION }} +# build-and-push: +# name: Push Iter8 image to Docker Hub +# needs: assets +# runs-on: ubuntu-latest +# steps: +# - uses: actions/checkout@v3 +# with: +# fetch-depth: 0 +# - name: Get version +# run: | +# tagref=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,') +# # Strip "v" prefix from tagref +# echo "VERSION=$(echo $tagref | sed -e 's/^v//')" >> $GITHUB_ENV +# echo "MAJOR_MINOR_VERSION=$(echo $tagref | sed -e 's/^v//' -e 's,\([0-9]*\.[0-9]*\)\.\([0-9]*\),\1,')" >> $GITHUB_ENV +# - name: Get owner +# run: | +# ownerrepo=${{ github.repository }} +# owner=$(echo $ownerrepo | cut -f1 -d/) +# if [[ "$owner" == "iter8-tools" ]]; then +# owner=iter8 +# fi +# echo "OWNER=$owner" >> $GITHUB_ENV +# - uses: docker/setup-buildx-action@v2 +# - uses: docker/login-action@v2 +# with: +# username: ${{ secrets.DOCKERHUB_USERNAME }} +# password: ${{ secrets.DOCKERHUB_SECRET }} +# - uses: docker/build-push-action@v4 +# with: +# platforms: linux/amd64,linux/arm64 +# tags: ${{ env.OWNER }}/iter8:${{ env.VERSION }},${{ env.OWNER }}/iter8:${{ env.MAJOR_MINOR_VERSION }},${{ env.OWNER }}/iter8:latest +# push: true +# build-args: | +# TAG=v${{ env.VERSION }} - kubernetes-http-experiment: - name: Kubernetes HTTP load test - needs: build-and-push - runs-on: ubuntu-latest - steps: - - name: Install Iter8 - run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 - - name: Start kind cluster - uses: helm/kind-action@v1.5.0 - with: - wait: 300s - - name: Create app - run: | - kubectl create deployment httpbin --image=kennethreitz/httpbin - kubectl expose deployment httpbin --type=ClusterIP --port=80 - kubectl wait --for=condition=available --timeout=60s deploy/httpbin - - name: iter8 k launch - run: | - iter8 k launch \ - --set tasks={http} \ - --set http.url="http://httpbin.default/get" - - name: try other iter8 k commands - run: | - sleep 60 - iter8 k log - iter8 k delete +# kubernetes-http-experiment: +# name: Kubernetes HTTP load test +# needs: build-and-push +# runs-on: ubuntu-latest +# steps: +# - name: Install Iter8 +# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 +# - name: 
Start kind cluster +# uses: helm/kind-action@v1.5.0 +# with: +# wait: 300s +# - name: Create app +# run: | +# kubectl create deployment httpbin --image=kennethreitz/httpbin +# kubectl expose deployment httpbin --type=ClusterIP --port=80 +# kubectl wait --for=condition=available --timeout=60s deploy/httpbin +# - name: iter8 k launch +# run: | +# iter8 k launch \ +# --set tasks={http} \ +# --set http.url="http://httpbin.default/get" +# - name: try other iter8 k commands +# run: | +# sleep 60 +# iter8 k log +# iter8 k delete - kubernetes-grpc-experiment: - name: Kubernetes gRPC load test - needs: build-and-push - runs-on: ubuntu-latest - steps: - - name: Install Iter8 - run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 - - name: Start kind cluster - uses: helm/kind-action@v1.5.0 - with: - wait: 300s - - name: Create app - run: | - kubectl create deployment routeguide --image=golang --port=50051 \ - -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run server/server.go" - kubectl expose deployment routeguide --port=50051 - kubectl wait --for=condition=available --timeout=60s deployment/routeguide +# kubernetes-grpc-experiment: +# name: Kubernetes gRPC load test +# needs: build-and-push +# runs-on: ubuntu-latest +# steps: +# - name: Install Iter8 +# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 +# - name: Start kind cluster +# uses: helm/kind-action@v1.5.0 +# with: +# wait: 300s +# - name: Create app +# run: | +# kubectl create deployment routeguide --image=golang --port=50051 \ +# -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run server/server.go" +# kubectl expose deployment routeguide --port=50051 +# kubectl wait --for=condition=available --timeout=60s deployment/routeguide - - name: Test gRPC service with grpcurl - run: | - curl -sO https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml - kubectl apply -f grpcurl-routeguide.yaml - sleep 180 - kubectl logs deploy/sleep +# - name: Test gRPC service with grpcurl +# run: | +# curl -sO https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml +# kubectl apply -f grpcurl-routeguide.yaml +# sleep 180 +# kubectl logs deploy/sleep - - name: iter8 k launch - run: | - iter8 k launch \ - --set tasks={grpc} \ - --set grpc.host=routeguide.default:50051 \ - --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ - --set grpc.call=routeguide.RouteGuide.GetFeature \ - --set grpc.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ - - name: try other iter8 k commands - run: | - sleep 60 - iter8 k log - iter8 k delete +# - name: iter8 k launch +# run: | +# iter8 k launch \ +# --set tasks={grpc} \ +# --set grpc.host=routeguide.default:50051 \ +# --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ +# --set grpc.call=routeguide.RouteGuide.GetFeature \ +# --set grpc.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ +# - name: try other iter8 k commands +# run: | +# sleep 60 
+# iter8 k log +# iter8 k delete - kubernetes-grpc-experiment2: - name: Kubernetes gRPC load test 2 - needs: build-and-push - runs-on: ubuntu-latest - steps: - - name: Install Iter8 - run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 - - name: Start kind cluster - uses: helm/kind-action@v1.5.0 - with: - wait: 300s - - name: Create app - run: | - kubectl create deploy hello --image=docker.io/grpc/java-example-hostname:latest --port=50051 - kubectl expose deploy hello --port=50051 - kubectl wait --for=condition=available --timeout=60s deploy/hello - - name: iter8 k launch - run: | - iter8 k launch \ - --set tasks={grpc} \ - --set grpc.host="hello.default:50051" \ - --set grpc.call="helloworld.Greeter.SayHello" \ - --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" - - name: try other iter8 k commands - run: | - sleep 60 - iter8 k log - iter8 k delete +# kubernetes-grpc-experiment2: +# name: Kubernetes gRPC load test 2 +# needs: build-and-push +# runs-on: ubuntu-latest +# steps: +# - name: Install Iter8 +# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 +# - name: Start kind cluster +# uses: helm/kind-action@v1.5.0 +# with: +# wait: 300s +# - name: Create app +# run: | +# kubectl create deploy hello --image=docker.io/grpc/java-example-hostname:latest --port=50051 +# kubectl expose deploy hello --port=50051 +# kubectl wait --for=condition=available --timeout=60s deploy/hello +# - name: iter8 k launch +# run: | +# iter8 k launch \ +# --set tasks={grpc} \ +# --set grpc.host="hello.default:50051" \ +# --set grpc.call="helloworld.Greeter.SayHello" \ +# --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" +# - name: try other iter8 k commands +# run: | +# sleep 60 +# iter8 k log +# iter8 k delete - readiness: - name: Kubernetes readiness test - needs: build-and-push - runs-on: ubuntu-latest - steps: - - name: Install Iter8 - run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 - - name: Start kind cluster - uses: helm/kind-action@v1.5.0 - with: - wait: 300s - - name: Deploy resources to cluster - run: | - kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 - kubectl expose deploy httpbin --port=80 - - name: k launch with readiness checks - run: | - iter8 k launch \ - --set "tasks={ready,http}" \ - --set ready.deploy="httpbin" \ - --set ready.service="httpbin" \ - --set ready.timeout=60s \ - --set http.url=http://httpbin.default +# readiness: +# name: Kubernetes readiness test +# needs: build-and-push +# runs-on: ubuntu-latest +# steps: +# - name: Install Iter8 +# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 +# - name: Start kind cluster +# uses: helm/kind-action@v1.5.0 +# with: +# wait: 300s +# - name: Deploy resources to cluster +# run: | +# kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 +# kubectl expose deploy httpbin --port=80 +# - name: k launch with readiness checks +# run: | +# iter8 k launch \ +# --set "tasks={ready,http}" \ +# --set ready.deploy="httpbin" \ +# --set ready.service="httpbin" \ +# --set ready.timeout=60s \ +# --set http.url=http://httpbin.default - readiness-with-namespace: - name: Kubernetes readiness test with namespace - needs: build-and-push - runs-on: ubuntu-latest - steps: - - name: Install Iter8 - run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 - - name: Start kind cluster - 
uses: helm/kind-action@v1.5.0 - with: - wait: 300s - - name: Deploy resources to cluster - run: | - kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 - kubectl expose deploy httpbin --port=80 - kubectl create namespace experiments - - name: k launch with readiness checks - run: | - iter8 k launch -n experiments \ - --set "tasks={ready,http}" \ - --set ready.deploy="httpbin" \ - --set ready.service="httpbin" \ - --set ready.timeout=60s \ - --set ready.namespace=default \ - --set http.url=http://httpbin.default/get \ No newline at end of file +# readiness-with-namespace: +# name: Kubernetes readiness test with namespace +# needs: build-and-push +# runs-on: ubuntu-latest +# steps: +# - name: Install Iter8 +# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 +# - name: Start kind cluster +# uses: helm/kind-action@v1.5.0 +# with: +# wait: 300s +# - name: Deploy resources to cluster +# run: | +# kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 +# kubectl expose deploy httpbin --port=80 +# kubectl create namespace experiments +# - name: k launch with readiness checks +# run: | +# iter8 k launch -n experiments \ +# --set "tasks={ready,http}" \ +# --set ready.deploy="httpbin" \ +# --set ready.service="httpbin" \ +# --set ready.timeout=60s \ +# --set ready.namespace=default \ +# --set http.url=http://httpbin.default/get \ No newline at end of file diff --git a/.github/workflows/draftrelease.yaml b/.github/workflows/draftrelease.yaml index d05c20277..5236cebf8 100644 --- a/.github/workflows/draftrelease.yaml +++ b/.github/workflows/draftrelease.yaml @@ -1,18 +1,18 @@ -name: Release drafter +# name: Release drafter -on: - push: - # branches to consider in the event; optional, defaults to all - branches: - - master +# on: +# push: +# # branches to consider in the event; optional, defaults to all +# branches: +# - master -jobs: - update_release_draft: - runs-on: ubuntu-latest - steps: - # Drafts your next Release notes as Pull Requests are merged into any tracked branch - - uses: release-drafter/release-drafter@v5 - with: - config-name: release-config.yaml - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file +# jobs: +# update_release_draft: +# runs-on: ubuntu-latest +# steps: +# # Drafts your next Release notes as Pull Requests are merged into any tracked branch +# - uses: release-drafter/release-drafter@v5 +# with: +# config-name: release-config.yaml +# env: +# GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 7509f8e0c..3916deceb 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -1,43 +1,43 @@ -name: golangci-lint +# name: golangci-lint -on: - pull_request: +# on: +# pull_request: -permissions: - contents: read - # Optional: allow read access to pull request. Use with `only-new-issues` option. - # pull-requests: read +# permissions: +# contents: read +# # Optional: allow read access to pull request. Use with `only-new-issues` option. 
+# # pull-requests: read -jobs: - golangci: - name: lint - runs-on: ubuntu-latest - steps: - - uses: actions/setup-go@v4 - with: - go-version: 1.19 - - uses: actions/checkout@v3 - - name: golangci-lint - uses: golangci/golangci-lint-action@v3 - with: - # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version - version: v1.50.1 - - # Optional: working directory, useful for monorepos - # working-directory: somedir - - # Optional: golangci-lint command line arguments. - # args: --issues-exit-code=0 - - # Optional: show only new issues if it's a pull request. The default value is `false`. - # only-new-issues: true - - # Optional: if set to true then the all caching functionality will be complete disabled, - # takes precedence over all other caching options. - # skip-cache: true - - # Optional: if set to true then the action don't cache or restore ~/go/pkg. - # skip-pkg-cache: true - - # Optional: if set to true then the action don't cache or restore ~/.cache/go-build. - # skip-build-cache: true \ No newline at end of file +# jobs: +# golangci: +# name: lint +# runs-on: ubuntu-latest +# steps: +# - uses: actions/setup-go@v4 +# with: +# go-version: 1.19 +# - uses: actions/checkout@v3 +# - name: golangci-lint +# uses: golangci/golangci-lint-action@v3 +# with: +# # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version +# version: v1.50.1 + +# # Optional: working directory, useful for monorepos +# # working-directory: somedir + +# # Optional: golangci-lint command line arguments. +# # args: --issues-exit-code=0 + +# # Optional: show only new issues if it's a pull request. The default value is `false`. +# # only-new-issues: true + +# # Optional: if set to true then the all caching functionality will be complete disabled, +# # takes precedence over all other caching options. +# # skip-cache: true + +# # Optional: if set to true then the action don't cache or restore ~/go/pkg. +# # skip-pkg-cache: true + +# # Optional: if set to true then the action don't cache or restore ~/.cache/go-build. 
+# # skip-build-cache: true \ No newline at end of file diff --git a/.github/workflows/linkcheck.yaml b/.github/workflows/linkcheck.yaml index 0e8bc8e25..2cbb7f145 100644 --- a/.github/workflows/linkcheck.yaml +++ b/.github/workflows/linkcheck.yaml @@ -1,29 +1,29 @@ -name: Link checker +# name: Link checker -on: - pull_request: - branches: - - master - schedule: - - cron: "0 0 1 * *" +# on: +# pull_request: +# branches: +# - master +# schedule: +# - cron: "0 0 1 * *" -# A workflow run is made up of one or more jobs that can run sequentially or in parallel -jobs: - # This workflow contains a single job called "build" - build: - # The type of runner that the job will run on - runs-on: ubuntu-latest +# # A workflow run is made up of one or more jobs that can run sequentially or in parallel +# jobs: +# # This workflow contains a single job called "build" +# build: +# # The type of runner that the job will run on +# runs-on: ubuntu-latest - # Steps represent a sequence of tasks that will be executed as part of the job - steps: - # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it - - uses: actions/checkout@v3 +# # Steps represent a sequence of tasks that will be executed as part of the job +# steps: +# # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it +# - uses: actions/checkout@v3 - - name: Link checker - id: lychee - uses: lycheeverse/lychee-action@v1.6.1 - env: - GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} - with: - fail: true - args: -v '**/*.md' \ No newline at end of file +# - name: Link checker +# id: lychee +# uses: lycheeverse/lychee-action@v1.6.1 +# env: +# GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} +# with: +# fail: true +# args: -v '**/*.md' \ No newline at end of file diff --git a/.github/workflows/lintcharts.yaml b/.github/workflows/lintcharts.yaml index 6d209c86a..d99e31be7 100644 --- a/.github/workflows/lintcharts.yaml +++ b/.github/workflows/lintcharts.yaml @@ -1,65 +1,65 @@ -name: Lint Helm charts +# name: Lint Helm charts -on: - pull_request: - branches: - - master +# on: +# pull_request: +# branches: +# - master -jobs: - # Get the paths for the Helm charts to lint - get_paths: - runs-on: ubuntu-latest +# jobs: +# # Get the paths for the Helm charts to lint +# get_paths: +# runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - with: - fetch-depth: 0 +# steps: +# - uses: actions/checkout@v3 +# with: +# fetch-depth: 0 - - name: Get the paths for Helm charts to lint - id: set-matrix - run: | - # Get paths (in string form) - stringPaths=$(find -maxdepth 2 -path './charts/*') +# - name: Get the paths for Helm charts to lint +# id: set-matrix +# run: | +# # Get paths (in string form) +# stringPaths=$(find -maxdepth 2 -path './charts/*') - # Check paths (length greater than 0) - stringPathsLength=$(echo ${#stringPaths}) - if (( stringPathsLength == 0 )); - then - echo "No paths to check" - exit 1 - fi +# # Check paths (length greater than 0) +# stringPathsLength=$(echo ${#stringPaths}) +# if (( stringPathsLength == 0 )); +# then +# echo "No paths to check" +# exit 1 +# fi - # Serialize paths into JSON array - paths=$(jq -ncR '[inputs]' <<< "$stringPaths") +# # Serialize paths into JSON array +# paths=$(jq -ncR '[inputs]' <<< "$stringPaths") - # Output serialized paths - echo "matrix=$paths" >> $GITHUB_OUTPUT - echo $paths +# # Output serialized paths +# echo "matrix=$paths" >> $GITHUB_OUTPUT +# echo $paths - outputs: - matrix: ${{ steps.set-matrix.outputs.matrix }} +# outputs: +# matrix: ${{ steps.set-matrix.outputs.matrix 
}} - # Lint Helm charts based on paths provided by previous job - lint: - name: Test changed-files - needs: get_paths - runs-on: ubuntu-latest - strategy: - matrix: - version: ${{ fromJson(needs.get_paths.outputs.matrix) }} - steps: - - uses: actions/checkout@v3 - with: - fetch-depth: 0 +# # Lint Helm charts based on paths provided by previous job +# lint: +# name: Test changed-files +# needs: get_paths +# runs-on: ubuntu-latest +# strategy: +# matrix: +# version: ${{ fromJson(needs.get_paths.outputs.matrix) }} +# steps: +# - uses: actions/checkout@v3 +# with: +# fetch-depth: 0 - - name: Get modified files in the ${{ matrix.version }} folder - id: modified-files - uses: tj-actions/changed-files@v35 - with: - files: ${{ matrix.version }} +# - name: Get modified files in the ${{ matrix.version }} folder +# id: modified-files +# uses: tj-actions/changed-files@v35 +# with: +# files: ${{ matrix.version }} - - name: Lint Helm charts in the ${{ matrix.version }} folder - uses: stackrox/kube-linter-action@v1 - if: steps.modified-files.outputs.any_modified == 'true' - with: - directory: ${{ matrix.version }} \ No newline at end of file +# - name: Lint Helm charts in the ${{ matrix.version }} folder +# uses: stackrox/kube-linter-action@v1 +# if: steps.modified-files.outputs.any_modified == 'true' +# with: +# directory: ${{ matrix.version }} \ No newline at end of file diff --git a/.github/workflows/lintcharts2.yaml b/.github/workflows/lintcharts2.yaml index 31b5281f8..89f13bc2e 100644 --- a/.github/workflows/lintcharts2.yaml +++ b/.github/workflows/lintcharts2.yaml @@ -1,77 +1,77 @@ -name: Additional Helm chart linting -# Like lintcharts.yaml, the other lint Helm chart workflow, this workflow uses kube-linter -# kube-linter checks Helm templates but it does not check what is contained in {{ define ... }} blocks -# This workflow builds on the other workflow by producing Kubernetes YAML files from the templates and running kube-linter on those files -# See iter8-tools/iter8#1452 +# name: Additional Helm chart linting +# # Like lintcharts.yaml, the other lint Helm chart workflow, this workflow uses kube-linter +# # kube-linter checks Helm templates but it does not check what is contained in {{ define ... 
}} blocks +# # This workflow builds on the other workflow by producing Kubernetes YAML files from the templates and running kube-linter on those files +# # See iter8-tools/iter8#1452 -on: - pull_request: - branches: - - master +# on: +# pull_request: +# branches: +# - master -jobs: - http-experiment: - name: Lint HTTP experiment - runs-on: ubuntu-latest +# jobs: +# http-experiment: +# name: Lint HTTP experiment +# runs-on: ubuntu-latest - steps: - - name: Check out code - uses: actions/checkout@v3 +# steps: +# - name: Check out code +# uses: actions/checkout@v3 - - name: Get modified files in the charts/iter8 folder - id: modified-files - uses: tj-actions/changed-files@v35 - with: - files: charts/iter8 +# - name: Get modified files in the charts/iter8 folder +# id: modified-files +# uses: tj-actions/changed-files@v35 +# with: +# files: charts/iter8 - - uses: azure/setup-helm@v3 - if: steps.modified-files.outputs.any_modified == 'true' - with: - token: ${{ secrets.GITHUB_TOKEN }} +# - uses: azure/setup-helm@v3 +# if: steps.modified-files.outputs.any_modified == 'true' +# with: +# token: ${{ secrets.GITHUB_TOKEN }} - - name: Create Kubernetes YAML file - if: steps.modified-files.outputs.any_modified == 'true' - run: | - helm template charts/iter8 \ - --set tasks={http} \ - --set http.url=http://httpbin.default/get >> iter8.yaml +# - name: Create Kubernetes YAML file +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# helm template charts/iter8 \ +# --set tasks={http} \ +# --set http.url=http://httpbin.default/get >> iter8.yaml - - name: Lint Kubernetes YAML file - if: steps.modified-files.outputs.any_modified == 'true' - uses: stackrox/kube-linter-action@v1 - with: - directory: iter8.yaml +# - name: Lint Kubernetes YAML file +# if: steps.modified-files.outputs.any_modified == 'true' +# uses: stackrox/kube-linter-action@v1 +# with: +# directory: iter8.yaml - grpc-experiment: - name: Lint gRPC experiment - runs-on: ubuntu-latest +# grpc-experiment: +# name: Lint gRPC experiment +# runs-on: ubuntu-latest - steps: - - name: Check out code - uses: actions/checkout@v3 +# steps: +# - name: Check out code +# uses: actions/checkout@v3 - - name: Get modified files in the charts/iter8 folder - id: modified-files - uses: tj-actions/changed-files@v35 - with: - files: charts/iter8 +# - name: Get modified files in the charts/iter8 folder +# id: modified-files +# uses: tj-actions/changed-files@v35 +# with: +# files: charts/iter8 - - uses: azure/setup-helm@v3 - if: steps.modified-files.outputs.any_modified == 'true' - with: - token: ${{ secrets.GITHUB_TOKEN }} +# - uses: azure/setup-helm@v3 +# if: steps.modified-files.outputs.any_modified == 'true' +# with: +# token: ${{ secrets.GITHUB_TOKEN }} - - name: Create Kubernetes YAML file - if: steps.modified-files.outputs.any_modified == 'true' - run: | - helm template charts/iter8 \ - --set tasks={grpc} \ - --set grpc.host="hello.default:50051" \ - --set grpc.call="helloworld.Greeter.SayHello" \ - --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" >> iter8.yaml +# - name: Create Kubernetes YAML file +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# helm template charts/iter8 \ +# --set tasks={grpc} \ +# --set grpc.host="hello.default:50051" \ +# --set grpc.call="helloworld.Greeter.SayHello" \ +# --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" >> iter8.yaml - - name: Lint 
Kubernetes YAML file - if: steps.modified-files.outputs.any_modified == 'true' - uses: stackrox/kube-linter-action@v1 - with: - directory: iter8.yaml +# - name: Lint Kubernetes YAML file +# if: steps.modified-files.outputs.any_modified == 'true' +# uses: stackrox/kube-linter-action@v1 +# with: +# directory: iter8.yaml diff --git a/.github/workflows/releasecharts.yaml b/.github/workflows/releasecharts.yaml index 84d271dd3..e926d7465 100644 --- a/.github/workflows/releasecharts.yaml +++ b/.github/workflows/releasecharts.yaml @@ -1,34 +1,34 @@ -name: Release charts +# name: Release charts -on: - push: - branches: - - master +# on: +# push: +# branches: +# - master -jobs: - release: - permissions: - contents: write - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v3 - with: - fetch-depth: 0 +# jobs: +# release: +# permissions: +# contents: write +# runs-on: ubuntu-latest +# steps: +# - name: Checkout +# uses: actions/checkout@v3 +# with: +# fetch-depth: 0 - - name: Configure Git - run: | - git config user.name "$GITHUB_ACTOR" - git config user.email "$GITHUB_ACTOR@users.noreply.github.com" +# - name: Configure Git +# run: | +# git config user.name "$GITHUB_ACTOR" +# git config user.email "$GITHUB_ACTOR@users.noreply.github.com" - - name: Install Helm - uses: azure/setup-helm@v3 - with: - token: ${{ secrets.GITHUB_TOKEN }} +# - name: Install Helm +# uses: azure/setup-helm@v3 +# with: +# token: ${{ secrets.GITHUB_TOKEN }} - - name: Run chart-releaser - uses: helm/chart-releaser-action@v1.5.0 - with: - config: config.yaml - env: - CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}" +# - name: Run chart-releaser +# uses: helm/chart-releaser-action@v1.5.0 +# with: +# config: config.yaml +# env: +# CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}" diff --git a/.github/workflows/spellcheck.yaml b/.github/workflows/spellcheck.yaml index 2f66c906e..a9f572a7f 100644 --- a/.github/workflows/spellcheck.yaml +++ b/.github/workflows/spellcheck.yaml @@ -1,20 +1,20 @@ -name: Spell check markdown +# name: Spell check markdown -on: - pull_request: - branches: - - master +# on: +# pull_request: +# branches: +# - master -jobs: - spell-check: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - run: | - pwd - ls -l - - uses: rojopolis/spellcheck-github-actions@0.29.0 - with: - config_path: .github/spellcheck.yml +# jobs: +# spell-check: +# runs-on: ubuntu-latest +# steps: +# - uses: actions/checkout@v3 +# with: +# fetch-depth: 0 +# - run: | +# pwd +# ls -l +# - uses: rojopolis/spellcheck-github-actions@0.29.0 +# with: +# config_path: .github/spellcheck.yml diff --git a/.github/workflows/testcharts.yaml b/.github/workflows/testcharts.yaml index 81264923d..317b45371 100644 --- a/.github/workflows/testcharts.yaml +++ b/.github/workflows/testcharts.yaml @@ -1,423 +1,423 @@ -name: Tests to ensure that changes to charts do not break user experience - -on: - pull_request: - branches: - - master - -# Kind versions used to test Iter8 on different versions of Kubernetes -# From: https://github.com/kubernetes-sigs/kind/releases -env: - versions: | - kindest/node:v1.26.3@sha256:61b92f38dff6ccc29969e7aa154d34e38b89443af1a2c14e6cfbd2df6419c66f - kindest/node:v1.25.8@sha256:00d3f5314cc35327706776e95b2f8e504198ce59ac545d0200a89e69fce10b7f - kindest/node:v1.24.12@sha256:1e12918b8bc3d4253bc08f640a231bb0d3b2c5a9b28aa3f2ca1aee93e1e8db16 - kindest/node:v1.23.17@sha256:e5fd1d9cd7a9a50939f9c005684df5a6d145e8d695e78463637b79464292e66c - 
kindest/node:v1.22.17@sha256:c8a828709a53c25cbdc0790c8afe12f25538617c7be879083248981945c38693 - kindest/node:v1.21.14@sha256:27ef72ea623ee879a25fe6f9982690a3e370c68286f4356bf643467c552a3888 - kindest/node:v1.27.1@sha256:9915f5629ef4d29f35b478e819249e89cfaffcbfeebda4324e5c01d53d937b09 - kindest/node:v1.27.0@sha256:c6b22e613523b1af67d4bc8a0c38a4c3ea3a2b8fbc5b367ae36345c9cb844518 - -jobs: - # Get the different Kind versions - get_versions: - runs-on: ubuntu-latest - - steps: - - name: Get the different Kind versions - id: set-matrix - run: | - # Serialize versions into JSON array - jsonVersions=$(jq -ncR '[inputs]' <<< "$versions") - echo $jsonVersions - - # Output serialized jsonVersions - echo "matrix=$jsonVersions" | sed -e "s/,\"\"//" >> $GITHUB_OUTPUT - - outputs: - matrix: ${{ steps.set-matrix.outputs.matrix }} - - http-experiment: - name: HTTP load test - needs: get_versions - runs-on: ubuntu-latest - strategy: - matrix: - version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - - steps: - - name: Check out code - uses: actions/checkout@v3 - - - name: Get modified files in the charts/iter8 folder - id: modified-files - uses: tj-actions/changed-files@v35 - with: - files: charts/iter8 - - - name: Install Iter8 - run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 - if: steps.modified-files.outputs.any_modified == 'true' - - - name: Start kind cluster ${{ matrix.version }} - uses: helm/kind-action@v1.5.0 - if: steps.modified-files.outputs.any_modified == 'true' - with: - wait: 300s - node_image: ${{ matrix.version }} - - - name: Create app - if: steps.modified-files.outputs.any_modified == 'true' - run: | - kubectl create deployment httpbin --image=kennethreitz/httpbin - kubectl expose deployment httpbin --type=ClusterIP --port=80 - kubectl wait --for=condition=available --timeout=60s deploy/httpbin - - - name: iter8 k launch - if: steps.modified-files.outputs.any_modified == 'true' - run: | - iter8 k launch \ - --localChart \ - --chartName charts/iter8 \ - --set "tasks={http}" \ - --set http.url="http://httpbin.default/get" \ - - - name: Try other iter8 k commands - if: steps.modified-files.outputs.any_modified == 'true' - run: | - sleep 60 - iter8 k log - iter8 k delete - - http-payload-experiment: - name: HTTP load test with payload - needs: get_versions - runs-on: ubuntu-latest - strategy: - matrix: - version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - - steps: - - name: Check out code - uses: actions/checkout@v3 - - - name: Get modified files in the charts/iter8 folder - id: modified-files - uses: tj-actions/changed-files@v35 - with: - files: charts/iter8 - - - name: Install Iter8 - run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.14 - if: steps.modified-files.outputs.any_modified == 'true' - - - name: Start kind cluster ${{ matrix.version }} - uses: helm/kind-action@v1.5.0 - if: steps.modified-files.outputs.any_modified == 'true' - with: - wait: 300s - node_image: ${{ matrix.version }} - - - name: Create app - if: steps.modified-files.outputs.any_modified == 'true' - run: | - kubectl create deployment httpbin --image=kennethreitz/httpbin - kubectl expose deployment httpbin --type=ClusterIP --port=80 - kubectl wait --for=condition=available --timeout=60s deploy/httpbin - - - name: iter8 k launch - if: steps.modified-files.outputs.any_modified == 'true' - run: | - iter8 k launch \ - --localChart \ - --chartName charts/iter8 \ - --set "tasks={http}" \ - --set http.url="http://httpbin.default/post" \ - --set http.payloadStr=hello \ 
- - - name: Try other iter8 k commands - if: steps.modified-files.outputs.any_modified == 'true' - run: | - sleep 60 - iter8 k log - iter8 k delete - - http-multiple-experiment: - name: HTTP load test with multiple endpoints - needs: get_versions - runs-on: ubuntu-latest - strategy: - matrix: - version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - - steps: - - name: Check out code - uses: actions/checkout@v3 - - - name: Get modified files in the charts/iter8 folder - id: modified-files - uses: tj-actions/changed-files@v35 - with: - files: charts/iter8 - - - name: Install Iter8 - run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.14 - if: steps.modified-files.outputs.any_modified == 'true' - - - name: Start kind cluster ${{ matrix.version }} - uses: helm/kind-action@v1.5.0 - if: steps.modified-files.outputs.any_modified == 'true' - with: - wait: 300s - node_image: ${{ matrix.version }} - - - name: Create app - if: steps.modified-files.outputs.any_modified == 'true' - run: | - kubectl create deployment httpbin --image=kennethreitz/httpbin - kubectl expose deployment httpbin --type=ClusterIP --port=80 - kubectl wait --for=condition=available --timeout=60s deploy/httpbin - - - name: iter8 k launch - if: steps.modified-files.outputs.any_modified == 'true' - run: | - iter8 k launch \ - --localChart \ - --chartName charts/iter8 \ - --set "tasks={http}" \ - --set http.endpoints.get.url=http://httpbin.default/get \ - --set http.endpoints.getAnything.url=http://httpbin.default/anything \ - --set http.endpoints.post.url=http://httpbin.default/post \ - --set http.endpoints.post.payloadStr=hello \ - - - name: Try other iter8 k commands - if: steps.modified-files.outputs.any_modified == 'true' - run: | - sleep 60 - iter8 k log - iter8 k delete - - grpc-experiment: - name: gRPC load test - needs: get_versions - runs-on: ubuntu-latest - strategy: - matrix: - version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - - steps: - - name: Check out code - uses: actions/checkout@v3 - - - name: Get modified files in the charts/iter8 folder - id: modified-files - uses: tj-actions/changed-files@v35 - with: - files: charts/iter8 +# name: Tests to ensure that changes to charts do not break user experience + +# on: +# pull_request: +# branches: +# - master + +# # Kind versions used to test Iter8 on different versions of Kubernetes +# # From: https://github.com/kubernetes-sigs/kind/releases +# env: +# versions: | +# kindest/node:v1.26.3@sha256:61b92f38dff6ccc29969e7aa154d34e38b89443af1a2c14e6cfbd2df6419c66f +# kindest/node:v1.25.8@sha256:00d3f5314cc35327706776e95b2f8e504198ce59ac545d0200a89e69fce10b7f +# kindest/node:v1.24.12@sha256:1e12918b8bc3d4253bc08f640a231bb0d3b2c5a9b28aa3f2ca1aee93e1e8db16 +# kindest/node:v1.23.17@sha256:e5fd1d9cd7a9a50939f9c005684df5a6d145e8d695e78463637b79464292e66c +# kindest/node:v1.22.17@sha256:c8a828709a53c25cbdc0790c8afe12f25538617c7be879083248981945c38693 +# kindest/node:v1.21.14@sha256:27ef72ea623ee879a25fe6f9982690a3e370c68286f4356bf643467c552a3888 +# kindest/node:v1.27.1@sha256:9915f5629ef4d29f35b478e819249e89cfaffcbfeebda4324e5c01d53d937b09 +# kindest/node:v1.27.0@sha256:c6b22e613523b1af67d4bc8a0c38a4c3ea3a2b8fbc5b367ae36345c9cb844518 + +# jobs: +# # Get the different Kind versions +# get_versions: +# runs-on: ubuntu-latest + +# steps: +# - name: Get the different Kind versions +# id: set-matrix +# run: | +# # Serialize versions into JSON array +# jsonVersions=$(jq -ncR '[inputs]' <<< "$versions") +# echo $jsonVersions + +# # Output serialized 
jsonVersions +# echo "matrix=$jsonVersions" | sed -e "s/,\"\"//" >> $GITHUB_OUTPUT + +# outputs: +# matrix: ${{ steps.set-matrix.outputs.matrix }} + +# http-experiment: +# name: HTTP load test +# needs: get_versions +# runs-on: ubuntu-latest +# strategy: +# matrix: +# version: ${{ fromJson(needs.get_versions.outputs.matrix) }} + +# steps: +# - name: Check out code +# uses: actions/checkout@v3 + +# - name: Get modified files in the charts/iter8 folder +# id: modified-files +# uses: tj-actions/changed-files@v35 +# with: +# files: charts/iter8 + +# - name: Install Iter8 +# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 +# if: steps.modified-files.outputs.any_modified == 'true' + +# - name: Start kind cluster ${{ matrix.version }} +# uses: helm/kind-action@v1.5.0 +# if: steps.modified-files.outputs.any_modified == 'true' +# with: +# wait: 300s +# node_image: ${{ matrix.version }} + +# - name: Create app +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# kubectl create deployment httpbin --image=kennethreitz/httpbin +# kubectl expose deployment httpbin --type=ClusterIP --port=80 +# kubectl wait --for=condition=available --timeout=60s deploy/httpbin + +# - name: iter8 k launch +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# iter8 k launch \ +# --localChart \ +# --chartName charts/iter8 \ +# --set "tasks={http}" \ +# --set http.url="http://httpbin.default/get" \ + +# - name: Try other iter8 k commands +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# sleep 60 +# iter8 k log +# iter8 k delete + +# http-payload-experiment: +# name: HTTP load test with payload +# needs: get_versions +# runs-on: ubuntu-latest +# strategy: +# matrix: +# version: ${{ fromJson(needs.get_versions.outputs.matrix) }} + +# steps: +# - name: Check out code +# uses: actions/checkout@v3 + +# - name: Get modified files in the charts/iter8 folder +# id: modified-files +# uses: tj-actions/changed-files@v35 +# with: +# files: charts/iter8 + +# - name: Install Iter8 +# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.14 +# if: steps.modified-files.outputs.any_modified == 'true' + +# - name: Start kind cluster ${{ matrix.version }} +# uses: helm/kind-action@v1.5.0 +# if: steps.modified-files.outputs.any_modified == 'true' +# with: +# wait: 300s +# node_image: ${{ matrix.version }} + +# - name: Create app +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# kubectl create deployment httpbin --image=kennethreitz/httpbin +# kubectl expose deployment httpbin --type=ClusterIP --port=80 +# kubectl wait --for=condition=available --timeout=60s deploy/httpbin + +# - name: iter8 k launch +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# iter8 k launch \ +# --localChart \ +# --chartName charts/iter8 \ +# --set "tasks={http}" \ +# --set http.url="http://httpbin.default/post" \ +# --set http.payloadStr=hello \ + +# - name: Try other iter8 k commands +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# sleep 60 +# iter8 k log +# iter8 k delete + +# http-multiple-experiment: +# name: HTTP load test with multiple endpoints +# needs: get_versions +# runs-on: ubuntu-latest +# strategy: +# matrix: +# version: ${{ fromJson(needs.get_versions.outputs.matrix) }} + +# steps: +# - name: Check out code +# uses: actions/checkout@v3 + +# - name: Get modified files in the charts/iter8 folder +# id: modified-files +# uses: tj-actions/changed-files@v35 +# with: +# files: charts/iter8 + +# - 
name: Install Iter8 +# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.14 +# if: steps.modified-files.outputs.any_modified == 'true' + +# - name: Start kind cluster ${{ matrix.version }} +# uses: helm/kind-action@v1.5.0 +# if: steps.modified-files.outputs.any_modified == 'true' +# with: +# wait: 300s +# node_image: ${{ matrix.version }} + +# - name: Create app +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# kubectl create deployment httpbin --image=kennethreitz/httpbin +# kubectl expose deployment httpbin --type=ClusterIP --port=80 +# kubectl wait --for=condition=available --timeout=60s deploy/httpbin + +# - name: iter8 k launch +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# iter8 k launch \ +# --localChart \ +# --chartName charts/iter8 \ +# --set "tasks={http}" \ +# --set http.endpoints.get.url=http://httpbin.default/get \ +# --set http.endpoints.getAnything.url=http://httpbin.default/anything \ +# --set http.endpoints.post.url=http://httpbin.default/post \ +# --set http.endpoints.post.payloadStr=hello \ + +# - name: Try other iter8 k commands +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# sleep 60 +# iter8 k log +# iter8 k delete + +# grpc-experiment: +# name: gRPC load test +# needs: get_versions +# runs-on: ubuntu-latest +# strategy: +# matrix: +# version: ${{ fromJson(needs.get_versions.outputs.matrix) }} + +# steps: +# - name: Check out code +# uses: actions/checkout@v3 + +# - name: Get modified files in the charts/iter8 folder +# id: modified-files +# uses: tj-actions/changed-files@v35 +# with: +# files: charts/iter8 - - name: Install Iter8 - run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 - if: steps.modified-files.outputs.any_modified == 'true' - - - name: Start kind cluster ${{ matrix.version }} - uses: helm/kind-action@v1.5.0 - if: steps.modified-files.outputs.any_modified == 'true' - with: - wait: 300s - node_image: ${{ matrix.version }} - - - name: Deploy gRPC service in the Kubernetes cluster - if: steps.modified-files.outputs.any_modified == 'true' - run: | - kubectl create deployment routeguide --image=golang --port=50051 \ - -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run server/server.go" - kubectl expose deployment routeguide --port=50051 - kubectl wait --for=condition=available --timeout=60s deployment/routeguide - - - name: Test gRPC service with grpcurl - if: steps.modified-files.outputs.any_modified == 'true' - run: | - curl -sO https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml - kubectl apply -f grpcurl-routeguide.yaml - sleep 180 - kubectl logs deploy/sleep +# - name: Install Iter8 +# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 +# if: steps.modified-files.outputs.any_modified == 'true' + +# - name: Start kind cluster ${{ matrix.version }} +# uses: helm/kind-action@v1.5.0 +# if: steps.modified-files.outputs.any_modified == 'true' +# with: +# wait: 300s +# node_image: ${{ matrix.version }} + +# - name: Deploy gRPC service in the Kubernetes cluster +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# kubectl create deployment routeguide --image=golang --port=50051 \ +# -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" 
"'"s/localhost//"'" server/server.go; go run server/server.go" +# kubectl expose deployment routeguide --port=50051 +# kubectl wait --for=condition=available --timeout=60s deployment/routeguide + +# - name: Test gRPC service with grpcurl +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# curl -sO https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml +# kubectl apply -f grpcurl-routeguide.yaml +# sleep 180 +# kubectl logs deploy/sleep - - name: iter8 k launch - if: steps.modified-files.outputs.any_modified == 'true' - run: | - iter8 k launch \ - --localChart \ - --chartName charts/iter8 \ - --set "tasks={ready,grpc}" \ - --set ready.deploy=routeguide \ - --set ready.service=routeguide \ - --set ready.timeout=60s \ - --set grpc.host=routeguide.default:50051 \ - --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ - --set grpc.call=routeguide.RouteGuide.GetFeature \ - --set grpc.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ - - - name: Try other iter8 k commands - if: steps.modified-files.outputs.any_modified == 'true' - run: | - sleep 60 - iter8 k log - iter8 k delete - - grpc-multiple-experiment: - name: gRPC load test with multiple endpoints - needs: get_versions - runs-on: ubuntu-latest - strategy: - matrix: - version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - - steps: - - name: Check out code - uses: actions/checkout@v3 - - - name: Get modified files in the charts/iter8 folder - id: modified-files - uses: tj-actions/changed-files@v35 - with: - files: charts/iter8 +# - name: iter8 k launch +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# iter8 k launch \ +# --localChart \ +# --chartName charts/iter8 \ +# --set "tasks={ready,grpc}" \ +# --set ready.deploy=routeguide \ +# --set ready.service=routeguide \ +# --set ready.timeout=60s \ +# --set grpc.host=routeguide.default:50051 \ +# --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ +# --set grpc.call=routeguide.RouteGuide.GetFeature \ +# --set grpc.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ + +# - name: Try other iter8 k commands +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# sleep 60 +# iter8 k log +# iter8 k delete + +# grpc-multiple-experiment: +# name: gRPC load test with multiple endpoints +# needs: get_versions +# runs-on: ubuntu-latest +# strategy: +# matrix: +# version: ${{ fromJson(needs.get_versions.outputs.matrix) }} + +# steps: +# - name: Check out code +# uses: actions/checkout@v3 + +# - name: Get modified files in the charts/iter8 folder +# id: modified-files +# uses: tj-actions/changed-files@v35 +# with: +# files: charts/iter8 - - name: Install Iter8 - run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.14 - if: steps.modified-files.outputs.any_modified == 'true' - - - name: Start kind cluster ${{ matrix.version }} - uses: helm/kind-action@v1.5.0 - if: steps.modified-files.outputs.any_modified == 'true' - with: - wait: 300s - node_image: ${{ matrix.version }} - - - name: Deploy gRPC service in the Kubernetes cluster - if: steps.modified-files.outputs.any_modified == 'true' - run: | - kubectl create deployment routeguide --image=golang --port=50051 \ - -- bash -c "git clone -b 
v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run server/server.go" - kubectl expose deployment routeguide --port=50051 - kubectl wait --for=condition=available --timeout=60s deployment/routeguide - - - name: Test gRPC service with grpcurl - if: steps.modified-files.outputs.any_modified == 'true' - run: | - curl -sO https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml - kubectl apply -f grpcurl-routeguide.yaml - sleep 180 - kubectl logs deploy/sleep +# - name: Install Iter8 +# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.14 +# if: steps.modified-files.outputs.any_modified == 'true' + +# - name: Start kind cluster ${{ matrix.version }} +# uses: helm/kind-action@v1.5.0 +# if: steps.modified-files.outputs.any_modified == 'true' +# with: +# wait: 300s +# node_image: ${{ matrix.version }} + +# - name: Deploy gRPC service in the Kubernetes cluster +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# kubectl create deployment routeguide --image=golang --port=50051 \ +# -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run server/server.go" +# kubectl expose deployment routeguide --port=50051 +# kubectl wait --for=condition=available --timeout=60s deployment/routeguide + +# - name: Test gRPC service with grpcurl +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# curl -sO https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml +# kubectl apply -f grpcurl-routeguide.yaml +# sleep 180 +# kubectl logs deploy/sleep - - name: iter8 k launch - if: steps.modified-files.outputs.any_modified == 'true' - run: | - iter8 k launch \ - --localChart \ - --chartName charts/iter8 \ - --set "tasks={ready,grpc}" \ - --set ready.deploy=routeguide \ - --set ready.service=routeguide \ - --set ready.timeout=60s \ - --set grpc.host=routeguide.default:50051 \ - --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ - --set grpc.endpoints.getFeature.call=routeguide.RouteGuide.GetFeature \ - --set grpc.endpoints.getFeature.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ - --set grpc.endpoints.listFeatures.call=routeguide.RouteGuide.ListFeatures \ - --set grpc.endpoints.listFeatures.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/server.json \ - - - name: Try other iter8 k commands - if: steps.modified-files.outputs.any_modified == 'true' - run: | - sleep 60 - iter8 k log - iter8 k delete - - grpc-experiment2: - name: gRPC load test 2 - needs: get_versions - runs-on: ubuntu-latest - strategy: - matrix: - version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - - steps: - - name: Check out code - uses: actions/checkout@v3 - - - name: Get modified files in the charts/iter8 folder - id: modified-files - uses: tj-actions/changed-files@v35 - with: - files: charts/iter8 +# - name: iter8 k launch +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# iter8 k launch \ +# --localChart \ +# --chartName charts/iter8 \ +# --set "tasks={ready,grpc}" \ +# --set ready.deploy=routeguide \ +# --set 
ready.service=routeguide \ +# --set ready.timeout=60s \ +# --set grpc.host=routeguide.default:50051 \ +# --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ +# --set grpc.endpoints.getFeature.call=routeguide.RouteGuide.GetFeature \ +# --set grpc.endpoints.getFeature.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ +# --set grpc.endpoints.listFeatures.call=routeguide.RouteGuide.ListFeatures \ +# --set grpc.endpoints.listFeatures.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/server.json \ + +# - name: Try other iter8 k commands +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# sleep 60 +# iter8 k log +# iter8 k delete + +# grpc-experiment2: +# name: gRPC load test 2 +# needs: get_versions +# runs-on: ubuntu-latest +# strategy: +# matrix: +# version: ${{ fromJson(needs.get_versions.outputs.matrix) }} + +# steps: +# - name: Check out code +# uses: actions/checkout@v3 + +# - name: Get modified files in the charts/iter8 folder +# id: modified-files +# uses: tj-actions/changed-files@v35 +# with: +# files: charts/iter8 - - name: Install Iter8 - run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.14 - if: steps.modified-files.outputs.any_modified == 'true' - - - name: Start kind cluster ${{ matrix.version }} - uses: helm/kind-action@v1.5.0 - if: steps.modified-files.outputs.any_modified == 'true' - with: - wait: 300s - node_image: ${{ matrix.version }} - - - name: Create app - if: steps.modified-files.outputs.any_modified == 'true' - run: | - kubectl create deploy hello --image=docker.io/grpc/java-example-hostname:latest --port=50051 - kubectl expose deploy hello --port=50051 - kubectl wait --for=condition=available --timeout=60s deploy/hello +# - name: Install Iter8 +# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.14 +# if: steps.modified-files.outputs.any_modified == 'true' + +# - name: Start kind cluster ${{ matrix.version }} +# uses: helm/kind-action@v1.5.0 +# if: steps.modified-files.outputs.any_modified == 'true' +# with: +# wait: 300s +# node_image: ${{ matrix.version }} + +# - name: Create app +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# kubectl create deploy hello --image=docker.io/grpc/java-example-hostname:latest --port=50051 +# kubectl expose deploy hello --port=50051 +# kubectl wait --for=condition=available --timeout=60s deploy/hello - - name: iter8 k launch - if: steps.modified-files.outputs.any_modified == 'true' - run: | - iter8 k launch \ - --localChart \ - --chartName charts/iter8 \ - --set "tasks={grpc}" \ - --set grpc.host="hello.default:50051" \ - --set grpc.call="helloworld.Greeter.SayHello" \ - --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ - - - name: Try other iter8 k commands - if: steps.modified-files.outputs.any_modified == 'true' - run: | - sleep 60 - iter8 k log - iter8 k delete - - controller: - name: Controller test - needs: get_versions - runs-on: ubuntu-latest - strategy: - matrix: - version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - - steps: - - name: Check out code - uses: actions/checkout@v3 - - - name: Get modified files in the charts/controller folder - id: modified-files - uses: tj-actions/changed-files@v35 - with: - files: charts/controller - - - name: Start kind cluster ${{ matrix.version }} - uses: 
helm/kind-action@v1.5.0 - if: steps.modified-files.outputs.any_modified == 'true' - with: - wait: 300s - node_image: ${{ matrix.version }} +# - name: iter8 k launch +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# iter8 k launch \ +# --localChart \ +# --chartName charts/iter8 \ +# --set "tasks={grpc}" \ +# --set grpc.host="hello.default:50051" \ +# --set grpc.call="helloworld.Greeter.SayHello" \ +# --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ + +# - name: Try other iter8 k commands +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# sleep 60 +# iter8 k log +# iter8 k delete + +# controller: +# name: Controller test +# needs: get_versions +# runs-on: ubuntu-latest +# strategy: +# matrix: +# version: ${{ fromJson(needs.get_versions.outputs.matrix) }} + +# steps: +# - name: Check out code +# uses: actions/checkout@v3 + +# - name: Get modified files in the charts/controller folder +# id: modified-files +# uses: tj-actions/changed-files@v35 +# with: +# files: charts/controller + +# - name: Start kind cluster ${{ matrix.version }} +# uses: helm/kind-action@v1.5.0 +# if: steps.modified-files.outputs.any_modified == 'true' +# with: +# wait: 300s +# node_image: ${{ matrix.version }} - - name: Start controller - if: steps.modified-files.outputs.any_modified == 'true' - run: | - helm install controller charts/controller -f charts/controller/testdata/values.yaml - - - name: Check controller - if: steps.modified-files.outputs.any_modified == 'true' - run: | - kubectl rollout status --watch --timeout=60s statefulset.apps/controller +# - name: Start controller +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# helm install controller charts/controller -f charts/controller/testdata/values.yaml + +# - name: Check controller +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# kubectl rollout status --watch --timeout=60s statefulset.apps/controller diff --git a/.github/workflows/testkustomize.yaml b/.github/workflows/testkustomize.yaml index dc17ff7fa..96b47eeb8 100644 --- a/.github/workflows/testkustomize.yaml +++ b/.github/workflows/testkustomize.yaml @@ -1,106 +1,106 @@ -name: Test kustomize experiments - -on: - pull_request: - -# Kind versions used to test Iter8 on different versions of Kubernetes -# From: https://github.com/kubernetes-sigs/kind/releases -env: - versions: | - kindest/node:v1.26.3@sha256:61b92f38dff6ccc29969e7aa154d34e38b89443af1a2c14e6cfbd2df6419c66f - kindest/node:v1.25.8@sha256:00d3f5314cc35327706776e95b2f8e504198ce59ac545d0200a89e69fce10b7f - kindest/node:v1.24.12@sha256:1e12918b8bc3d4253bc08f640a231bb0d3b2c5a9b28aa3f2ca1aee93e1e8db16 - kindest/node:v1.23.17@sha256:e5fd1d9cd7a9a50939f9c005684df5a6d145e8d695e78463637b79464292e66c - kindest/node:v1.22.17@sha256:c8a828709a53c25cbdc0790c8afe12f25538617c7be879083248981945c38693 - kindest/node:v1.21.14@sha256:27ef72ea623ee879a25fe6f9982690a3e370c68286f4356bf643467c552a3888 - kindest/node:v1.27.1@sha256:9915f5629ef4d29f35b478e819249e89cfaffcbfeebda4324e5c01d53d937b09 - kindest/node:v1.27.0@sha256:c6b22e613523b1af67d4bc8a0c38a4c3ea3a2b8fbc5b367ae36345c9cb844518 - -jobs: - # Get the paths for the Helm charts to lint - get_versions: - runs-on: ubuntu-latest - - steps: - - name: Get the paths for Helm charts to lint - id: set-matrix - run: | - # Serialize versions into JSON array - jsonVersions=$(jq -ncR '[inputs]' <<< "$versions") - echo $jsonVersions - - # Output serialized 
jsonVersions - echo "matrix=$jsonVersions" | sed -e "s/,\"\"//" >> $GITHUB_OUTPUT - - outputs: - matrix: ${{ steps.set-matrix.outputs.matrix }} - - controller: - name: Controller test - needs: get_versions - runs-on: ubuntu-latest - strategy: - matrix: - version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - - steps: - - name: Check out code - uses: actions/checkout@v3 - - - name: Get modified files in the charts/controller folder - id: modified-files - uses: tj-actions/changed-files@v35 - with: - files: charts/controller - - - name: Start kind cluster ${{ matrix.version }} - uses: helm/kind-action@v1.5.0 - if: steps.modified-files.outputs.any_modified == 'true' - with: - wait: 300s - node_image: ${{ matrix.version }} +# name: Test kustomize experiments + +# on: +# pull_request: + +# # Kind versions used to test Iter8 on different versions of Kubernetes +# # From: https://github.com/kubernetes-sigs/kind/releases +# env: +# versions: | +# kindest/node:v1.26.3@sha256:61b92f38dff6ccc29969e7aa154d34e38b89443af1a2c14e6cfbd2df6419c66f +# kindest/node:v1.25.8@sha256:00d3f5314cc35327706776e95b2f8e504198ce59ac545d0200a89e69fce10b7f +# kindest/node:v1.24.12@sha256:1e12918b8bc3d4253bc08f640a231bb0d3b2c5a9b28aa3f2ca1aee93e1e8db16 +# kindest/node:v1.23.17@sha256:e5fd1d9cd7a9a50939f9c005684df5a6d145e8d695e78463637b79464292e66c +# kindest/node:v1.22.17@sha256:c8a828709a53c25cbdc0790c8afe12f25538617c7be879083248981945c38693 +# kindest/node:v1.21.14@sha256:27ef72ea623ee879a25fe6f9982690a3e370c68286f4356bf643467c552a3888 +# kindest/node:v1.27.1@sha256:9915f5629ef4d29f35b478e819249e89cfaffcbfeebda4324e5c01d53d937b09 +# kindest/node:v1.27.0@sha256:c6b22e613523b1af67d4bc8a0c38a4c3ea3a2b8fbc5b367ae36345c9cb844518 + +# jobs: +# # Get the paths for the Helm charts to lint +# get_versions: +# runs-on: ubuntu-latest + +# steps: +# - name: Get the paths for Helm charts to lint +# id: set-matrix +# run: | +# # Serialize versions into JSON array +# jsonVersions=$(jq -ncR '[inputs]' <<< "$versions") +# echo $jsonVersions + +# # Output serialized jsonVersions +# echo "matrix=$jsonVersions" | sed -e "s/,\"\"//" >> $GITHUB_OUTPUT + +# outputs: +# matrix: ${{ steps.set-matrix.outputs.matrix }} + +# controller: +# name: Controller test +# needs: get_versions +# runs-on: ubuntu-latest +# strategy: +# matrix: +# version: ${{ fromJson(needs.get_versions.outputs.matrix) }} + +# steps: +# - name: Check out code +# uses: actions/checkout@v3 + +# - name: Get modified files in the charts/controller folder +# id: modified-files +# uses: tj-actions/changed-files@v35 +# with: +# files: charts/controller + +# - name: Start kind cluster ${{ matrix.version }} +# uses: helm/kind-action@v1.5.0 +# if: steps.modified-files.outputs.any_modified == 'true' +# with: +# wait: 300s +# node_image: ${{ matrix.version }} - - name: Start controller - if: steps.modified-files.outputs.any_modified == 'true' - run: | - kubectl apply -k kustomize/controller/namespaceScoped - - - name: Check controller - if: steps.modified-files.outputs.any_modified == 'true' - run: | - kubectl rollout status --watch --timeout=60s statefulset.apps/iter8 - - controller-clusterScoped: - name: Controller cluster scoped test - needs: get_versions - runs-on: ubuntu-latest - strategy: - matrix: - version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - - steps: - - name: Check out code - uses: actions/checkout@v3 - - - name: Get modified files in the charts/controller folder - id: modified-files - uses: tj-actions/changed-files@v35 - with: - files: charts/controller 
- - - name: Start kind cluster ${{ matrix.version }} - uses: helm/kind-action@v1.5.0 - if: steps.modified-files.outputs.any_modified == 'true' - with: - wait: 300s - node_image: ${{ matrix.version }} +# - name: Start controller +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# kubectl apply -k kustomize/controller/namespaceScoped + +# - name: Check controller +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# kubectl rollout status --watch --timeout=60s statefulset.apps/iter8 + +# controller-clusterScoped: +# name: Controller cluster scoped test +# needs: get_versions +# runs-on: ubuntu-latest +# strategy: +# matrix: +# version: ${{ fromJson(needs.get_versions.outputs.matrix) }} + +# steps: +# - name: Check out code +# uses: actions/checkout@v3 + +# - name: Get modified files in the charts/controller folder +# id: modified-files +# uses: tj-actions/changed-files@v35 +# with: +# files: charts/controller + +# - name: Start kind cluster ${{ matrix.version }} +# uses: helm/kind-action@v1.5.0 +# if: steps.modified-files.outputs.any_modified == 'true' +# with: +# wait: 300s +# node_image: ${{ matrix.version }} - - name: Start controller - if: steps.modified-files.outputs.any_modified == 'true' - run: | - kubectl apply -k kustomize/controller/clusterScoped - - - name: Check controller - if: steps.modified-files.outputs.any_modified == 'true' - run: | - kubectl rollout status --watch --timeout=60s statefulset.apps/iter8 \ No newline at end of file +# - name: Start controller +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# kubectl apply -k kustomize/controller/clusterScoped + +# - name: Check controller +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# kubectl rollout status --watch --timeout=60s statefulset.apps/iter8 \ No newline at end of file diff --git a/.github/workflows/versionbump.yaml b/.github/workflows/versionbump.yaml index a8182f70d..b6bc7bc10 100644 --- a/.github/workflows/versionbump.yaml +++ b/.github/workflows/versionbump.yaml @@ -1,88 +1,88 @@ -name: Version bump check - -on: - pull_request: - branches: - - master - -jobs: - # Get the paths for the Helm charts to version check - get_paths: - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Get the paths for Helm charts to version check - id: set-matrix - run: | - # Get paths (in string form) - stringPaths=$(find -maxdepth 2 -path './charts/*') - - # Check paths (length greater than 0) - stringPathsLength=$(echo ${#stringPaths}) - if (( stringPathsLength == 0 )); - then - echo "No paths to check" - exit 1 - fi - - # Serialize paths into JSON array - paths=$(jq -ncR '[inputs]' <<< "$stringPaths") - echo $paths - - # Output serialized paths - echo "matrix=$paths" >> $GITHUB_OUTPUT - - outputs: - matrix: ${{ steps.set-matrix.outputs.matrix }} - - # Version check Helm charts based on paths provided by previous job - version_check: - name: Version check - needs: get_paths - runs-on: ubuntu-latest - strategy: - matrix: - version: ${{ fromJson(needs.get_paths.outputs.matrix) }} - steps: - - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Get modified files in the ${{ matrix.version }} folder - id: modified-files - uses: tj-actions/changed-files@v35 - with: - files: ${{ matrix.version }} - - - name: Run step if any file(s) in the ${{ matrix.version }} folder was modified - if: steps.modified-files.outputs.any_modified == 'true' - run: | - # Remove ./ prefix from raw matrix version (i.e. 
./charts/iter8 -> charts/iter8) - version=$(echo ${{ matrix.version }} | sed s/".\/"//) - - # Get chart file - chartFile="$version/Chart.yaml" - - # Get git diff of the Chart.yaml between the master branch and PR branch - gitDiff=$(git diff origin/master..HEAD -- $chartFile) - echo $gitDiff - - # Addition in Chart.yaml - addChart="+++ b/$add$chartFile" - echo $addChart - - # Addition of version in Chart.yaml - addVersion="+version:" - echo $addVersion - - if [[ "$gitDiff" == *"$addChart"* ]] && [[ "$gitDiff" == *$addVersion* ]]; - then - echo "version in $chartFile has been modified" - else - echo "version in $chartFile needs to be modified" - exit 1 - fi \ No newline at end of file +# name: Version bump check + +# on: +# pull_request: +# branches: +# - master + +# jobs: +# # Get the paths for the Helm charts to version check +# get_paths: +# runs-on: ubuntu-latest + +# steps: +# - uses: actions/checkout@v3 +# with: +# fetch-depth: 0 + +# - name: Get the paths for Helm charts to version check +# id: set-matrix +# run: | +# # Get paths (in string form) +# stringPaths=$(find -maxdepth 2 -path './charts/*') + +# # Check paths (length greater than 0) +# stringPathsLength=$(echo ${#stringPaths}) +# if (( stringPathsLength == 0 )); +# then +# echo "No paths to check" +# exit 1 +# fi + +# # Serialize paths into JSON array +# paths=$(jq -ncR '[inputs]' <<< "$stringPaths") +# echo $paths + +# # Output serialized paths +# echo "matrix=$paths" >> $GITHUB_OUTPUT + +# outputs: +# matrix: ${{ steps.set-matrix.outputs.matrix }} + +# # Version check Helm charts based on paths provided by previous job +# version_check: +# name: Version check +# needs: get_paths +# runs-on: ubuntu-latest +# strategy: +# matrix: +# version: ${{ fromJson(needs.get_paths.outputs.matrix) }} +# steps: +# - uses: actions/checkout@v3 +# with: +# fetch-depth: 0 + +# - name: Get modified files in the ${{ matrix.version }} folder +# id: modified-files +# uses: tj-actions/changed-files@v35 +# with: +# files: ${{ matrix.version }} + +# - name: Run step if any file(s) in the ${{ matrix.version }} folder was modified +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# # Remove ./ prefix from raw matrix version (i.e. 
./charts/iter8 -> charts/iter8) +# version=$(echo ${{ matrix.version }} | sed s/".\/"//) + +# # Get chart file +# chartFile="$version/Chart.yaml" + +# # Get git diff of the Chart.yaml between the master branch and PR branch +# gitDiff=$(git diff origin/master..HEAD -- $chartFile) +# echo $gitDiff + +# # Addition in Chart.yaml +# addChart="+++ b/$add$chartFile" +# echo $addChart + +# # Addition of version in Chart.yaml +# addVersion="+version:" +# echo $addVersion + +# if [[ "$gitDiff" == *"$addChart"* ]] && [[ "$gitDiff" == *$addVersion* ]]; +# then +# echo "version in $chartFile has been modified" +# else +# echo "version in $chartFile needs to be modified" +# exit 1 +# fi \ No newline at end of file From 34e83d821dd6f68f752eb688f68e7ee037d5eef5 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Tue, 22 Aug 2023 17:39:30 -0400 Subject: [PATCH 080/121] Comment testperformance workflow Signed-off-by: Alan Cha --- .github/workflows/testperformance.yaml | 388 ++++++++++++------------- 1 file changed, 194 insertions(+), 194 deletions(-) diff --git a/.github/workflows/testperformance.yaml b/.github/workflows/testperformance.yaml index b390a4370..2b467119b 100644 --- a/.github/workflows/testperformance.yaml +++ b/.github/workflows/testperformance.yaml @@ -1,212 +1,212 @@ -name: Performance tests to assess the functionality of the latest version of Iter8 (master branch) +# name: Performance tests to assess the functionality of the latest version of Iter8 (master branch) -on: - pull_request: +# on: +# pull_request: -jobs: - unit-test: - name: Unit test - runs-on: ubuntu-latest - steps: - - name: Install Go - uses: actions/setup-go@v4 - with: - go-version: 1.19 - - name: Check out code into the Go module directory - uses: actions/checkout@v3 - - name: Test and compute coverage - run: make coverage # includes vet and lint - - name: Enforce coverage - run: | - export COVERAGE=$(go tool cover -func coverage.out | grep total | awk '{print substr($3, 1, length($3)-1)}') - echo "code coverage is at ${COVERAGE}" - if [ 1 -eq "$(echo "${COVERAGE} > 76.0" | bc)" ]; then \ - echo "all good... coverage is above 76.0%"; - else \ - echo "not good... coverage is not above 76.0%"; - exit 1 - fi +# jobs: +# unit-test: +# name: Unit test +# runs-on: ubuntu-latest +# steps: +# - name: Install Go +# uses: actions/setup-go@v4 +# with: +# go-version: 1.19 +# - name: Check out code into the Go module directory +# uses: actions/checkout@v3 +# - name: Test and compute coverage +# run: make coverage # includes vet and lint +# - name: Enforce coverage +# run: | +# export COVERAGE=$(go tool cover -func coverage.out | grep total | awk '{print substr($3, 1, length($3)-1)}') +# echo "code coverage is at ${COVERAGE}" +# if [ 1 -eq "$(echo "${COVERAGE} > 76.0" | bc)" ]; then \ +# echo "all good... coverage is above 76.0%"; +# else \ +# echo "not good... 
coverage is not above 76.0%"; +# exit 1 +# fi - kubernetes-load-test-http: - name: HTTP load test (with readiness) at the edge of Kubernetes - runs-on: ubuntu-latest - steps: - - name: Install Go - uses: actions/setup-go@v4 - with: - go-version: 1.19 - - name: Check out code into the Go module directory - uses: actions/checkout@v3 - - name: Build and install Iter8 - run: make install - - name: Start kind cluster - uses: helm/kind-action@v1.5.0 - with: - wait: 300s +# kubernetes-load-test-http: +# name: HTTP load test (with readiness) at the edge of Kubernetes +# runs-on: ubuntu-latest +# steps: +# - name: Install Go +# uses: actions/setup-go@v4 +# with: +# go-version: 1.19 +# - name: Check out code into the Go module directory +# uses: actions/checkout@v3 +# - name: Build and install Iter8 +# run: make install +# - name: Start kind cluster +# uses: helm/kind-action@v1.5.0 +# with: +# wait: 300s - - name: run httpbin - run: | - set -e - kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 - kubectl expose deploy httpbin --port=80 +# - name: run httpbin +# run: | +# set -e +# kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 +# kubectl expose deploy httpbin --port=80 - - name: load-test-http in Kubernetes - run: | - iter8 k launch --localChart --chartName charts/iter8 \ - --set "tasks={ready,http}" \ - --set ready.deploy=httpbin \ - --set ready.service=httpbin \ - --set ready.timeout=60s \ - --set http.url=http://httpbin.default/get \ - --set http.duration="3s" - sleep 60 - iter8 k log - iter8 k delete +# - name: load-test-http in Kubernetes +# run: | +# iter8 k launch --localChart --chartName charts/iter8 \ +# --set "tasks={ready,http}" \ +# --set ready.deploy=httpbin \ +# --set ready.service=httpbin \ +# --set ready.timeout=60s \ +# --set http.url=http://httpbin.default/get \ +# --set http.duration="3s" +# sleep 60 +# iter8 k log +# iter8 k delete - - name: load-test-http with payload in Kubernetes - run: | - iter8 k launch --localChart --chartName charts/iter8 \ - --set "tasks={ready,http}" \ - --set ready.deploy=httpbin \ - --set ready.service=httpbin \ - --set ready.timeout=60s \ - --set http.url=http://httpbin.default/post \ - --set http.payloadStr=hello \ - --set http.duration="3s" - sleep 60 - iter8 k log - iter8 k delete +# - name: load-test-http with payload in Kubernetes +# run: | +# iter8 k launch --localChart --chartName charts/iter8 \ +# --set "tasks={ready,http}" \ +# --set ready.deploy=httpbin \ +# --set ready.service=httpbin \ +# --set ready.timeout=60s \ +# --set http.url=http://httpbin.default/post \ +# --set http.payloadStr=hello \ +# --set http.duration="3s" +# sleep 60 +# iter8 k log +# iter8 k delete - - name: load-test-http with multiple endpoints in Kubernetes - run: | - iter8 k launch --localChart --chartName charts/iter8 \ - --set "tasks={ready,http}" \ - --set ready.deploy=httpbin \ - --set ready.service=httpbin \ - --set ready.timeout=60s \ - --set http.endpoints.get.url=http://httpbin.default/get \ - --set http.endpoints.getAnything.url=http://httpbin.default/anything \ - --set http.endpoints.post.url=http://httpbin.default/post \ - --set http.endpoints.post.payloadStr=hello \ - --set http.duration="3s" - sleep 60 - iter8 k log - iter8 k delete +# - name: load-test-http with multiple endpoints in Kubernetes +# run: | +# iter8 k launch --localChart --chartName charts/iter8 \ +# --set "tasks={ready,http}" \ +# --set ready.deploy=httpbin \ +# --set ready.service=httpbin \ +# --set ready.timeout=60s \ +# --set 
http.endpoints.get.url=http://httpbin.default/get \ +# --set http.endpoints.getAnything.url=http://httpbin.default/anything \ +# --set http.endpoints.post.url=http://httpbin.default/post \ +# --set http.endpoints.post.payloadStr=hello \ +# --set http.duration="3s" +# sleep 60 +# iter8 k log +# iter8 k delete - kubernetes-load-test-grpc: - name: gRPC load test with various URLs - runs-on: ubuntu-latest - steps: - - name: Install Go - uses: actions/setup-go@v4 - with: - go-version: 1.19 - - name: Check out code into the Go module directory - uses: actions/checkout@v3 - - name: Build and install Iter8 - run: make install +# kubernetes-load-test-grpc: +# name: gRPC load test with various URLs +# runs-on: ubuntu-latest +# steps: +# - name: Install Go +# uses: actions/setup-go@v4 +# with: +# go-version: 1.19 +# - name: Check out code into the Go module directory +# uses: actions/checkout@v3 +# - name: Build and install Iter8 +# run: make install - - name: Start kind cluster - uses: helm/kind-action@v1.5.0 - with: - wait: 300s +# - name: Start kind cluster +# uses: helm/kind-action@v1.5.0 +# with: +# wait: 300s - - name: Deploy gRPC service in the Kubernetes cluster - run: | - kubectl create deployment routeguide --image=golang --port=50051 \ - -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run server/server.go" - kubectl expose deployment routeguide --port=50051 - kubectl wait --for=condition=available --timeout=60s deployment/routeguide +# - name: Deploy gRPC service in the Kubernetes cluster +# run: | +# kubectl create deployment routeguide --image=golang --port=50051 \ +# -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run server/server.go" +# kubectl expose deployment routeguide --port=50051 +# kubectl wait --for=condition=available --timeout=60s deployment/routeguide - - name: Test gRPC service with grpcurl - run: | - curl -sO https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml - kubectl apply -f grpcurl-routeguide.yaml - sleep 180 - kubectl logs deploy/sleep +# - name: Test gRPC service with grpcurl +# run: | +# curl -sO https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml +# kubectl apply -f grpcurl-routeguide.yaml +# sleep 180 +# kubectl logs deploy/sleep - - name: load test grpc service - run: | - iter8 k launch --localChart --chartName charts/iter8 \ - --set "tasks={ready,grpc}" \ - --set ready.deploy=routeguide \ - --set ready.service=routeguide \ - --set ready.timeout=60s \ - --set grpc.host=routeguide.default:50051 \ - --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ - --set grpc.call=routeguide.RouteGuide.GetFeature \ - --set grpc.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json - sleep 60 - iter8 k log - iter8 k delete +# - name: load test grpc service +# run: | +# iter8 k launch --localChart --chartName charts/iter8 \ +# --set "tasks={ready,grpc}" \ +# --set ready.deploy=routeguide \ +# --set ready.service=routeguide \ +# --set ready.timeout=60s \ +# --set grpc.host=routeguide.default:50051 \ +# --set 
grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ +# --set grpc.call=routeguide.RouteGuide.GetFeature \ +# --set grpc.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json +# sleep 60 +# iter8 k log +# iter8 k delete - - name: load test grpc service with multiple endpoints - run: | - iter8 k launch --localChart --chartName charts/iter8 \ - --set "tasks={ready,grpc}" \ - --set ready.deploy=routeguide \ - --set ready.service=routeguide \ - --set ready.timeout=60s \ - --set grpc.host=routeguide.default:50051 \ - --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ - --set grpc.endpoints.getFeature.call=routeguide.RouteGuide.GetFeature \ - --set grpc.endpoints.getFeature.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ - --set grpc.endpoints.listFeatures.call=routeguide.RouteGuide.ListFeatures \ - --set grpc.endpoints.listFeatures.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/server.json - sleep 60 - iter8 k log - iter8 k delete +# - name: load test grpc service with multiple endpoints +# run: | +# iter8 k launch --localChart --chartName charts/iter8 \ +# --set "tasks={ready,grpc}" \ +# --set ready.deploy=routeguide \ +# --set ready.service=routeguide \ +# --set ready.timeout=60s \ +# --set grpc.host=routeguide.default:50051 \ +# --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ +# --set grpc.endpoints.getFeature.call=routeguide.RouteGuide.GetFeature \ +# --set grpc.endpoints.getFeature.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ +# --set grpc.endpoints.listFeatures.call=routeguide.RouteGuide.ListFeatures \ +# --set grpc.endpoints.listFeatures.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/server.json +# sleep 60 +# iter8 k log +# iter8 k delete - kubernetes-load-test-grpc2: - name: gRPC load test 2 with various URLs - runs-on: ubuntu-latest - steps: - - name: Install Go - uses: actions/setup-go@v4 - with: - go-version: 1.19 - - name: Check out code into the Go module directory - uses: actions/checkout@v3 - - name: Build and install Iter8 - run: make install +# kubernetes-load-test-grpc2: +# name: gRPC load test 2 with various URLs +# runs-on: ubuntu-latest +# steps: +# - name: Install Go +# uses: actions/setup-go@v4 +# with: +# go-version: 1.19 +# - name: Check out code into the Go module directory +# uses: actions/checkout@v3 +# - name: Build and install Iter8 +# run: make install - - name: Start kind cluster - uses: helm/kind-action@v1.5.0 - with: - wait: 300s +# - name: Start kind cluster +# uses: helm/kind-action@v1.5.0 +# with: +# wait: 300s - - name: Deploy gRPC service in the Kubernetes cluster - run: | - kubectl create deploy hello --image=docker.io/grpc/java-example-hostname:latest --port=50051 - kubectl expose deploy hello --port=50051 +# - name: Deploy gRPC service in the Kubernetes cluster +# run: | +# kubectl create deploy hello --image=docker.io/grpc/java-example-hostname:latest --port=50051 +# kubectl expose deploy hello --port=50051 - - name: load test grpc service with protoURL - run: | - iter8 k launch --localChart --chartName charts/iter8 \ - --set "tasks={ready,grpc}" \ - --set ready.deploy=hello \ - --set 
ready.service=hello \ - --set ready.timeout=60s \ - --set grpc.host="hello.default:50051" \ - --set grpc.call="helloworld.Greeter.SayHello" \ - --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ - --set grpc.data.name="frodo" - sleep 60 - iter8 k log - iter8 k delete +# - name: load test grpc service with protoURL +# run: | +# iter8 k launch --localChart --chartName charts/iter8 \ +# --set "tasks={ready,grpc}" \ +# --set ready.deploy=hello \ +# --set ready.service=hello \ +# --set ready.timeout=60s \ +# --set grpc.host="hello.default:50051" \ +# --set grpc.call="helloworld.Greeter.SayHello" \ +# --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ +# --set grpc.data.name="frodo" +# sleep 60 +# iter8 k log +# iter8 k delete - - name: load test grpc service with proto/data/metadata URLs - run: | - iter8 k launch --localChart --chartName charts/iter8 \ - --set "tasks={ready,grpc}" \ - --set ready.deploy=hello \ - --set ready.service=hello \ - --set ready.timeout=60s \ - --set grpc.host="hello.default:50051" \ - --set grpc.call="helloworld.Greeter.SayHello" \ - --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ - --set grpc.dataURL="https://gist.githubusercontent.com/sriumcp/3f3178f4b698af6696c925832e51b0ba/raw/d02aa698d34aa2067f7a2f6afb4ceb616b0db822/name.json" \ - --set grpc.metadataURL="https://gist.githubusercontent.com/sriumcp/3f3178f4b698af6696c925832e51b0ba/raw/d02aa698d34aa2067f7a2f6afb4ceb616b0db822/name.json" - sleep 60 - iter8 k log - iter8 k delete +# - name: load test grpc service with proto/data/metadata URLs +# run: | +# iter8 k launch --localChart --chartName charts/iter8 \ +# --set "tasks={ready,grpc}" \ +# --set ready.deploy=hello \ +# --set ready.service=hello \ +# --set ready.timeout=60s \ +# --set grpc.host="hello.default:50051" \ +# --set grpc.call="helloworld.Greeter.SayHello" \ +# --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ +# --set grpc.dataURL="https://gist.githubusercontent.com/sriumcp/3f3178f4b698af6696c925832e51b0ba/raw/d02aa698d34aa2067f7a2f6afb4ceb616b0db822/name.json" \ +# --set grpc.metadataURL="https://gist.githubusercontent.com/sriumcp/3f3178f4b698af6696c925832e51b0ba/raw/d02aa698d34aa2067f7a2f6afb4ceb616b0db822/name.json" +# sleep 60 +# iter8 k log +# iter8 k delete From 105264c9cc4a2f422b3fba7201504e40ffd888fc Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Tue, 22 Aug 2023 17:53:29 -0400 Subject: [PATCH 081/121] Add test Signed-off-by: Alan Cha --- .github/workflows/test.yaml | 93 +++++++++++++++++++++++++++++++++++++ 1 file changed, 93 insertions(+) create mode 100644 .github/workflows/test.yaml diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml new file mode 100644 index 000000000..53c448624 --- /dev/null +++ b/.github/workflows/test.yaml @@ -0,0 +1,93 @@ +name: Test + +on: + pull_request: + branches: + - master + +jobs: + assets: + name: Publish binaries + runs-on: ubuntu-latest + steps: + - name: Install Go + uses: actions/setup-go@v4 + with: + go-version: 1.19 + - uses: actions/checkout@v3 + - name: Build binaries + run: | + VERSION=${GITHUB_REF#refs/*/} + echo "Version: ${VERSION}" + GITHUB_REF=${{ env.GITHUB_REF }} + echo "GITHUB_REF: ${GITHUB_REF}" + OWNER=${{ env.OWNER }} + echo "OWNER: ${OWNER}" + OWNER2=${OWNER} + echo "OWNER2: 
${OWNER2}" + # - name: Upload binaries to release + # uses: svenstaro/upload-release-action@v2 + # with: + # repo_token: ${{ secrets.GITHUB_TOKEN }} + # file: _dist/iter8-*.tar.gz + # tag: ${{ github.ref }} + # overwrite: true + # file_glob: true + # - name: Create checksum + # run: | + # VERSION=${GITHUB_REF#refs/*/} + # echo "VERSION=$VERSION" >> $GITHUB_ENV + # wget https://github.com/iter8-tools/iter8/archive/refs/tags/${VERSION}.zip + # sha256sum ${VERSION}.zip > checksum.txt + # wget https://github.com/iter8-tools/iter8/archive/refs/tags/${VERSION}.tar.gz + # sha256sum ${VERSION}.tar.gz >> checksum.txt + # cd _dist + # for f in iter8-*.tar.gz + # do + # sha256sum ${f} >> ../checksum.txt + # done + # # pick up darwin checksum and export it + # echo "SHAFORMAC=$(grep darwin ../checksum.txt | awk '{print $1}')" >> $GITHUB_ENV + # - name: Upload checksum to release + # uses: svenstaro/upload-release-action@v2 + # with: + # repo_token: ${{ secrets.GITHUB_TOKEN }} + # asset_name: checksum.txt + # file: checksum.txt + # tag: ${{ github.ref }} + # overwrite: true + +# build-and-push: +# name: Push Iter8 image to Docker Hub +# needs: assets +# runs-on: ubuntu-latest +# steps: +# - uses: actions/checkout@v3 +# with: +# fetch-depth: 0 +# - name: Get version +# run: | +# tagref=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,') +# # Strip "v" prefix from tagref +# echo "VERSION=$(echo $tagref | sed -e 's/^v//')" >> $GITHUB_ENV +# echo "MAJOR_MINOR_VERSION=$(echo $tagref | sed -e 's/^v//' -e 's,\([0-9]*\.[0-9]*\)\.\([0-9]*\),\1,')" >> $GITHUB_ENV +# - name: Get owner +# run: | +# ownerrepo=${{ github.repository }} +# owner=$(echo $ownerrepo | cut -f1 -d/) +# if [[ "$owner" == "iter8-tools" ]]; then +# owner=iter8 +# fi +# echo "OWNER=$owner" >> $GITHUB_ENV +# - uses: docker/setup-buildx-action@v2 +# - uses: docker/login-action@v2 +# with: +# username: ${{ secrets.DOCKERHUB_USERNAME }} +# password: ${{ secrets.DOCKERHUB_SECRET }} +# - uses: docker/build-push-action@v4 +# with: +# platforms: linux/amd64,linux/arm64 +# tags: ${{ env.OWNER }}/iter8:${{ env.VERSION }},${{ env.OWNER }}/iter8:${{ env.MAJOR_MINOR_VERSION }},${{ env.OWNER }}/iter8:latest +# push: true +# build-args: | +# TAG=v${{ env.VERSION }} \ No newline at end of file From c34c70971df12c803c4b27231da787070a4e3550 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Wed, 23 Aug 2023 07:06:06 -0400 Subject: [PATCH 082/121] Print env Signed-off-by: Alan Cha --- .github/workflows/test.yaml | 73 ++++++++++++++++++++++--------------- 1 file changed, 44 insertions(+), 29 deletions(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 53c448624..df6ef9d87 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -15,16 +15,51 @@ jobs: with: go-version: 1.19 - uses: actions/checkout@v3 - - name: Build binaries + - name: Create Dockerfile.dev run: | - VERSION=${GITHUB_REF#refs/*/} - echo "Version: ${VERSION}" - GITHUB_REF=${{ env.GITHUB_REF }} - echo "GITHUB_REF: ${GITHUB_REF}" - OWNER=${{ env.OWNER }} - echo "OWNER: ${OWNER}" - OWNER2=${OWNER} - echo "OWNER2: ${OWNER2}" + echo "# Small linux image with iter8 binary + FROM debian:buster-slim + + # Install curl + RUN apt-get update && apt-get install -y curl + + # Download iter8 compressed binary + # use COPY instead of wget + COPY _dist/iter8-linux-amd64.tar.gz iter8-linux-amd64.tar.gz + + # Extract iter8 + RUN tar -xvf iter8-linux-amd64.tar.gz + + # Extract iter8 + RUN mv linux-amd64/iter8 /bin/iter8" > Dockerfile.dev + - name: Get version + run: | + 
tagref=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,') + # Strip "v" prefix from tagref + echo "VERSION=${GITHUB_REF_NAME}" >> $GITHUB_ENV + - name: Get owner + run: | + ownerrepo=${{ github.repository }} + owner=$(echo $ownerrepo | cut -f1 -d/) + if [[ "$owner" == "iter8-tools" ]]; then + owner=iter8 + fi + echo "OWNER=$owner" >> $GITHUB_ENV + - uses: docker/setup-buildx-action@v2 + - uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_SECRET }} + - uses: docker/build-push-action@v4 + with: + platforms: linux/amd64 + tags: ${{ env.OWNER }}/iter8-pr:${{ env.VERSION }} + push: true + build-args: | + TAG=v${{ env.VERSION }} + file: "{context}/Dockerfile.dev" + + # - name: Upload binaries to release # uses: svenstaro/upload-release-action@v2 # with: @@ -71,23 +106,3 @@ jobs: # # Strip "v" prefix from tagref # echo "VERSION=$(echo $tagref | sed -e 's/^v//')" >> $GITHUB_ENV # echo "MAJOR_MINOR_VERSION=$(echo $tagref | sed -e 's/^v//' -e 's,\([0-9]*\.[0-9]*\)\.\([0-9]*\),\1,')" >> $GITHUB_ENV -# - name: Get owner -# run: | -# ownerrepo=${{ github.repository }} -# owner=$(echo $ownerrepo | cut -f1 -d/) -# if [[ "$owner" == "iter8-tools" ]]; then -# owner=iter8 -# fi -# echo "OWNER=$owner" >> $GITHUB_ENV -# - uses: docker/setup-buildx-action@v2 -# - uses: docker/login-action@v2 -# with: -# username: ${{ secrets.DOCKERHUB_USERNAME }} -# password: ${{ secrets.DOCKERHUB_SECRET }} -# - uses: docker/build-push-action@v4 -# with: -# platforms: linux/amd64,linux/arm64 -# tags: ${{ env.OWNER }}/iter8:${{ env.VERSION }},${{ env.OWNER }}/iter8:${{ env.MAJOR_MINOR_VERSION }},${{ env.OWNER }}/iter8:latest -# push: true -# build-args: | -# TAG=v${{ env.VERSION }} \ No newline at end of file From 9a9399fa89ec4c7e16eac05be600134d34e13f38 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Wed, 23 Aug 2023 08:37:13 -0400 Subject: [PATCH 083/121] Move test into assets Signed-off-by: Alan Cha --- .github/workflows/assets.yaml | 61 +++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) diff --git a/.github/workflows/assets.yaml b/.github/workflows/assets.yaml index 848744b6f..09699373b 100644 --- a/.github/workflows/assets.yaml +++ b/.github/workflows/assets.yaml @@ -1,3 +1,64 @@ +name: Test + +on: + pull_request: + branches: + - master + +jobs: + assets: + name: Publish binaries + runs-on: ubuntu-latest + steps: + - name: Install Go + uses: actions/setup-go@v4 + with: + go-version: 1.19 + - uses: actions/checkout@v3 + - name: Create Dockerfile.dev + run: | + echo "# Small linux image with iter8 binary + FROM debian:buster-slim + + # Install curl + RUN apt-get update && apt-get install -y curl + + # Download iter8 compressed binary + # use COPY instead of wget + COPY _dist/iter8-linux-amd64.tar.gz iter8-linux-amd64.tar.gz + + # Extract iter8 + RUN tar -xvf iter8-linux-amd64.tar.gz + + # Extract iter8 + RUN mv linux-amd64/iter8 /bin/iter8" > Dockerfile.dev + - name: Get version + run: | + tagref=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,') + # Strip "v" prefix from tagref + echo "VERSION=${GITHUB_REF_NAME}" >> $GITHUB_ENV + - name: Get owner + run: | + ownerrepo=${{ github.repository }} + owner=$(echo $ownerrepo | cut -f1 -d/) + if [[ "$owner" == "iter8-tools" ]]; then + owner=iter8 + fi + echo "OWNER=$owner" >> $GITHUB_ENV + - uses: docker/setup-buildx-action@v2 + - uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_SECRET }} + - uses: docker/build-push-action@v4 
+ with: + platforms: linux/amd64 + tags: ${{ env.OWNER }}/iter8-pr:${{ env.VERSION }} + push: true + build-args: | + TAG=v${{ env.VERSION }} + file: "{context}/Dockerfile.dev" + # name: Publish binaries and Docker image # on: From fe4b549501c9444d5c5b1e3f958640d3a3bfdf75 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Wed, 23 Aug 2023 09:00:39 -0400 Subject: [PATCH 084/121] Add load test Signed-off-by: Alan Cha --- .github/workflows/test.yaml | 67 ++++++++++++++++++++++++++++++------- 1 file changed, 54 insertions(+), 13 deletions(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index df6ef9d87..8dd15aa6c 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -45,21 +45,62 @@ jobs: owner=iter8 fi echo "OWNER=$owner" >> $GITHUB_ENV - - uses: docker/setup-buildx-action@v2 - - uses: docker/login-action@v2 + # - uses: docker/setup-buildx-action@v2 + # - uses: docker/login-action@v2 + # with: + # username: ${{ secrets.DOCKERHUB_USERNAME }} + # password: ${{ secrets.DOCKERHUB_SECRET }} + # - uses: docker/build-push-action@v4 + # with: + # platforms: linux/amd64 + # tags: ${{ env.OWNER }}/iter8-pr:${{ env.VERSION }} + # push: true + # build-args: | + # TAG=v${{ env.VERSION }} + # file: "{context}/Dockerfile.dev" + + kubernetes-load-test-http: + name: HTTP load test (with readiness) at the edge of Kubernetes + runs-on: ubuntu-latest + steps: + - name: Install Go + uses: actions/setup-go@v4 with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_SECRET }} - - uses: docker/build-push-action@v4 + go-version: 1.19 + - name: Check out code into the Go module directory + uses: actions/checkout@v3 + - name: Build and install Iter8 + run: make install + - name: Start kind cluster + uses: helm/kind-action@v1.5.0 with: - platforms: linux/amd64 - tags: ${{ env.OWNER }}/iter8-pr:${{ env.VERSION }} - push: true - build-args: | - TAG=v${{ env.VERSION }} - file: "{context}/Dockerfile.dev" - - + wait: 300s + - run: | + echo $(${{ env.OWNER }}/iter8-pr:${{ env.VERSION }}) + echo ${{ env.OWNER }}/iter8-pr:${{ env.VERSION }} + env | sort + - name: run httpbin + run: | + set -e + kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 + kubectl expose deploy httpbin --port=80 + - name: install controller + run: | + helm install iter8 charts/controller --set image=puffinmuffin/iter8-grafana --set logLevel=trace + - name: load-test-http in Kubernetes + run: | + iter8 k launch --localChart --chartName charts/iter8 \ + --set iter8Image=puffinmuffin/iter8-grafana \ + --set "tasks={ready,http}" \ + --set ready.deploy=httpbin \ + --set ready.service=httpbin \ + --set ready.timeout=60s \ + --set http.url=http://httpbin.default/get \ + --set http.duration="3s" + sleep 60 + iter8 k log + iter8 k delete + - name: # - name: Upload binaries to release # uses: svenstaro/upload-release-action@v2 # with: From ab196ddcc9382b779fec7ce517a75080b63d9d5a Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Wed, 23 Aug 2023 09:04:00 -0400 Subject: [PATCH 085/121] Fix test Signed-off-by: Alan Cha --- .github/workflows/test.yaml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 8dd15aa6c..4f6131734 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -100,7 +100,10 @@ jobs: sleep 60 iter8 k log iter8 k delete - - name: + - name: check GET /httpDashboard + run: | + kubectl port-forward service/iter8 8080:8080 + curl 
http://localhost:8080/httpDashboard?namespace=default&experiment=default -f # - name: Upload binaries to release # uses: svenstaro/upload-release-action@v2 # with: From b7ce2bb49c45e461ca6cf8df6d1e0c4d5ea720a0 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Wed, 23 Aug 2023 09:11:13 -0400 Subject: [PATCH 086/121] Add version and tag Signed-off-by: Alan Cha --- .github/workflows/test.yaml | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 4f6131734..0005cccd3 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -32,19 +32,21 @@ jobs: # Extract iter8 RUN mv linux-amd64/iter8 /bin/iter8" > Dockerfile.dev - - name: Get version + - name: Get version and tag run: | tagref=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,') # Strip "v" prefix from tagref echo "VERSION=${GITHUB_REF_NAME}" >> $GITHUB_ENV - - name: Get owner - run: | - ownerrepo=${{ github.repository }} - owner=$(echo $ownerrepo | cut -f1 -d/) - if [[ "$owner" == "iter8-tools" ]]; then - owner=iter8 - fi - echo "OWNER=$owner" >> $GITHUB_ENV + # echo "IMAGE_TAG=${GITHUB_REF_NAME}/iter8-pr:${GITHUB_REF_NAME}" >> $GITHUB_ENV + echo "IMAGE_TAG=puffinmuffin/iter8-grafana" >> $GITHUB_ENV + # - name: Get owner + # run: | + # ownerrepo=${{ github.repository }} + # owner=$(echo $ownerrepo | cut -f1 -d/) + # if [[ "$owner" == "iter8-tools" ]]; then + # owner=iter8 + # fi + # echo "OWNER=$owner" >> $GITHUB_ENV # - uses: docker/setup-buildx-action@v2 # - uses: docker/login-action@v2 # with: @@ -53,7 +55,7 @@ jobs: # - uses: docker/build-push-action@v4 # with: # platforms: linux/amd64 - # tags: ${{ env.OWNER }}/iter8-pr:${{ env.VERSION }} + # tags: ${{ env.IMAGE_TAG }} # push: true # build-args: | # TAG=v${{ env.VERSION }} @@ -61,6 +63,7 @@ jobs: kubernetes-load-test-http: name: HTTP load test (with readiness) at the edge of Kubernetes + needs: assets runs-on: ubuntu-latest steps: - name: Install Go @@ -86,11 +89,11 @@ jobs: kubectl expose deploy httpbin --port=80 - name: install controller run: | - helm install iter8 charts/controller --set image=puffinmuffin/iter8-grafana --set logLevel=trace + helm install iter8 charts/controller --set image=${IMAGE_TAG} --set logLevel=trace - name: load-test-http in Kubernetes run: | iter8 k launch --localChart --chartName charts/iter8 \ - --set iter8Image=puffinmuffin/iter8-grafana \ + --set iter8Image=${IMAGE_TAG} \ --set "tasks={ready,http}" \ --set ready.deploy=httpbin \ --set ready.service=httpbin \ @@ -102,7 +105,7 @@ jobs: iter8 k delete - name: check GET /httpDashboard run: | - kubectl port-forward service/iter8 8080:8080 + kubectl port-forward service/iter8 8080:8080 & curl http://localhost:8080/httpDashboard?namespace=default&experiment=default -f # - name: Upload binaries to release # uses: svenstaro/upload-release-action@v2 From 1f9d97693c2014033888a4a90a23f1bee72450f4 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Wed, 23 Aug 2023 09:15:51 -0400 Subject: [PATCH 087/121] Move echo to beginning Signed-off-by: Alan Cha --- .github/workflows/test.yaml | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 0005cccd3..412b1c103 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -34,11 +34,14 @@ jobs: RUN mv linux-amd64/iter8 /bin/iter8" > Dockerfile.dev - name: Get version and tag run: | - tagref=$(echo "${{ github.ref }}" | sed -e 
's,.*/\(.*\),\1,') - # Strip "v" prefix from tagref + # GitHub ref name echo "VERSION=${GITHUB_REF_NAME}" >> $GITHUB_ENV - # echo "IMAGE_TAG=${GITHUB_REF_NAME}/iter8-pr:${GITHUB_REF_NAME}" >> $GITHUB_ENV + echo $VERSION + + # Docker image tag echo "IMAGE_TAG=puffinmuffin/iter8-grafana" >> $GITHUB_ENV + echo $IMAGE_TAG + # - name: Get owner # run: | # ownerrepo=${{ github.repository }} @@ -66,6 +69,11 @@ jobs: needs: assets runs-on: ubuntu-latest steps: + - run: | + echo ${{ env.OWNER }}/iter8-pr:${{ env.VERSION }} + echo $(${{ env.OWNER }}/iter8-pr:${{ env.VERSION }}) + echo $IMAGE_TAG + env | sort - name: Install Go uses: actions/setup-go@v4 with: @@ -78,10 +86,6 @@ jobs: uses: helm/kind-action@v1.5.0 with: wait: 300s - - run: | - echo $(${{ env.OWNER }}/iter8-pr:${{ env.VERSION }}) - echo ${{ env.OWNER }}/iter8-pr:${{ env.VERSION }} - env | sort - name: run httpbin run: | set -e From df55dcba7252dc373b0595659129b48c4a664e77 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Wed, 23 Aug 2023 09:21:01 -0400 Subject: [PATCH 088/121] Delete assets test Signed-off-by: Alan Cha --- .github/workflows/assets.yaml | 61 ----------------------------------- .github/workflows/test.yaml | 7 ++-- 2 files changed, 4 insertions(+), 64 deletions(-) diff --git a/.github/workflows/assets.yaml b/.github/workflows/assets.yaml index 09699373b..848744b6f 100644 --- a/.github/workflows/assets.yaml +++ b/.github/workflows/assets.yaml @@ -1,64 +1,3 @@ -name: Test - -on: - pull_request: - branches: - - master - -jobs: - assets: - name: Publish binaries - runs-on: ubuntu-latest - steps: - - name: Install Go - uses: actions/setup-go@v4 - with: - go-version: 1.19 - - uses: actions/checkout@v3 - - name: Create Dockerfile.dev - run: | - echo "# Small linux image with iter8 binary - FROM debian:buster-slim - - # Install curl - RUN apt-get update && apt-get install -y curl - - # Download iter8 compressed binary - # use COPY instead of wget - COPY _dist/iter8-linux-amd64.tar.gz iter8-linux-amd64.tar.gz - - # Extract iter8 - RUN tar -xvf iter8-linux-amd64.tar.gz - - # Extract iter8 - RUN mv linux-amd64/iter8 /bin/iter8" > Dockerfile.dev - - name: Get version - run: | - tagref=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,') - # Strip "v" prefix from tagref - echo "VERSION=${GITHUB_REF_NAME}" >> $GITHUB_ENV - - name: Get owner - run: | - ownerrepo=${{ github.repository }} - owner=$(echo $ownerrepo | cut -f1 -d/) - if [[ "$owner" == "iter8-tools" ]]; then - owner=iter8 - fi - echo "OWNER=$owner" >> $GITHUB_ENV - - uses: docker/setup-buildx-action@v2 - - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_SECRET }} - - uses: docker/build-push-action@v4 - with: - platforms: linux/amd64 - tags: ${{ env.OWNER }}/iter8-pr:${{ env.VERSION }} - push: true - build-args: | - TAG=v${{ env.VERSION }} - file: "{context}/Dockerfile.dev" - # name: Publish binaries and Docker image # on: diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 412b1c103..707a1d041 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -35,12 +35,13 @@ jobs: - name: Get version and tag run: | # GitHub ref name - echo "VERSION=${GITHUB_REF_NAME}" >> $GITHUB_ENV + VERSION=${GITHUB_REF_NAME} echo $VERSION + echo "VERSION=${VERSION}" >> $GITHUB_ENV - # Docker image tag - echo "IMAGE_TAG=puffinmuffin/iter8-grafana" >> $GITHUB_ENV + IMAGE_TAG=$puffinmuffin/iter8-grafana echo $IMAGE_TAG + echo "IMAGE_TAG=${IMAGE_TAG}" >> $GITHUB_ENV # - name: Get owner # 
run: | From 21f9e4b6038ff59555ee3e495b7aec85be0761f4 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Wed, 23 Aug 2023 09:29:16 -0400 Subject: [PATCH 089/121] Tweeking Signed-off-by: Alan Cha --- .github/workflows/test.yaml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 707a1d041..151f3a6eb 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -39,7 +39,7 @@ jobs: echo $VERSION echo "VERSION=${VERSION}" >> $GITHUB_ENV - IMAGE_TAG=$puffinmuffin/iter8-grafana + IMAGE_TAG=puffinmuffin/iter8-grafana echo $IMAGE_TAG echo "IMAGE_TAG=${IMAGE_TAG}" >> $GITHUB_ENV @@ -71,8 +71,7 @@ jobs: runs-on: ubuntu-latest steps: - run: | - echo ${{ env.OWNER }}/iter8-pr:${{ env.VERSION }} - echo $(${{ env.OWNER }}/iter8-pr:${{ env.VERSION }}) + echo ${{ env.IMAGE_TAG }} echo $IMAGE_TAG env | sort - name: Install Go From c798f23ad18ea41f7f98f1fc959781c122bb64e7 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Wed, 23 Aug 2023 13:15:11 -0400 Subject: [PATCH 090/121] Add output Signed-off-by: Alan Cha --- .github/workflows/test.yaml | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 151f3a6eb..73f9a70a8 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -33,16 +33,18 @@ jobs: # Extract iter8 RUN mv linux-amd64/iter8 /bin/iter8" > Dockerfile.dev - name: Get version and tag + id: versionImageTag run: | # GitHub ref name VERSION=${GITHUB_REF_NAME} echo $VERSION - echo "VERSION=${VERSION}" >> $GITHUB_ENV + echo "VERSION=${VERSION}" >> "$GITHUB_ENV" + echo "VERSION=${VERSION}" >> "$GITHUB_OUTPUT" IMAGE_TAG=puffinmuffin/iter8-grafana echo $IMAGE_TAG - echo "IMAGE_TAG=${IMAGE_TAG}" >> $GITHUB_ENV - + echo "IMAGE_TAG=${IMAGE_TAG}" >> "$GITHUB_ENV" + echo "IMAGE_TAG=${IMAGE_TAG}" >> "$GITHUB_OUTPUT" # - name: Get owner # run: | # ownerrepo=${{ github.repository }} @@ -64,14 +66,19 @@ jobs: # build-args: | # TAG=v${{ env.VERSION }} # file: "{context}/Dockerfile.dev" + outputs: + VERSION: ${{ steps.versionImageTag.outputs.VERSION }} + IMAGE_TAG: ${{ steps.versionImageTag.outputs.IMAGE_TAG }} kubernetes-load-test-http: name: HTTP load test (with readiness) at the edge of Kubernetes needs: assets runs-on: ubuntu-latest steps: - - run: | - echo ${{ env.IMAGE_TAG }} + - env: + VERSION: ${{needs.assets.outputs.VERSION}} + IMAGE_TAG: ${{needs.assets.outputs.IMAGE_TAG}} + run: | echo $IMAGE_TAG env | sort - name: Install Go From 11bdaaeb3e611872d615b7a62a02fe7a66567daa Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Wed, 23 Aug 2023 13:23:02 -0400 Subject: [PATCH 091/121] Directly use outputs Signed-off-by: Alan Cha --- .github/workflows/test.yaml | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 73f9a70a8..a20fa5c62 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -37,12 +37,12 @@ jobs: run: | # GitHub ref name VERSION=${GITHUB_REF_NAME} - echo $VERSION + echo "VERSION: $VERSION" echo "VERSION=${VERSION}" >> "$GITHUB_ENV" echo "VERSION=${VERSION}" >> "$GITHUB_OUTPUT" IMAGE_TAG=puffinmuffin/iter8-grafana - echo $IMAGE_TAG + echo "IMAGE_TAG: $IMAGE_TAG" echo "IMAGE_TAG=${IMAGE_TAG}" >> "$GITHUB_ENV" echo "IMAGE_TAG=${IMAGE_TAG}" >> "$GITHUB_OUTPUT" # - name: Get owner @@ -75,12 +75,6 @@ jobs: needs: assets runs-on: ubuntu-latest steps: - - env: - VERSION: 
${{needs.assets.outputs.VERSION}} - IMAGE_TAG: ${{needs.assets.outputs.IMAGE_TAG}} - run: | - echo $IMAGE_TAG - env | sort - name: Install Go uses: actions/setup-go@v4 with: @@ -100,11 +94,11 @@ jobs: kubectl expose deploy httpbin --port=80 - name: install controller run: | - helm install iter8 charts/controller --set image=${IMAGE_TAG} --set logLevel=trace + helm install iter8 charts/controller --set image=${{needs.assets.outputs.IMAGE_TAG}} --set logLevel=trace - name: load-test-http in Kubernetes run: | iter8 k launch --localChart --chartName charts/iter8 \ - --set iter8Image=${IMAGE_TAG} \ + --set iter8Image=${{needs.assets.outputs.IMAGE_TAG}} \ --set "tasks={ready,http}" \ --set ready.deploy=httpbin \ --set ready.service=httpbin \ From 483d6a14e08cea1afb0f8fb42bec4ea5ea493311 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Wed, 23 Aug 2023 13:39:00 -0400 Subject: [PATCH 092/121] Fix link using quotes Signed-off-by: Alan Cha --- .github/workflows/test.yaml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index a20fa5c62..a084c22ff 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -7,7 +7,7 @@ on: jobs: assets: - name: Publish binaries + name: Build and push test Docker image runs-on: ubuntu-latest steps: - name: Install Go @@ -87,7 +87,7 @@ jobs: uses: helm/kind-action@v1.5.0 with: wait: 300s - - name: run httpbin + - name: Create httpbin application run: | set -e kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 @@ -111,7 +111,8 @@ jobs: - name: check GET /httpDashboard run: | kubectl port-forward service/iter8 8080:8080 & - curl http://localhost:8080/httpDashboard?namespace=default&experiment=default -f + sleep 30 + curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f # - name: Upload binaries to release # uses: svenstaro/upload-release-action@v2 # with: From 10d8c06ef53bcac602ed0572cd705dedeb974cf9 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Wed, 23 Aug 2023 14:19:27 -0400 Subject: [PATCH 093/121] Add more logging Signed-off-by: Alan Cha --- .github/workflows/test.yaml | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index a084c22ff..c2e974826 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -89,12 +89,15 @@ jobs: wait: 300s - name: Create httpbin application run: | - set -e kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 kubectl expose deploy httpbin --port=80 - - name: install controller + - name: Install controller run: | helm install iter8 charts/controller --set image=${{needs.assets.outputs.IMAGE_TAG}} --set logLevel=trace + - name: Controller log + run: | + kubectl get all + kubectl get service/iter8 - name: load-test-http in Kubernetes run: | iter8 k launch --localChart --chartName charts/iter8 \ @@ -106,8 +109,13 @@ jobs: --set http.url=http://httpbin.default/get \ --set http.duration="3s" sleep 60 - iter8 k log - iter8 k delete + # iter8 k log + # iter8 k delete + - name: Controller log + run: | + kubectl get all + kubectl get service/iter8 + kubectl get kubectl logs job.batch/default-1-job - name: check GET /httpDashboard run: | kubectl port-forward service/iter8 8080:8080 & From 91f9fd7d6b1e49766d42ab2faf08b627f800dad1 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Wed, 23 Aug 2023 14:26:39 -0400 Subject: [PATCH 094/121] Fix logging Signed-off-by: Alan Cha --- 
.github/workflows/test.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index c2e974826..4b4834aaa 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -115,7 +115,7 @@ jobs: run: | kubectl get all kubectl get service/iter8 - kubectl get kubectl logs job.batch/default-1-job + kubectl logs job.batch/default-1-job - name: check GET /httpDashboard run: | kubectl port-forward service/iter8 8080:8080 & From 6a5f4e91f17880ec04ccd7e9bb594749c80442e6 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Wed, 23 Aug 2023 15:09:16 -0400 Subject: [PATCH 095/121] Job working but needed more logging Signed-off-by: Alan Cha --- .github/workflows/test.yaml | 35 +++++++++++++++++++---------------- 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 4b4834aaa..a7cc51afb 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -14,24 +14,24 @@ jobs: uses: actions/setup-go@v4 with: go-version: 1.19 - - uses: actions/checkout@v3 - - name: Create Dockerfile.dev - run: | - echo "# Small linux image with iter8 binary - FROM debian:buster-slim + # - uses: actions/checkout@v3 + # - name: Create Dockerfile.dev + # run: | + # echo "# Small linux image with iter8 binary + # FROM debian:buster-slim - # Install curl - RUN apt-get update && apt-get install -y curl + # # Install curl + # RUN apt-get update && apt-get install -y curl - # Download iter8 compressed binary - # use COPY instead of wget - COPY _dist/iter8-linux-amd64.tar.gz iter8-linux-amd64.tar.gz + # # Download iter8 compressed binary + # # use COPY instead of wget + # COPY _dist/iter8-linux-amd64.tar.gz iter8-linux-amd64.tar.gz - # Extract iter8 - RUN tar -xvf iter8-linux-amd64.tar.gz + # # Extract iter8 + # RUN tar -xvf iter8-linux-amd64.tar.gz - # Extract iter8 - RUN mv linux-amd64/iter8 /bin/iter8" > Dockerfile.dev + # # Extract iter8 + # RUN mv linux-amd64/iter8 /bin/iter8" > Dockerfile.dev - name: Get version and tag id: versionImageTag run: | @@ -107,15 +107,18 @@ jobs: --set ready.service=httpbin \ --set ready.timeout=60s \ --set http.url=http://httpbin.default/get \ - --set http.duration="3s" + --set http.duration="3s" \ + --set logLevel=trace sleep 60 # iter8 k log # iter8 k delete - name: Controller log run: | kubectl get all - kubectl get service/iter8 + - run: | kubectl logs job.batch/default-1-job + - run: | + kubectl logs service/iter8 - name: check GET /httpDashboard run: | kubectl port-forward service/iter8 8080:8080 & From afaffed843a11b99e28e26bad9e2a1eb4f1761a4 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Wed, 23 Aug 2023 15:27:17 -0400 Subject: [PATCH 096/121] Add wait Signed-off-by: Alan Cha --- .github/workflows/test.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index a7cc51afb..434ffc921 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -91,9 +91,11 @@ jobs: run: | kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 kubectl expose deploy httpbin --port=80 + kubectl wait --for=condition=available --timeout=60s deploy/httpbin - name: Install controller run: | helm install iter8 charts/controller --set image=${{needs.assets.outputs.IMAGE_TAG}} --set logLevel=trace + kubectl rollout status --watch --timeout=60s statefulset/iter8 - name: Controller log run: | kubectl get all From 26ba3d7ab22fe60be645af78bf9378b1b4685c94 Mon Sep 17 
00:00:00 2001 From: Alan Cha Date: Wed, 23 Aug 2023 15:46:37 -0400 Subject: [PATCH 097/121] Clean up Signed-off-by: Alan Cha --- .github/workflows/test.yaml | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 434ffc921..c6afe0cd0 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -95,11 +95,7 @@ jobs: - name: Install controller run: | helm install iter8 charts/controller --set image=${{needs.assets.outputs.IMAGE_TAG}} --set logLevel=trace - kubectl rollout status --watch --timeout=60s statefulset/iter8 - - name: Controller log - run: | - kubectl get all - kubectl get service/iter8 + kubectl rollout status --watch --timeout=60s statefulset/iter8 - name: load-test-http in Kubernetes run: | iter8 k launch --localChart --chartName charts/iter8 \ @@ -112,19 +108,15 @@ jobs: --set http.duration="3s" \ --set logLevel=trace sleep 60 - # iter8 k log - # iter8 k delete - - name: Controller log + - name: Test additional Iter8 commands run: | - kubectl get all - - run: | - kubectl logs job.batch/default-1-job - - run: | - kubectl logs service/iter8 - - name: check GET /httpDashboard + iter8 k log + iter8 k delete + - name: Expose metrics service run: | kubectl port-forward service/iter8 8080:8080 & - sleep 30 + - name: Check GET /httpDashboard + run: | curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f # - name: Upload binaries to release # uses: svenstaro/upload-release-action@v2 From 9a23f1dabf058c3b5e8a40aadeb7b1844f013a1e Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Wed, 23 Aug 2023 15:55:39 -0400 Subject: [PATCH 098/121] Add Docker login Signed-off-by: Alan Cha --- .github/workflows/test.yaml | 69 +++++++++++++++++++------------------ 1 file changed, 36 insertions(+), 33 deletions(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index c6afe0cd0..e534123c8 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -14,50 +14,53 @@ jobs: uses: actions/setup-go@v4 with: go-version: 1.19 - # - uses: actions/checkout@v3 - # - name: Create Dockerfile.dev - # run: | - # echo "# Small linux image with iter8 binary - # FROM debian:buster-slim + - uses: actions/checkout@v3 + - name: Create Dockerfile.dev + run: | + echo "# Small linux image with iter8 binary + FROM debian:buster-slim - # # Install curl - # RUN apt-get update && apt-get install -y curl + # Install curl + RUN apt-get update && apt-get install -y curl - # # Download iter8 compressed binary - # # use COPY instead of wget - # COPY _dist/iter8-linux-amd64.tar.gz iter8-linux-amd64.tar.gz + # Download iter8 compressed binary + # use COPY instead of wget + COPY _dist/iter8-linux-amd64.tar.gz iter8-linux-amd64.tar.gz - # # Extract iter8 - # RUN tar -xvf iter8-linux-amd64.tar.gz + # Extract iter8 + RUN tar -xvf iter8-linux-amd64.tar.gz - # # Extract iter8 - # RUN mv linux-amd64/iter8 /bin/iter8" > Dockerfile.dev + # Extract iter8 + RUN mv linux-amd64/iter8 /bin/iter8" > Dockerfile.dev - name: Get version and tag - id: versionImageTag + id: versionTag run: | # GitHub ref name VERSION=${GITHUB_REF_NAME} echo "VERSION: $VERSION" echo "VERSION=${VERSION}" >> "$GITHUB_ENV" echo "VERSION=${VERSION}" >> "$GITHUB_OUTPUT" - - IMAGE_TAG=puffinmuffin/iter8-grafana + - name: Get image tag + id: imageTag + run: | + # Docker image + IMAGE_TAG=$(echo ${OWNER}/iter8:${VERSION}) echo "IMAGE_TAG: $IMAGE_TAG" echo "IMAGE_TAG=${IMAGE_TAG}" >> "$GITHUB_ENV" echo 
"IMAGE_TAG=${IMAGE_TAG}" >> "$GITHUB_OUTPUT" - # - name: Get owner - # run: | - # ownerrepo=${{ github.repository }} - # owner=$(echo $ownerrepo | cut -f1 -d/) - # if [[ "$owner" == "iter8-tools" ]]; then - # owner=iter8 - # fi - # echo "OWNER=$owner" >> $GITHUB_ENV - # - uses: docker/setup-buildx-action@v2 - # - uses: docker/login-action@v2 - # with: - # username: ${{ secrets.DOCKERHUB_USERNAME }} - # password: ${{ secrets.DOCKERHUB_SECRET }} + - name: Get owner + run: | + ownerrepo=${{ github.repository }} + owner=$(echo $ownerrepo | cut -f1 -d/) + if [[ "$owner" == "iter8-tools" ]]; then + owner=iter8 + fi + echo "OWNER=$owner" >> $GITHUB_ENV + - uses: docker/setup-buildx-action@v2 + - uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_SECRET }} # - uses: docker/build-push-action@v4 # with: # platforms: linux/amd64 @@ -66,9 +69,9 @@ jobs: # build-args: | # TAG=v${{ env.VERSION }} # file: "{context}/Dockerfile.dev" - outputs: - VERSION: ${{ steps.versionImageTag.outputs.VERSION }} - IMAGE_TAG: ${{ steps.versionImageTag.outputs.IMAGE_TAG }} + # outputs: + # VERSION: ${{ steps.versionTag.outputs.VERSION }} + # IMAGE_TAG: ${{ steps.imageTag.outputs.IMAGE_TAG }} kubernetes-load-test-http: name: HTTP load test (with readiness) at the edge of Kubernetes From a89d6194679522736f4b7f02beb6b2fcb5f3bb84 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Wed, 23 Aug 2023 15:59:18 -0400 Subject: [PATCH 099/121] Clean up Signed-off-by: Alan Cha --- .github/workflows/test.yaml | 64 ++++++------------------------------- 1 file changed, 9 insertions(+), 55 deletions(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index e534123c8..112bd0b36 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -40,14 +40,6 @@ jobs: echo "VERSION: $VERSION" echo "VERSION=${VERSION}" >> "$GITHUB_ENV" echo "VERSION=${VERSION}" >> "$GITHUB_OUTPUT" - - name: Get image tag - id: imageTag - run: | - # Docker image - IMAGE_TAG=$(echo ${OWNER}/iter8:${VERSION}) - echo "IMAGE_TAG: $IMAGE_TAG" - echo "IMAGE_TAG=${IMAGE_TAG}" >> "$GITHUB_ENV" - echo "IMAGE_TAG=${IMAGE_TAG}" >> "$GITHUB_OUTPUT" - name: Get owner run: | ownerrepo=${{ github.repository }} @@ -56,6 +48,14 @@ jobs: owner=iter8 fi echo "OWNER=$owner" >> $GITHUB_ENV + - name: Get image tag + id: imageTag + run: | + # Docker image + IMAGE_TAG=$(echo ${OWNER}/iter8:${VERSION}) + echo "IMAGE_TAG: $IMAGE_TAG" + echo "IMAGE_TAG=${IMAGE_TAG}" >> "$GITHUB_ENV" + echo "IMAGE_TAG=${IMAGE_TAG}" >> "$GITHUB_OUTPUT" - uses: docker/setup-buildx-action@v2 - uses: docker/login-action@v2 with: @@ -120,50 +120,4 @@ jobs: kubectl port-forward service/iter8 8080:8080 & - name: Check GET /httpDashboard run: | - curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f - # - name: Upload binaries to release - # uses: svenstaro/upload-release-action@v2 - # with: - # repo_token: ${{ secrets.GITHUB_TOKEN }} - # file: _dist/iter8-*.tar.gz - # tag: ${{ github.ref }} - # overwrite: true - # file_glob: true - # - name: Create checksum - # run: | - # VERSION=${GITHUB_REF#refs/*/} - # echo "VERSION=$VERSION" >> $GITHUB_ENV - # wget https://github.com/iter8-tools/iter8/archive/refs/tags/${VERSION}.zip - # sha256sum ${VERSION}.zip > checksum.txt - # wget https://github.com/iter8-tools/iter8/archive/refs/tags/${VERSION}.tar.gz - # sha256sum ${VERSION}.tar.gz >> checksum.txt - # cd _dist - # for f in iter8-*.tar.gz - # do - # sha256sum ${f} >> ../checksum.txt - # done - # # 
pick up darwin checksum and export it - # echo "SHAFORMAC=$(grep darwin ../checksum.txt | awk '{print $1}')" >> $GITHUB_ENV - # - name: Upload checksum to release - # uses: svenstaro/upload-release-action@v2 - # with: - # repo_token: ${{ secrets.GITHUB_TOKEN }} - # asset_name: checksum.txt - # file: checksum.txt - # tag: ${{ github.ref }} - # overwrite: true - -# build-and-push: -# name: Push Iter8 image to Docker Hub -# needs: assets -# runs-on: ubuntu-latest -# steps: -# - uses: actions/checkout@v3 -# with: -# fetch-depth: 0 -# - name: Get version -# run: | -# tagref=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,') -# # Strip "v" prefix from tagref -# echo "VERSION=$(echo $tagref | sed -e 's/^v//')" >> $GITHUB_ENV -# echo "MAJOR_MINOR_VERSION=$(echo $tagref | sed -e 's/^v//' -e 's,\([0-9]*\.[0-9]*\)\.\([0-9]*\),\1,')" >> $GITHUB_ENV + curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f \ No newline at end of file From 84937040e135fbfe3e6228d76a5ae08d3c75ea8c Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Wed, 23 Aug 2023 16:01:37 -0400 Subject: [PATCH 100/121] Correct image repo name Signed-off-by: Alan Cha --- .github/workflows/test.yaml | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 112bd0b36..f0a0d2b7e 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -42,17 +42,18 @@ jobs: echo "VERSION=${VERSION}" >> "$GITHUB_OUTPUT" - name: Get owner run: | - ownerrepo=${{ github.repository }} - owner=$(echo $ownerrepo | cut -f1 -d/) - if [[ "$owner" == "iter8-tools" ]]; then - owner=iter8 + OWNER_REPO=${{ github.repository }} + OWNER=$(echo $OWNER_REPO | cut -f1 -d/) + if [[ "$OWNER" == "iter8-tools" ]]; then + OWNER=iter8 fi - echo "OWNER=$owner" >> $GITHUB_ENV + echo "OWNER: $OWNER" + echo "OWNER=$OWNER" >> $GITHUB_ENV - name: Get image tag id: imageTag run: | # Docker image - IMAGE_TAG=$(echo ${OWNER}/iter8:${VERSION}) + IMAGE_TAG=$(echo ${OWNER}/iter8-pr:${VERSION}) echo "IMAGE_TAG: $IMAGE_TAG" echo "IMAGE_TAG=${IMAGE_TAG}" >> "$GITHUB_ENV" echo "IMAGE_TAG=${IMAGE_TAG}" >> "$GITHUB_OUTPUT" From bb0ca0edbad10b4fa7a1fc677e1724e845f01ead Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Wed, 23 Aug 2023 16:03:23 -0400 Subject: [PATCH 101/121] Add everything Signed-off-by: Alan Cha --- .github/workflows/test.yaml | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index f0a0d2b7e..251475e38 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -62,17 +62,15 @@ jobs: with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_SECRET }} - # - uses: docker/build-push-action@v4 - # with: - # platforms: linux/amd64 - # tags: ${{ env.IMAGE_TAG }} - # push: true - # build-args: | - # TAG=v${{ env.VERSION }} - # file: "{context}/Dockerfile.dev" - # outputs: - # VERSION: ${{ steps.versionTag.outputs.VERSION }} - # IMAGE_TAG: ${{ steps.imageTag.outputs.IMAGE_TAG }} + - uses: docker/build-push-action@v4 + with: + platforms: linux/amd64 + tags: ${{ env.IMAGE_TAG }} + push: true + file: "{context}/Dockerfile.dev" + outputs: + VERSION: ${{ steps.versionTag.outputs.VERSION }} + IMAGE_TAG: ${{ steps.imageTag.outputs.IMAGE_TAG }} kubernetes-load-test-http: name: HTTP load test (with readiness) at the edge of Kubernetes From ad38fd76a59f610b562320a5fa4c326576e444ec Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Thu, 24 Aug 
2023 08:25:45 -0400 Subject: [PATCH 102/121] Adjust comments Signed-off-by: Alan Cha --- .github/workflows/assets.yaml | 11 +++++------ .github/workflows/testcharts.yaml | 12 ++++++------ .github/workflows/testperformance.yaml | 9 ++++----- 3 files changed, 15 insertions(+), 17 deletions(-) diff --git a/.github/workflows/assets.yaml b/.github/workflows/assets.yaml index 848744b6f..474901f0b 100644 --- a/.github/workflows/assets.yaml +++ b/.github/workflows/assets.yaml @@ -97,7 +97,7 @@ # uses: helm/kind-action@v1.5.0 # with: # wait: 300s -# - name: Create app +# - name: Create httpbin application # run: | # kubectl create deployment httpbin --image=kennethreitz/httpbin # kubectl expose deployment httpbin --type=ClusterIP --port=80 @@ -124,13 +124,12 @@ # uses: helm/kind-action@v1.5.0 # with: # wait: 300s -# - name: Create app +# - name: Create routeguide application # run: | # kubectl create deployment routeguide --image=golang --port=50051 \ # -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run server/server.go" # kubectl expose deployment routeguide --port=50051 # kubectl wait --for=condition=available --timeout=60s deployment/routeguide - # - name: Test gRPC service with grpcurl # run: | # curl -sO https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml @@ -163,7 +162,7 @@ # uses: helm/kind-action@v1.5.0 # with: # wait: 300s -# - name: Create app +# - name: Create hello application # run: | # kubectl create deploy hello --image=docker.io/grpc/java-example-hostname:latest --port=50051 # kubectl expose deploy hello --port=50051 @@ -192,7 +191,7 @@ # uses: helm/kind-action@v1.5.0 # with: # wait: 300s -# - name: Deploy resources to cluster +# - name: Create httpbin application # run: | # kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 # kubectl expose deploy httpbin --port=80 @@ -216,7 +215,7 @@ # uses: helm/kind-action@v1.5.0 # with: # wait: 300s -# - name: Deploy resources to cluster +# - name: Create httpbin application # run: | # kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 # kubectl expose deploy httpbin --port=80 diff --git a/.github/workflows/testcharts.yaml b/.github/workflows/testcharts.yaml index 317b45371..ea820c43d 100644 --- a/.github/workflows/testcharts.yaml +++ b/.github/workflows/testcharts.yaml @@ -66,7 +66,7 @@ # wait: 300s # node_image: ${{ matrix.version }} -# - name: Create app +# - name: Create httpbin application # if: steps.modified-files.outputs.any_modified == 'true' # run: | # kubectl create deployment httpbin --image=kennethreitz/httpbin @@ -118,7 +118,7 @@ # wait: 300s # node_image: ${{ matrix.version }} -# - name: Create app +# - name: Create httpbin application # if: steps.modified-files.outputs.any_modified == 'true' # run: | # kubectl create deployment httpbin --image=kennethreitz/httpbin @@ -171,7 +171,7 @@ # wait: 300s # node_image: ${{ matrix.version }} -# - name: Create app +# - name: Create httpbin application # if: steps.modified-files.outputs.any_modified == 'true' # run: | # kubectl create deployment httpbin --image=kennethreitz/httpbin @@ -226,7 +226,7 @@ # wait: 300s # node_image: ${{ matrix.version }} -# - name: Deploy gRPC service in the Kubernetes cluster +# - name: Create routeguide application # if: steps.modified-files.outputs.any_modified == 'true' # run: | # kubectl create deployment routeguide 
--image=golang --port=50051 \ @@ -293,7 +293,7 @@ # wait: 300s # node_image: ${{ matrix.version }} -# - name: Deploy gRPC service in the Kubernetes cluster +# - name: Create routeguide application # if: steps.modified-files.outputs.any_modified == 'true' # run: | # kubectl create deployment routeguide --image=golang --port=50051 \ @@ -362,7 +362,7 @@ # wait: 300s # node_image: ${{ matrix.version }} -# - name: Create app +# - name: Create hello application # if: steps.modified-files.outputs.any_modified == 'true' # run: | # kubectl create deploy hello --image=docker.io/grpc/java-example-hostname:latest --port=50051 diff --git a/.github/workflows/testperformance.yaml b/.github/workflows/testperformance.yaml index 2b467119b..705f04d9f 100644 --- a/.github/workflows/testperformance.yaml +++ b/.github/workflows/testperformance.yaml @@ -44,10 +44,9 @@ # with: # wait: 300s -# - name: run httpbin +# - name: Create httpbin application # run: | -# set -e -# kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 +# # kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 # kubectl expose deploy httpbin --port=80 # - name: load-test-http in Kubernetes @@ -111,7 +110,7 @@ # with: # wait: 300s -# - name: Deploy gRPC service in the Kubernetes cluster +# - name: Create routeguide application # run: | # kubectl create deployment routeguide --image=golang --port=50051 \ # -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run server/server.go" @@ -175,7 +174,7 @@ # with: # wait: 300s -# - name: Deploy gRPC service in the Kubernetes cluster +# - name: Create hello application # run: | # kubectl create deploy hello --image=docker.io/grpc/java-example-hostname:latest --port=50051 # kubectl expose deploy hello --port=50051 From 8adbc7b77133693b02df6b4caf97c926022fd667 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Thu, 24 Aug 2023 08:46:32 -0400 Subject: [PATCH 103/121] Uncomment Signed-off-by: Alan Cha --- .github/workflows/buildpushtestimage.yaml | 73 ++ .github/workflows/testcharts.yaml | 831 +++++++++++----------- 2 files changed, 488 insertions(+), 416 deletions(-) create mode 100644 .github/workflows/buildpushtestimage.yaml diff --git a/.github/workflows/buildpushtestimage.yaml b/.github/workflows/buildpushtestimage.yaml new file mode 100644 index 000000000..51e159859 --- /dev/null +++ b/.github/workflows/buildpushtestimage.yaml @@ -0,0 +1,73 @@ +name: Build and push test Docker image + +on: + pull_request: + branches: + - master + +jobs: + buildpush: + runs-on: ubuntu-latest + steps: + - name: Install Go + uses: actions/setup-go@v4 + with: + go-version: 1.19 + - uses: actions/checkout@v3 + - name: Create Dockerfile.dev + run: | + echo "# Small linux image with iter8 binary + FROM debian:buster-slim + + # Install curl + RUN apt-get update && apt-get install -y curl + + # Download iter8 compressed binary + # use COPY instead of wget + COPY _dist/iter8-linux-amd64.tar.gz iter8-linux-amd64.tar.gz + + # Extract iter8 + RUN tar -xvf iter8-linux-amd64.tar.gz + + # Extract iter8 + RUN mv linux-amd64/iter8 /bin/iter8" > Dockerfile.dev + - name: Get version and tag + id: versionTag + run: | + # GitHub ref name + VERSION=${GITHUB_REF_NAME} + echo "VERSION: $VERSION" + echo "VERSION=${VERSION}" >> "$GITHUB_ENV" + - name: Get owner + run: | + OWNER_REPO=${{ github.repository }} + OWNER=$(echo $OWNER_REPO | cut -f1 -d/) + if [[ "$OWNER" == "iter8-tools" ]]; then + OWNER=iter8 + fi + 
echo "OWNER: $OWNER" + echo "OWNER=$OWNER" >> $GITHUB_ENV + - name: Get image tag + id: imageTag + run: | + # Docker image + IMAGE_TAG=$(echo ${OWNER}/iter8-pr:${VERSION}) + echo "IMAGE_TAG: $IMAGE_TAG" + echo "IMAGE_TAG=${IMAGE_TAG}" >> "$GITHUB_ENV" + - uses: docker/setup-buildx-action@v2 + - uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_SECRET }} + - uses: docker/build-push-action@v4 + with: + platforms: linux/amd64 + tags: ${{ env.IMAGE_TAG }} + push: true + file: "{context}/Dockerfile.dev" + - name: Repository dispatch to trigger tests + uses: peter-evans/repository-dispatch@v1 + with: + token: ${{ secrets.PAT }} + event-type: testimage + client-payload: '{"VERSION": "${{ env.VERSION }}", "IMAGE_TAG": "${{ env.IMAGE_TAG }}"}' \ No newline at end of file diff --git a/.github/workflows/testcharts.yaml b/.github/workflows/testcharts.yaml index ea820c43d..edbaf4e37 100644 --- a/.github/workflows/testcharts.yaml +++ b/.github/workflows/testcharts.yaml @@ -1,423 +1,422 @@ -# name: Tests to ensure that changes to charts do not break user experience - -# on: -# pull_request: -# branches: -# - master - -# # Kind versions used to test Iter8 on different versions of Kubernetes -# # From: https://github.com/kubernetes-sigs/kind/releases -# env: -# versions: | -# kindest/node:v1.26.3@sha256:61b92f38dff6ccc29969e7aa154d34e38b89443af1a2c14e6cfbd2df6419c66f -# kindest/node:v1.25.8@sha256:00d3f5314cc35327706776e95b2f8e504198ce59ac545d0200a89e69fce10b7f -# kindest/node:v1.24.12@sha256:1e12918b8bc3d4253bc08f640a231bb0d3b2c5a9b28aa3f2ca1aee93e1e8db16 -# kindest/node:v1.23.17@sha256:e5fd1d9cd7a9a50939f9c005684df5a6d145e8d695e78463637b79464292e66c -# kindest/node:v1.22.17@sha256:c8a828709a53c25cbdc0790c8afe12f25538617c7be879083248981945c38693 -# kindest/node:v1.21.14@sha256:27ef72ea623ee879a25fe6f9982690a3e370c68286f4356bf643467c552a3888 -# kindest/node:v1.27.1@sha256:9915f5629ef4d29f35b478e819249e89cfaffcbfeebda4324e5c01d53d937b09 -# kindest/node:v1.27.0@sha256:c6b22e613523b1af67d4bc8a0c38a4c3ea3a2b8fbc5b367ae36345c9cb844518 - -# jobs: -# # Get the different Kind versions -# get_versions: -# runs-on: ubuntu-latest - -# steps: -# - name: Get the different Kind versions -# id: set-matrix -# run: | -# # Serialize versions into JSON array -# jsonVersions=$(jq -ncR '[inputs]' <<< "$versions") -# echo $jsonVersions - -# # Output serialized jsonVersions -# echo "matrix=$jsonVersions" | sed -e "s/,\"\"//" >> $GITHUB_OUTPUT - -# outputs: -# matrix: ${{ steps.set-matrix.outputs.matrix }} - -# http-experiment: -# name: HTTP load test -# needs: get_versions -# runs-on: ubuntu-latest -# strategy: -# matrix: -# version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - -# steps: -# - name: Check out code -# uses: actions/checkout@v3 - -# - name: Get modified files in the charts/iter8 folder -# id: modified-files -# uses: tj-actions/changed-files@v35 -# with: -# files: charts/iter8 - -# - name: Install Iter8 -# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 -# if: steps.modified-files.outputs.any_modified == 'true' - -# - name: Start kind cluster ${{ matrix.version }} -# uses: helm/kind-action@v1.5.0 -# if: steps.modified-files.outputs.any_modified == 'true' -# with: -# wait: 300s -# node_image: ${{ matrix.version }} - -# - name: Create httpbin application -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# kubectl create deployment httpbin --image=kennethreitz/httpbin -# kubectl expose deployment 
httpbin --type=ClusterIP --port=80 -# kubectl wait --for=condition=available --timeout=60s deploy/httpbin - -# - name: iter8 k launch -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# iter8 k launch \ -# --localChart \ -# --chartName charts/iter8 \ -# --set "tasks={http}" \ -# --set http.url="http://httpbin.default/get" \ - -# - name: Try other iter8 k commands -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# sleep 60 -# iter8 k log -# iter8 k delete - -# http-payload-experiment: -# name: HTTP load test with payload -# needs: get_versions -# runs-on: ubuntu-latest -# strategy: -# matrix: -# version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - -# steps: -# - name: Check out code -# uses: actions/checkout@v3 - -# - name: Get modified files in the charts/iter8 folder -# id: modified-files -# uses: tj-actions/changed-files@v35 -# with: -# files: charts/iter8 - -# - name: Install Iter8 -# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.14 -# if: steps.modified-files.outputs.any_modified == 'true' - -# - name: Start kind cluster ${{ matrix.version }} -# uses: helm/kind-action@v1.5.0 -# if: steps.modified-files.outputs.any_modified == 'true' -# with: -# wait: 300s -# node_image: ${{ matrix.version }} - -# - name: Create httpbin application -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# kubectl create deployment httpbin --image=kennethreitz/httpbin -# kubectl expose deployment httpbin --type=ClusterIP --port=80 -# kubectl wait --for=condition=available --timeout=60s deploy/httpbin - -# - name: iter8 k launch -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# iter8 k launch \ -# --localChart \ -# --chartName charts/iter8 \ -# --set "tasks={http}" \ -# --set http.url="http://httpbin.default/post" \ -# --set http.payloadStr=hello \ - -# - name: Try other iter8 k commands -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# sleep 60 -# iter8 k log -# iter8 k delete - -# http-multiple-experiment: -# name: HTTP load test with multiple endpoints -# needs: get_versions -# runs-on: ubuntu-latest -# strategy: -# matrix: -# version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - -# steps: -# - name: Check out code -# uses: actions/checkout@v3 - -# - name: Get modified files in the charts/iter8 folder -# id: modified-files -# uses: tj-actions/changed-files@v35 -# with: -# files: charts/iter8 - -# - name: Install Iter8 -# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.14 -# if: steps.modified-files.outputs.any_modified == 'true' - -# - name: Start kind cluster ${{ matrix.version }} -# uses: helm/kind-action@v1.5.0 -# if: steps.modified-files.outputs.any_modified == 'true' -# with: -# wait: 300s -# node_image: ${{ matrix.version }} - -# - name: Create httpbin application -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# kubectl create deployment httpbin --image=kennethreitz/httpbin -# kubectl expose deployment httpbin --type=ClusterIP --port=80 -# kubectl wait --for=condition=available --timeout=60s deploy/httpbin - -# - name: iter8 k launch -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# iter8 k launch \ -# --localChart \ -# --chartName charts/iter8 \ -# --set "tasks={http}" \ -# --set http.endpoints.get.url=http://httpbin.default/get \ -# --set http.endpoints.getAnything.url=http://httpbin.default/anything \ -# --set http.endpoints.post.url=http://httpbin.default/post \ -# --set 
http.endpoints.post.payloadStr=hello \ - -# - name: Try other iter8 k commands -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# sleep 60 -# iter8 k log -# iter8 k delete - -# grpc-experiment: -# name: gRPC load test -# needs: get_versions -# runs-on: ubuntu-latest -# strategy: -# matrix: -# version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - -# steps: -# - name: Check out code -# uses: actions/checkout@v3 - -# - name: Get modified files in the charts/iter8 folder -# id: modified-files -# uses: tj-actions/changed-files@v35 -# with: -# files: charts/iter8 +name: Tests to ensure that changes to charts do not break user experience + +on: + repository_dispatch: + types: [buildimage] + +# Kind versions used to test Iter8 on different versions of Kubernetes +# From: https://github.com/kubernetes-sigs/kind/releases +env: + versions: | + kindest/node:v1.26.3@sha256:61b92f38dff6ccc29969e7aa154d34e38b89443af1a2c14e6cfbd2df6419c66f + kindest/node:v1.25.8@sha256:00d3f5314cc35327706776e95b2f8e504198ce59ac545d0200a89e69fce10b7f + kindest/node:v1.24.12@sha256:1e12918b8bc3d4253bc08f640a231bb0d3b2c5a9b28aa3f2ca1aee93e1e8db16 + kindest/node:v1.23.17@sha256:e5fd1d9cd7a9a50939f9c005684df5a6d145e8d695e78463637b79464292e66c + kindest/node:v1.22.17@sha256:c8a828709a53c25cbdc0790c8afe12f25538617c7be879083248981945c38693 + kindest/node:v1.21.14@sha256:27ef72ea623ee879a25fe6f9982690a3e370c68286f4356bf643467c552a3888 + kindest/node:v1.27.1@sha256:9915f5629ef4d29f35b478e819249e89cfaffcbfeebda4324e5c01d53d937b09 + kindest/node:v1.27.0@sha256:c6b22e613523b1af67d4bc8a0c38a4c3ea3a2b8fbc5b367ae36345c9cb844518 + +jobs: + # Get the different Kind versions + get_versions: + runs-on: ubuntu-latest + + steps: + - name: Get the different Kind versions + id: set-matrix + run: | + # Serialize versions into JSON array + jsonVersions=$(jq -ncR '[inputs]' <<< "$versions") + echo $jsonVersions + + # Output serialized jsonVersions + echo "matrix=$jsonVersions" | sed -e "s/,\"\"//" >> $GITHUB_OUTPUT + + outputs: + matrix: ${{ steps.set-matrix.outputs.matrix }} + + http-experiment: + name: HTTP load test + needs: get_versions + runs-on: ubuntu-latest + strategy: + matrix: + version: ${{ fromJson(needs.get_versions.outputs.matrix) }} + + steps: + - name: Check out code + uses: actions/checkout@v3 + + - name: Get modified files in the charts/iter8 folder + id: modified-files + uses: tj-actions/changed-files@v35 + with: + files: charts/iter8 + + - name: Install Iter8 + run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 + if: steps.modified-files.outputs.any_modified == 'true' + + - name: Start kind cluster ${{ matrix.version }} + uses: helm/kind-action@v1.5.0 + if: steps.modified-files.outputs.any_modified == 'true' + with: + wait: 300s + node_image: ${{ matrix.version }} + + - name: Create httpbin application + if: steps.modified-files.outputs.any_modified == 'true' + run: | + kubectl create deployment httpbin --image=kennethreitz/httpbin + kubectl expose deployment httpbin --type=ClusterIP --port=80 + kubectl wait --for=condition=available --timeout=60s deploy/httpbin + + - name: iter8 k launch + if: steps.modified-files.outputs.any_modified == 'true' + run: | + iter8 k launch \ + --localChart \ + --chartName charts/iter8 \ + --set "tasks={http}" \ + --set http.url="http://httpbin.default/get" \ + + - name: Try other iter8 k commands + if: steps.modified-files.outputs.any_modified == 'true' + run: | + sleep 60 + iter8 k log + iter8 k delete + + http-payload-experiment: + name: HTTP 
load test with payload + needs: get_versions + runs-on: ubuntu-latest + strategy: + matrix: + version: ${{ fromJson(needs.get_versions.outputs.matrix) }} + + steps: + - name: Check out code + uses: actions/checkout@v3 + + - name: Get modified files in the charts/iter8 folder + id: modified-files + uses: tj-actions/changed-files@v35 + with: + files: charts/iter8 + + - name: Install Iter8 + run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.14 + if: steps.modified-files.outputs.any_modified == 'true' + + - name: Start kind cluster ${{ matrix.version }} + uses: helm/kind-action@v1.5.0 + if: steps.modified-files.outputs.any_modified == 'true' + with: + wait: 300s + node_image: ${{ matrix.version }} + + - name: Create httpbin application + if: steps.modified-files.outputs.any_modified == 'true' + run: | + kubectl create deployment httpbin --image=kennethreitz/httpbin + kubectl expose deployment httpbin --type=ClusterIP --port=80 + kubectl wait --for=condition=available --timeout=60s deploy/httpbin + + - name: iter8 k launch + if: steps.modified-files.outputs.any_modified == 'true' + run: | + iter8 k launch \ + --localChart \ + --chartName charts/iter8 \ + --set "tasks={http}" \ + --set http.url="http://httpbin.default/post" \ + --set http.payloadStr=hello \ + + - name: Try other iter8 k commands + if: steps.modified-files.outputs.any_modified == 'true' + run: | + sleep 60 + iter8 k log + iter8 k delete + + http-multiple-experiment: + name: HTTP load test with multiple endpoints + needs: get_versions + runs-on: ubuntu-latest + strategy: + matrix: + version: ${{ fromJson(needs.get_versions.outputs.matrix) }} + + steps: + - name: Check out code + uses: actions/checkout@v3 + + - name: Get modified files in the charts/iter8 folder + id: modified-files + uses: tj-actions/changed-files@v35 + with: + files: charts/iter8 + + - name: Install Iter8 + run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.14 + if: steps.modified-files.outputs.any_modified == 'true' + + - name: Start kind cluster ${{ matrix.version }} + uses: helm/kind-action@v1.5.0 + if: steps.modified-files.outputs.any_modified == 'true' + with: + wait: 300s + node_image: ${{ matrix.version }} + + - name: Create httpbin application + if: steps.modified-files.outputs.any_modified == 'true' + run: | + kubectl create deployment httpbin --image=kennethreitz/httpbin + kubectl expose deployment httpbin --type=ClusterIP --port=80 + kubectl wait --for=condition=available --timeout=60s deploy/httpbin + + - name: iter8 k launch + if: steps.modified-files.outputs.any_modified == 'true' + run: | + iter8 k launch \ + --localChart \ + --chartName charts/iter8 \ + --set "tasks={http}" \ + --set http.endpoints.get.url=http://httpbin.default/get \ + --set http.endpoints.getAnything.url=http://httpbin.default/anything \ + --set http.endpoints.post.url=http://httpbin.default/post \ + --set http.endpoints.post.payloadStr=hello \ + + - name: Try other iter8 k commands + if: steps.modified-files.outputs.any_modified == 'true' + run: | + sleep 60 + iter8 k log + iter8 k delete + + grpc-experiment: + name: gRPC load test + needs: get_versions + runs-on: ubuntu-latest + strategy: + matrix: + version: ${{ fromJson(needs.get_versions.outputs.matrix) }} + + steps: + - name: Check out code + uses: actions/checkout@v3 + + - name: Get modified files in the charts/iter8 folder + id: modified-files + uses: tj-actions/changed-files@v35 + with: + files: charts/iter8 -# - name: Install Iter8 -# run: GOBIN=/usr/local/bin go install 
github.com/iter8-tools/iter8@v0.15 -# if: steps.modified-files.outputs.any_modified == 'true' - -# - name: Start kind cluster ${{ matrix.version }} -# uses: helm/kind-action@v1.5.0 -# if: steps.modified-files.outputs.any_modified == 'true' -# with: -# wait: 300s -# node_image: ${{ matrix.version }} - -# - name: Create routeguide application -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# kubectl create deployment routeguide --image=golang --port=50051 \ -# -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run server/server.go" -# kubectl expose deployment routeguide --port=50051 -# kubectl wait --for=condition=available --timeout=60s deployment/routeguide - -# - name: Test gRPC service with grpcurl -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# curl -sO https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml -# kubectl apply -f grpcurl-routeguide.yaml -# sleep 180 -# kubectl logs deploy/sleep + - name: Install Iter8 + run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 + if: steps.modified-files.outputs.any_modified == 'true' + + - name: Start kind cluster ${{ matrix.version }} + uses: helm/kind-action@v1.5.0 + if: steps.modified-files.outputs.any_modified == 'true' + with: + wait: 300s + node_image: ${{ matrix.version }} + + - name: Create routeguide application + if: steps.modified-files.outputs.any_modified == 'true' + run: | + kubectl create deployment routeguide --image=golang --port=50051 \ + -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run server/server.go" + kubectl expose deployment routeguide --port=50051 + kubectl wait --for=condition=available --timeout=60s deployment/routeguide + + - name: Test gRPC service with grpcurl + if: steps.modified-files.outputs.any_modified == 'true' + run: | + curl -sO https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml + kubectl apply -f grpcurl-routeguide.yaml + sleep 180 + kubectl logs deploy/sleep -# - name: iter8 k launch -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# iter8 k launch \ -# --localChart \ -# --chartName charts/iter8 \ -# --set "tasks={ready,grpc}" \ -# --set ready.deploy=routeguide \ -# --set ready.service=routeguide \ -# --set ready.timeout=60s \ -# --set grpc.host=routeguide.default:50051 \ -# --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ -# --set grpc.call=routeguide.RouteGuide.GetFeature \ -# --set grpc.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ - -# - name: Try other iter8 k commands -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# sleep 60 -# iter8 k log -# iter8 k delete - -# grpc-multiple-experiment: -# name: gRPC load test with multiple endpoints -# needs: get_versions -# runs-on: ubuntu-latest -# strategy: -# matrix: -# version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - -# steps: -# - name: Check out code -# uses: actions/checkout@v3 - -# - name: Get modified files in the charts/iter8 folder -# id: modified-files -# uses: tj-actions/changed-files@v35 -# 
with: -# files: charts/iter8 + - name: iter8 k launch + if: steps.modified-files.outputs.any_modified == 'true' + run: | + iter8 k launch \ + --localChart \ + --chartName charts/iter8 \ + --set "tasks={ready,grpc}" \ + --set ready.deploy=routeguide \ + --set ready.service=routeguide \ + --set ready.timeout=60s \ + --set grpc.host=routeguide.default:50051 \ + --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ + --set grpc.call=routeguide.RouteGuide.GetFeature \ + --set grpc.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ + + - name: Try other iter8 k commands + if: steps.modified-files.outputs.any_modified == 'true' + run: | + sleep 60 + iter8 k log + iter8 k delete + + grpc-multiple-experiment: + name: gRPC load test with multiple endpoints + needs: get_versions + runs-on: ubuntu-latest + strategy: + matrix: + version: ${{ fromJson(needs.get_versions.outputs.matrix) }} + + steps: + - name: Check out code + uses: actions/checkout@v3 + + - name: Get modified files in the charts/iter8 folder + id: modified-files + uses: tj-actions/changed-files@v35 + with: + files: charts/iter8 -# - name: Install Iter8 -# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.14 -# if: steps.modified-files.outputs.any_modified == 'true' - -# - name: Start kind cluster ${{ matrix.version }} -# uses: helm/kind-action@v1.5.0 -# if: steps.modified-files.outputs.any_modified == 'true' -# with: -# wait: 300s -# node_image: ${{ matrix.version }} - -# - name: Create routeguide application -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# kubectl create deployment routeguide --image=golang --port=50051 \ -# -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run server/server.go" -# kubectl expose deployment routeguide --port=50051 -# kubectl wait --for=condition=available --timeout=60s deployment/routeguide - -# - name: Test gRPC service with grpcurl -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# curl -sO https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml -# kubectl apply -f grpcurl-routeguide.yaml -# sleep 180 -# kubectl logs deploy/sleep + - name: Install Iter8 + run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.14 + if: steps.modified-files.outputs.any_modified == 'true' + + - name: Start kind cluster ${{ matrix.version }} + uses: helm/kind-action@v1.5.0 + if: steps.modified-files.outputs.any_modified == 'true' + with: + wait: 300s + node_image: ${{ matrix.version }} + + - name: Create routeguide application + if: steps.modified-files.outputs.any_modified == 'true' + run: | + kubectl create deployment routeguide --image=golang --port=50051 \ + -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run server/server.go" + kubectl expose deployment routeguide --port=50051 + kubectl wait --for=condition=available --timeout=60s deployment/routeguide + + - name: Test gRPC service with grpcurl + if: steps.modified-files.outputs.any_modified == 'true' + run: | + curl -sO 
https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml + kubectl apply -f grpcurl-routeguide.yaml + sleep 180 + kubectl logs deploy/sleep -# - name: iter8 k launch -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# iter8 k launch \ -# --localChart \ -# --chartName charts/iter8 \ -# --set "tasks={ready,grpc}" \ -# --set ready.deploy=routeguide \ -# --set ready.service=routeguide \ -# --set ready.timeout=60s \ -# --set grpc.host=routeguide.default:50051 \ -# --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ -# --set grpc.endpoints.getFeature.call=routeguide.RouteGuide.GetFeature \ -# --set grpc.endpoints.getFeature.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ -# --set grpc.endpoints.listFeatures.call=routeguide.RouteGuide.ListFeatures \ -# --set grpc.endpoints.listFeatures.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/server.json \ - -# - name: Try other iter8 k commands -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# sleep 60 -# iter8 k log -# iter8 k delete - -# grpc-experiment2: -# name: gRPC load test 2 -# needs: get_versions -# runs-on: ubuntu-latest -# strategy: -# matrix: -# version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - -# steps: -# - name: Check out code -# uses: actions/checkout@v3 - -# - name: Get modified files in the charts/iter8 folder -# id: modified-files -# uses: tj-actions/changed-files@v35 -# with: -# files: charts/iter8 + - name: iter8 k launch + if: steps.modified-files.outputs.any_modified == 'true' + run: | + iter8 k launch \ + --localChart \ + --chartName charts/iter8 \ + --set "tasks={ready,grpc}" \ + --set ready.deploy=routeguide \ + --set ready.service=routeguide \ + --set ready.timeout=60s \ + --set grpc.host=routeguide.default:50051 \ + --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ + --set grpc.endpoints.getFeature.call=routeguide.RouteGuide.GetFeature \ + --set grpc.endpoints.getFeature.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ + --set grpc.endpoints.listFeatures.call=routeguide.RouteGuide.ListFeatures \ + --set grpc.endpoints.listFeatures.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/server.json \ + + - name: Try other iter8 k commands + if: steps.modified-files.outputs.any_modified == 'true' + run: | + sleep 60 + iter8 k log + iter8 k delete + + grpc-experiment2: + name: gRPC load test 2 + needs: get_versions + runs-on: ubuntu-latest + strategy: + matrix: + version: ${{ fromJson(needs.get_versions.outputs.matrix) }} + + steps: + - name: Check out code + uses: actions/checkout@v3 + + - name: Get modified files in the charts/iter8 folder + id: modified-files + uses: tj-actions/changed-files@v35 + with: + files: charts/iter8 -# - name: Install Iter8 -# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.14 -# if: steps.modified-files.outputs.any_modified == 'true' - -# - name: Start kind cluster ${{ matrix.version }} -# uses: helm/kind-action@v1.5.0 -# if: steps.modified-files.outputs.any_modified == 'true' -# with: -# wait: 300s -# node_image: ${{ matrix.version }} - -# - name: Create hello application -# if: 
steps.modified-files.outputs.any_modified == 'true' -# run: | -# kubectl create deploy hello --image=docker.io/grpc/java-example-hostname:latest --port=50051 -# kubectl expose deploy hello --port=50051 -# kubectl wait --for=condition=available --timeout=60s deploy/hello + - name: Install Iter8 + run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.14 + if: steps.modified-files.outputs.any_modified == 'true' + + - name: Start kind cluster ${{ matrix.version }} + uses: helm/kind-action@v1.5.0 + if: steps.modified-files.outputs.any_modified == 'true' + with: + wait: 300s + node_image: ${{ matrix.version }} + + - name: Create hello application + if: steps.modified-files.outputs.any_modified == 'true' + run: | + kubectl create deploy hello --image=docker.io/grpc/java-example-hostname:latest --port=50051 + kubectl expose deploy hello --port=50051 + kubectl wait --for=condition=available --timeout=60s deploy/hello -# - name: iter8 k launch -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# iter8 k launch \ -# --localChart \ -# --chartName charts/iter8 \ -# --set "tasks={grpc}" \ -# --set grpc.host="hello.default:50051" \ -# --set grpc.call="helloworld.Greeter.SayHello" \ -# --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ - -# - name: Try other iter8 k commands -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# sleep 60 -# iter8 k log -# iter8 k delete - -# controller: -# name: Controller test -# needs: get_versions -# runs-on: ubuntu-latest -# strategy: -# matrix: -# version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - -# steps: -# - name: Check out code -# uses: actions/checkout@v3 - -# - name: Get modified files in the charts/controller folder -# id: modified-files -# uses: tj-actions/changed-files@v35 -# with: -# files: charts/controller - -# - name: Start kind cluster ${{ matrix.version }} -# uses: helm/kind-action@v1.5.0 -# if: steps.modified-files.outputs.any_modified == 'true' -# with: -# wait: 300s -# node_image: ${{ matrix.version }} + - name: iter8 k launch + if: steps.modified-files.outputs.any_modified == 'true' + run: | + iter8 k launch \ + --localChart \ + --chartName charts/iter8 \ + --set "tasks={grpc}" \ + --set grpc.host="hello.default:50051" \ + --set grpc.call="helloworld.Greeter.SayHello" \ + --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ + + - name: Try other iter8 k commands + if: steps.modified-files.outputs.any_modified == 'true' + run: | + sleep 60 + iter8 k log + iter8 k delete + + controller: + name: Controller test + needs: get_versions + runs-on: ubuntu-latest + strategy: + matrix: + version: ${{ fromJson(needs.get_versions.outputs.matrix) }} + + steps: + - name: Check out code + uses: actions/checkout@v3 + + - name: Get modified files in the charts/controller folder + id: modified-files + uses: tj-actions/changed-files@v35 + with: + files: charts/controller + + - name: Start kind cluster ${{ matrix.version }} + uses: helm/kind-action@v1.5.0 + if: steps.modified-files.outputs.any_modified == 'true' + with: + wait: 300s + node_image: ${{ matrix.version }} -# - name: Start controller -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# helm install controller charts/controller -f charts/controller/testdata/values.yaml - -# - name: Check controller -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# kubectl 
rollout status --watch --timeout=60s statefulset.apps/controller + - name: Start controller + if: steps.modified-files.outputs.any_modified == 'true' + run: | + helm install controller charts/controller -f charts/controller/testdata/values.yaml + + - name: Check controller + if: steps.modified-files.outputs.any_modified == 'true' + run: | + kubectl rollout status --watch --timeout=60s statefulset.apps/controller From ada749ffd683865f579ebe6934ed84199699640b Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Thu, 24 Aug 2023 08:53:20 -0400 Subject: [PATCH 104/121] Add controller and check to testcharts.yaml Signed-off-by: Alan Cha --- .github/workflows/testcharts.yaml | 96 ++++++++++++++++++++++++++++--- 1 file changed, 87 insertions(+), 9 deletions(-) diff --git a/.github/workflows/testcharts.yaml b/.github/workflows/testcharts.yaml index edbaf4e37..e038577d6 100644 --- a/.github/workflows/testcharts.yaml +++ b/.github/workflows/testcharts.yaml @@ -72,6 +72,11 @@ jobs: kubectl expose deployment httpbin --type=ClusterIP --port=80 kubectl wait --for=condition=available --timeout=60s deploy/httpbin + - name: Install controller + run: | + helm install iter8 charts/controller --set image=${{ github.event.client_payload.IMAGE_TAG }} --set logLevel=trace + kubectl rollout status --watch --timeout=60s statefulset/iter8 + - name: iter8 k launch if: steps.modified-files.outputs.any_modified == 'true' run: | @@ -80,14 +85,22 @@ jobs: --chartName charts/iter8 \ --set "tasks={http}" \ --set http.url="http://httpbin.default/get" \ + sleep 60 - name: Try other iter8 k commands if: steps.modified-files.outputs.any_modified == 'true' run: | - sleep 60 iter8 k log iter8 k delete + - name: Expose metrics service + run: | + kubectl port-forward service/iter8 8080:8080 & + + - name: Check GET /httpDashboard + run: | + curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f + http-payload-experiment: name: HTTP load test with payload needs: get_versions @@ -124,6 +137,11 @@ jobs: kubectl expose deployment httpbin --type=ClusterIP --port=80 kubectl wait --for=condition=available --timeout=60s deploy/httpbin + - name: Install controller + run: | + helm install iter8 charts/controller --set image=${{ github.event.client_payload.IMAGE_TAG }} --set logLevel=trace + kubectl rollout status --watch --timeout=60s statefulset/iter8 + - name: iter8 k launch if: steps.modified-files.outputs.any_modified == 'true' run: | @@ -133,14 +151,22 @@ jobs: --set "tasks={http}" \ --set http.url="http://httpbin.default/post" \ --set http.payloadStr=hello \ + sleep 60 - name: Try other iter8 k commands if: steps.modified-files.outputs.any_modified == 'true' run: | - sleep 60 iter8 k log iter8 k delete + - name: Expose metrics service + run: | + kubectl port-forward service/iter8 8080:8080 & + + - name: Check GET /httpDashboard + run: | + curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f + http-multiple-experiment: name: HTTP load test with multiple endpoints needs: get_versions @@ -177,6 +203,11 @@ jobs: kubectl expose deployment httpbin --type=ClusterIP --port=80 kubectl wait --for=condition=available --timeout=60s deploy/httpbin + - name: Install controller + run: | + helm install iter8 charts/controller --set image=${{ github.event.client_payload.IMAGE_TAG }} --set logLevel=trace + kubectl rollout status --watch --timeout=60s statefulset/iter8 + - name: iter8 k launch if: steps.modified-files.outputs.any_modified == 'true' run: | @@ -188,14 +219,22 @@ jobs: --set 
http.endpoints.getAnything.url=http://httpbin.default/anything \ --set http.endpoints.post.url=http://httpbin.default/post \ --set http.endpoints.post.payloadStr=hello \ + sleep 60 - name: Try other iter8 k commands if: steps.modified-files.outputs.any_modified == 'true' run: | - sleep 60 iter8 k log iter8 k delete + - name: Expose metrics service + run: | + kubectl port-forward service/iter8 8080:8080 & + + - name: Check GET /httpDashboard + run: | + curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f + grpc-experiment: name: gRPC load test needs: get_versions @@ -240,7 +279,12 @@ jobs: kubectl apply -f grpcurl-routeguide.yaml sleep 180 kubectl logs deploy/sleep - + + - name: Install controller + run: | + helm install iter8 charts/controller --set image=${{ github.event.client_payload.IMAGE_TAG }} --set logLevel=trace + kubectl rollout status --watch --timeout=60s statefulset/iter8 + - name: iter8 k launch if: steps.modified-files.outputs.any_modified == 'true' run: | @@ -255,14 +299,22 @@ jobs: --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ --set grpc.call=routeguide.RouteGuide.GetFeature \ --set grpc.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ + sleep 60 - name: Try other iter8 k commands if: steps.modified-files.outputs.any_modified == 'true' run: | - sleep 60 iter8 k log iter8 k delete + - name: Expose metrics service + run: | + kubectl port-forward service/iter8 8080:8080 & + + - name: Check GET /grpcDashboard + run: | + curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f + grpc-multiple-experiment: name: gRPC load test with multiple endpoints needs: get_versions @@ -307,7 +359,12 @@ jobs: kubectl apply -f grpcurl-routeguide.yaml sleep 180 kubectl logs deploy/sleep - + + - name: Install controller + run: | + helm install iter8 charts/controller --set image=${{ github.event.client_payload.IMAGE_TAG }} --set logLevel=trace + kubectl rollout status --watch --timeout=60s statefulset/iter8 + - name: iter8 k launch if: steps.modified-files.outputs.any_modified == 'true' run: | @@ -324,14 +381,22 @@ jobs: --set grpc.endpoints.getFeature.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ --set grpc.endpoints.listFeatures.call=routeguide.RouteGuide.ListFeatures \ --set grpc.endpoints.listFeatures.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/server.json \ + sleep 60 - name: Try other iter8 k commands if: steps.modified-files.outputs.any_modified == 'true' run: | - sleep 60 iter8 k log iter8 k delete + - name: Expose metrics service + run: | + kubectl port-forward service/iter8 8080:8080 & + + - name: Check GET /grpcDashboard + run: | + curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f + grpc-experiment2: name: gRPC load test 2 needs: get_versions @@ -367,7 +432,12 @@ jobs: kubectl create deploy hello --image=docker.io/grpc/java-example-hostname:latest --port=50051 kubectl expose deploy hello --port=50051 kubectl wait --for=condition=available --timeout=60s deploy/hello - + + - name: Install controller + run: | + helm install iter8 charts/controller --set image=${{ github.event.client_payload.IMAGE_TAG }} --set logLevel=trace + kubectl rollout status --watch --timeout=60s statefulset/iter8 + - name: iter8 k launch if: steps.modified-files.outputs.any_modified == 'true' 
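        # The "Install controller" steps added in this patch read the test image from
        # github.event.client_payload.IMAGE_TAG. A minimal sketch (an assumption, not part
        # of this patch) of how that payload arrives: the repository-dispatch step in
        # buildpushtestimage.yaml sends it, and for the dispatch to trigger this workflow
        # the repository_dispatch `types` filter must match the `event-type` sent there
        # (`testimage`):
        #
        #   on:
        #     repository_dispatch:
        #       types: [testimage]
        #
        #   helm install iter8 charts/controller \
        #     --set image=${{ github.event.client_payload.IMAGE_TAG }} --set logLevel=trace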
run: | @@ -378,14 +448,22 @@ jobs: --set grpc.host="hello.default:50051" \ --set grpc.call="helloworld.Greeter.SayHello" \ --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ + sleep 60 - name: Try other iter8 k commands if: steps.modified-files.outputs.any_modified == 'true' run: | - sleep 60 iter8 k log iter8 k delete + - name: Expose metrics service + run: | + kubectl port-forward service/iter8 8080:8080 & + + - name: Check GET /grpcDashboard + run: | + curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f + controller: name: Controller test needs: get_versions From 8cf0ca505ea2bf5b4efdef6ed2bbf57f404842a5 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Thu, 24 Aug 2023 08:53:53 -0400 Subject: [PATCH 105/121] Uncomment testkustomize.yaml Signed-off-by: Alan Cha --- .github/workflows/testkustomize.yaml | 208 +++++++++++++-------------- 1 file changed, 104 insertions(+), 104 deletions(-) diff --git a/.github/workflows/testkustomize.yaml b/.github/workflows/testkustomize.yaml index 96b47eeb8..dc17ff7fa 100644 --- a/.github/workflows/testkustomize.yaml +++ b/.github/workflows/testkustomize.yaml @@ -1,106 +1,106 @@ -# name: Test kustomize experiments - -# on: -# pull_request: - -# # Kind versions used to test Iter8 on different versions of Kubernetes -# # From: https://github.com/kubernetes-sigs/kind/releases -# env: -# versions: | -# kindest/node:v1.26.3@sha256:61b92f38dff6ccc29969e7aa154d34e38b89443af1a2c14e6cfbd2df6419c66f -# kindest/node:v1.25.8@sha256:00d3f5314cc35327706776e95b2f8e504198ce59ac545d0200a89e69fce10b7f -# kindest/node:v1.24.12@sha256:1e12918b8bc3d4253bc08f640a231bb0d3b2c5a9b28aa3f2ca1aee93e1e8db16 -# kindest/node:v1.23.17@sha256:e5fd1d9cd7a9a50939f9c005684df5a6d145e8d695e78463637b79464292e66c -# kindest/node:v1.22.17@sha256:c8a828709a53c25cbdc0790c8afe12f25538617c7be879083248981945c38693 -# kindest/node:v1.21.14@sha256:27ef72ea623ee879a25fe6f9982690a3e370c68286f4356bf643467c552a3888 -# kindest/node:v1.27.1@sha256:9915f5629ef4d29f35b478e819249e89cfaffcbfeebda4324e5c01d53d937b09 -# kindest/node:v1.27.0@sha256:c6b22e613523b1af67d4bc8a0c38a4c3ea3a2b8fbc5b367ae36345c9cb844518 - -# jobs: -# # Get the paths for the Helm charts to lint -# get_versions: -# runs-on: ubuntu-latest - -# steps: -# - name: Get the paths for Helm charts to lint -# id: set-matrix -# run: | -# # Serialize versions into JSON array -# jsonVersions=$(jq -ncR '[inputs]' <<< "$versions") -# echo $jsonVersions - -# # Output serialized jsonVersions -# echo "matrix=$jsonVersions" | sed -e "s/,\"\"//" >> $GITHUB_OUTPUT - -# outputs: -# matrix: ${{ steps.set-matrix.outputs.matrix }} - -# controller: -# name: Controller test -# needs: get_versions -# runs-on: ubuntu-latest -# strategy: -# matrix: -# version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - -# steps: -# - name: Check out code -# uses: actions/checkout@v3 - -# - name: Get modified files in the charts/controller folder -# id: modified-files -# uses: tj-actions/changed-files@v35 -# with: -# files: charts/controller - -# - name: Start kind cluster ${{ matrix.version }} -# uses: helm/kind-action@v1.5.0 -# if: steps.modified-files.outputs.any_modified == 'true' -# with: -# wait: 300s -# node_image: ${{ matrix.version }} +name: Test kustomize experiments + +on: + pull_request: + +# Kind versions used to test Iter8 on different versions of Kubernetes +# From: https://github.com/kubernetes-sigs/kind/releases +env: + versions: | + 
kindest/node:v1.26.3@sha256:61b92f38dff6ccc29969e7aa154d34e38b89443af1a2c14e6cfbd2df6419c66f + kindest/node:v1.25.8@sha256:00d3f5314cc35327706776e95b2f8e504198ce59ac545d0200a89e69fce10b7f + kindest/node:v1.24.12@sha256:1e12918b8bc3d4253bc08f640a231bb0d3b2c5a9b28aa3f2ca1aee93e1e8db16 + kindest/node:v1.23.17@sha256:e5fd1d9cd7a9a50939f9c005684df5a6d145e8d695e78463637b79464292e66c + kindest/node:v1.22.17@sha256:c8a828709a53c25cbdc0790c8afe12f25538617c7be879083248981945c38693 + kindest/node:v1.21.14@sha256:27ef72ea623ee879a25fe6f9982690a3e370c68286f4356bf643467c552a3888 + kindest/node:v1.27.1@sha256:9915f5629ef4d29f35b478e819249e89cfaffcbfeebda4324e5c01d53d937b09 + kindest/node:v1.27.0@sha256:c6b22e613523b1af67d4bc8a0c38a4c3ea3a2b8fbc5b367ae36345c9cb844518 + +jobs: + # Get the paths for the Helm charts to lint + get_versions: + runs-on: ubuntu-latest + + steps: + - name: Get the paths for Helm charts to lint + id: set-matrix + run: | + # Serialize versions into JSON array + jsonVersions=$(jq -ncR '[inputs]' <<< "$versions") + echo $jsonVersions + + # Output serialized jsonVersions + echo "matrix=$jsonVersions" | sed -e "s/,\"\"//" >> $GITHUB_OUTPUT + + outputs: + matrix: ${{ steps.set-matrix.outputs.matrix }} + + controller: + name: Controller test + needs: get_versions + runs-on: ubuntu-latest + strategy: + matrix: + version: ${{ fromJson(needs.get_versions.outputs.matrix) }} + + steps: + - name: Check out code + uses: actions/checkout@v3 + + - name: Get modified files in the charts/controller folder + id: modified-files + uses: tj-actions/changed-files@v35 + with: + files: charts/controller + + - name: Start kind cluster ${{ matrix.version }} + uses: helm/kind-action@v1.5.0 + if: steps.modified-files.outputs.any_modified == 'true' + with: + wait: 300s + node_image: ${{ matrix.version }} -# - name: Start controller -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# kubectl apply -k kustomize/controller/namespaceScoped - -# - name: Check controller -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# kubectl rollout status --watch --timeout=60s statefulset.apps/iter8 - -# controller-clusterScoped: -# name: Controller cluster scoped test -# needs: get_versions -# runs-on: ubuntu-latest -# strategy: -# matrix: -# version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - -# steps: -# - name: Check out code -# uses: actions/checkout@v3 - -# - name: Get modified files in the charts/controller folder -# id: modified-files -# uses: tj-actions/changed-files@v35 -# with: -# files: charts/controller - -# - name: Start kind cluster ${{ matrix.version }} -# uses: helm/kind-action@v1.5.0 -# if: steps.modified-files.outputs.any_modified == 'true' -# with: -# wait: 300s -# node_image: ${{ matrix.version }} + - name: Start controller + if: steps.modified-files.outputs.any_modified == 'true' + run: | + kubectl apply -k kustomize/controller/namespaceScoped + + - name: Check controller + if: steps.modified-files.outputs.any_modified == 'true' + run: | + kubectl rollout status --watch --timeout=60s statefulset.apps/iter8 + + controller-clusterScoped: + name: Controller cluster scoped test + needs: get_versions + runs-on: ubuntu-latest + strategy: + matrix: + version: ${{ fromJson(needs.get_versions.outputs.matrix) }} + + steps: + - name: Check out code + uses: actions/checkout@v3 + + - name: Get modified files in the charts/controller folder + id: modified-files + uses: tj-actions/changed-files@v35 + with: + files: charts/controller + + - name: Start kind cluster ${{ 
matrix.version }} + uses: helm/kind-action@v1.5.0 + if: steps.modified-files.outputs.any_modified == 'true' + with: + wait: 300s + node_image: ${{ matrix.version }} -# - name: Start controller -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# kubectl apply -k kustomize/controller/clusterScoped - -# - name: Check controller -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# kubectl rollout status --watch --timeout=60s statefulset.apps/iter8 \ No newline at end of file + - name: Start controller + if: steps.modified-files.outputs.any_modified == 'true' + run: | + kubectl apply -k kustomize/controller/clusterScoped + + - name: Check controller + if: steps.modified-files.outputs.any_modified == 'true' + run: | + kubectl rollout status --watch --timeout=60s statefulset.apps/iter8 \ No newline at end of file From 6413d3974d4f39d6a77869e311686c8329bf8a01 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Thu, 24 Aug 2023 09:09:07 -0400 Subject: [PATCH 106/121] Uncomment testcharts.yaml Signed-off-by: Alan Cha --- .github/workflows/testcharts.yaml | 15 +- .github/workflows/testperformance.yaml | 386 ++++++++++++------------- 2 files changed, 200 insertions(+), 201 deletions(-) diff --git a/.github/workflows/testcharts.yaml b/.github/workflows/testcharts.yaml index e038577d6..a299ff3f5 100644 --- a/.github/workflows/testcharts.yaml +++ b/.github/workflows/testcharts.yaml @@ -1,8 +1,7 @@ name: Tests to ensure that changes to charts do not break user experience on: - repository_dispatch: - types: [buildimage] + pull_request: # Kind versions used to test Iter8 on different versions of Kubernetes # From: https://github.com/kubernetes-sigs/kind/releases @@ -74,7 +73,7 @@ jobs: - name: Install controller run: | - helm install iter8 charts/controller --set image=${{ github.event.client_payload.IMAGE_TAG }} --set logLevel=trace + helm install iter8 charts/controller --set logLevel=trace kubectl rollout status --watch --timeout=60s statefulset/iter8 - name: iter8 k launch @@ -139,7 +138,7 @@ jobs: - name: Install controller run: | - helm install iter8 charts/controller --set image=${{ github.event.client_payload.IMAGE_TAG }} --set logLevel=trace + helm install iter8 charts/controller --set logLevel=trace kubectl rollout status --watch --timeout=60s statefulset/iter8 - name: iter8 k launch @@ -205,7 +204,7 @@ jobs: - name: Install controller run: | - helm install iter8 charts/controller --set image=${{ github.event.client_payload.IMAGE_TAG }} --set logLevel=trace + helm install iter8 charts/controller --set logLevel=trace kubectl rollout status --watch --timeout=60s statefulset/iter8 - name: iter8 k launch @@ -282,7 +281,7 @@ jobs: - name: Install controller run: | - helm install iter8 charts/controller --set image=${{ github.event.client_payload.IMAGE_TAG }} --set logLevel=trace + helm install iter8 charts/controller --set logLevel=trace kubectl rollout status --watch --timeout=60s statefulset/iter8 - name: iter8 k launch @@ -362,7 +361,7 @@ jobs: - name: Install controller run: | - helm install iter8 charts/controller --set image=${{ github.event.client_payload.IMAGE_TAG }} --set logLevel=trace + helm install iter8 charts/controller --set logLevel=trace kubectl rollout status --watch --timeout=60s statefulset/iter8 - name: iter8 k launch @@ -435,7 +434,7 @@ jobs: - name: Install controller run: | - helm install iter8 charts/controller --set image=${{ github.event.client_payload.IMAGE_TAG }} --set logLevel=trace + helm install iter8 charts/controller --set 
logLevel=trace kubectl rollout status --watch --timeout=60s statefulset/iter8 - name: iter8 k launch diff --git a/.github/workflows/testperformance.yaml b/.github/workflows/testperformance.yaml index 705f04d9f..cb4a52561 100644 --- a/.github/workflows/testperformance.yaml +++ b/.github/workflows/testperformance.yaml @@ -1,211 +1,211 @@ -# name: Performance tests to assess the functionality of the latest version of Iter8 (master branch) +name: Performance tests to assess the functionality of the latest version of Iter8 (master branch) -# on: -# pull_request: +on: + pull_request: -# jobs: -# unit-test: -# name: Unit test -# runs-on: ubuntu-latest -# steps: -# - name: Install Go -# uses: actions/setup-go@v4 -# with: -# go-version: 1.19 -# - name: Check out code into the Go module directory -# uses: actions/checkout@v3 -# - name: Test and compute coverage -# run: make coverage # includes vet and lint -# - name: Enforce coverage -# run: | -# export COVERAGE=$(go tool cover -func coverage.out | grep total | awk '{print substr($3, 1, length($3)-1)}') -# echo "code coverage is at ${COVERAGE}" -# if [ 1 -eq "$(echo "${COVERAGE} > 76.0" | bc)" ]; then \ -# echo "all good... coverage is above 76.0%"; -# else \ -# echo "not good... coverage is not above 76.0%"; -# exit 1 -# fi +jobs: + unit-test: + name: Unit test + runs-on: ubuntu-latest + steps: + - name: Install Go + uses: actions/setup-go@v4 + with: + go-version: 1.19 + - name: Check out code into the Go module directory + uses: actions/checkout@v3 + - name: Test and compute coverage + run: make coverage # includes vet and lint + - name: Enforce coverage + run: | + export COVERAGE=$(go tool cover -func coverage.out | grep total | awk '{print substr($3, 1, length($3)-1)}') + echo "code coverage is at ${COVERAGE}" + if [ 1 -eq "$(echo "${COVERAGE} > 76.0" | bc)" ]; then \ + echo "all good... coverage is above 76.0%"; + else \ + echo "not good... 
coverage is not above 76.0%"; + exit 1 + fi -# kubernetes-load-test-http: -# name: HTTP load test (with readiness) at the edge of Kubernetes -# runs-on: ubuntu-latest -# steps: -# - name: Install Go -# uses: actions/setup-go@v4 -# with: -# go-version: 1.19 -# - name: Check out code into the Go module directory -# uses: actions/checkout@v3 -# - name: Build and install Iter8 -# run: make install -# - name: Start kind cluster -# uses: helm/kind-action@v1.5.0 -# with: -# wait: 300s + kubernetes-load-test-http: + name: HTTP load test (with readiness) at the edge of Kubernetes + runs-on: ubuntu-latest + steps: + - name: Install Go + uses: actions/setup-go@v4 + with: + go-version: 1.19 + - name: Check out code into the Go module directory + uses: actions/checkout@v3 + - name: Build and install Iter8 + run: make install + - name: Start kind cluster + uses: helm/kind-action@v1.5.0 + with: + wait: 300s -# - name: Create httpbin application -# run: | -# # kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 -# kubectl expose deploy httpbin --port=80 + - name: Create httpbin application + run: | +# kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 + kubectl expose deploy httpbin --port=80 -# - name: load-test-http in Kubernetes -# run: | -# iter8 k launch --localChart --chartName charts/iter8 \ -# --set "tasks={ready,http}" \ -# --set ready.deploy=httpbin \ -# --set ready.service=httpbin \ -# --set ready.timeout=60s \ -# --set http.url=http://httpbin.default/get \ -# --set http.duration="3s" -# sleep 60 -# iter8 k log -# iter8 k delete + - name: load-test-http in Kubernetes + run: | + iter8 k launch --localChart --chartName charts/iter8 \ + --set "tasks={ready,http}" \ + --set ready.deploy=httpbin \ + --set ready.service=httpbin \ + --set ready.timeout=60s \ + --set http.url=http://httpbin.default/get \ + --set http.duration="3s" + sleep 60 + iter8 k log + iter8 k delete -# - name: load-test-http with payload in Kubernetes -# run: | -# iter8 k launch --localChart --chartName charts/iter8 \ -# --set "tasks={ready,http}" \ -# --set ready.deploy=httpbin \ -# --set ready.service=httpbin \ -# --set ready.timeout=60s \ -# --set http.url=http://httpbin.default/post \ -# --set http.payloadStr=hello \ -# --set http.duration="3s" -# sleep 60 -# iter8 k log -# iter8 k delete + - name: load-test-http with payload in Kubernetes + run: | + iter8 k launch --localChart --chartName charts/iter8 \ + --set "tasks={ready,http}" \ + --set ready.deploy=httpbin \ + --set ready.service=httpbin \ + --set ready.timeout=60s \ + --set http.url=http://httpbin.default/post \ + --set http.payloadStr=hello \ + --set http.duration="3s" + sleep 60 + iter8 k log + iter8 k delete -# - name: load-test-http with multiple endpoints in Kubernetes -# run: | -# iter8 k launch --localChart --chartName charts/iter8 \ -# --set "tasks={ready,http}" \ -# --set ready.deploy=httpbin \ -# --set ready.service=httpbin \ -# --set ready.timeout=60s \ -# --set http.endpoints.get.url=http://httpbin.default/get \ -# --set http.endpoints.getAnything.url=http://httpbin.default/anything \ -# --set http.endpoints.post.url=http://httpbin.default/post \ -# --set http.endpoints.post.payloadStr=hello \ -# --set http.duration="3s" -# sleep 60 -# iter8 k log -# iter8 k delete + - name: load-test-http with multiple endpoints in Kubernetes + run: | + iter8 k launch --localChart --chartName charts/iter8 \ + --set "tasks={ready,http}" \ + --set ready.deploy=httpbin \ + --set ready.service=httpbin \ + --set ready.timeout=60s \ + --set 
http.endpoints.get.url=http://httpbin.default/get \ + --set http.endpoints.getAnything.url=http://httpbin.default/anything \ + --set http.endpoints.post.url=http://httpbin.default/post \ + --set http.endpoints.post.payloadStr=hello \ + --set http.duration="3s" + sleep 60 + iter8 k log + iter8 k delete -# kubernetes-load-test-grpc: -# name: gRPC load test with various URLs -# runs-on: ubuntu-latest -# steps: -# - name: Install Go -# uses: actions/setup-go@v4 -# with: -# go-version: 1.19 -# - name: Check out code into the Go module directory -# uses: actions/checkout@v3 -# - name: Build and install Iter8 -# run: make install + kubernetes-load-test-grpc: + name: gRPC load test with various URLs + runs-on: ubuntu-latest + steps: + - name: Install Go + uses: actions/setup-go@v4 + with: + go-version: 1.19 + - name: Check out code into the Go module directory + uses: actions/checkout@v3 + - name: Build and install Iter8 + run: make install -# - name: Start kind cluster -# uses: helm/kind-action@v1.5.0 -# with: -# wait: 300s + - name: Start kind cluster + uses: helm/kind-action@v1.5.0 + with: + wait: 300s -# - name: Create routeguide application -# run: | -# kubectl create deployment routeguide --image=golang --port=50051 \ -# -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run server/server.go" -# kubectl expose deployment routeguide --port=50051 -# kubectl wait --for=condition=available --timeout=60s deployment/routeguide + - name: Create routeguide application + run: | + kubectl create deployment routeguide --image=golang --port=50051 \ + -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run server/server.go" + kubectl expose deployment routeguide --port=50051 + kubectl wait --for=condition=available --timeout=60s deployment/routeguide -# - name: Test gRPC service with grpcurl -# run: | -# curl -sO https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml -# kubectl apply -f grpcurl-routeguide.yaml -# sleep 180 -# kubectl logs deploy/sleep + - name: Test gRPC service with grpcurl + run: | + curl -sO https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml + kubectl apply -f grpcurl-routeguide.yaml + sleep 180 + kubectl logs deploy/sleep -# - name: load test grpc service -# run: | -# iter8 k launch --localChart --chartName charts/iter8 \ -# --set "tasks={ready,grpc}" \ -# --set ready.deploy=routeguide \ -# --set ready.service=routeguide \ -# --set ready.timeout=60s \ -# --set grpc.host=routeguide.default:50051 \ -# --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ -# --set grpc.call=routeguide.RouteGuide.GetFeature \ -# --set grpc.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json -# sleep 60 -# iter8 k log -# iter8 k delete + - name: load test grpc service + run: | + iter8 k launch --localChart --chartName charts/iter8 \ + --set "tasks={ready,grpc}" \ + --set ready.deploy=routeguide \ + --set ready.service=routeguide \ + --set ready.timeout=60s \ + --set grpc.host=routeguide.default:50051 \ + --set 
grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ + --set grpc.call=routeguide.RouteGuide.GetFeature \ + --set grpc.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json + sleep 60 + iter8 k log + iter8 k delete -# - name: load test grpc service with multiple endpoints -# run: | -# iter8 k launch --localChart --chartName charts/iter8 \ -# --set "tasks={ready,grpc}" \ -# --set ready.deploy=routeguide \ -# --set ready.service=routeguide \ -# --set ready.timeout=60s \ -# --set grpc.host=routeguide.default:50051 \ -# --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ -# --set grpc.endpoints.getFeature.call=routeguide.RouteGuide.GetFeature \ -# --set grpc.endpoints.getFeature.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ -# --set grpc.endpoints.listFeatures.call=routeguide.RouteGuide.ListFeatures \ -# --set grpc.endpoints.listFeatures.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/server.json -# sleep 60 -# iter8 k log -# iter8 k delete + - name: load test grpc service with multiple endpoints + run: | + iter8 k launch --localChart --chartName charts/iter8 \ + --set "tasks={ready,grpc}" \ + --set ready.deploy=routeguide \ + --set ready.service=routeguide \ + --set ready.timeout=60s \ + --set grpc.host=routeguide.default:50051 \ + --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ + --set grpc.endpoints.getFeature.call=routeguide.RouteGuide.GetFeature \ + --set grpc.endpoints.getFeature.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ + --set grpc.endpoints.listFeatures.call=routeguide.RouteGuide.ListFeatures \ + --set grpc.endpoints.listFeatures.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/server.json + sleep 60 + iter8 k log + iter8 k delete -# kubernetes-load-test-grpc2: -# name: gRPC load test 2 with various URLs -# runs-on: ubuntu-latest -# steps: -# - name: Install Go -# uses: actions/setup-go@v4 -# with: -# go-version: 1.19 -# - name: Check out code into the Go module directory -# uses: actions/checkout@v3 -# - name: Build and install Iter8 -# run: make install + kubernetes-load-test-grpc2: + name: gRPC load test 2 with various URLs + runs-on: ubuntu-latest + steps: + - name: Install Go + uses: actions/setup-go@v4 + with: + go-version: 1.19 + - name: Check out code into the Go module directory + uses: actions/checkout@v3 + - name: Build and install Iter8 + run: make install -# - name: Start kind cluster -# uses: helm/kind-action@v1.5.0 -# with: -# wait: 300s + - name: Start kind cluster + uses: helm/kind-action@v1.5.0 + with: + wait: 300s -# - name: Create hello application -# run: | -# kubectl create deploy hello --image=docker.io/grpc/java-example-hostname:latest --port=50051 -# kubectl expose deploy hello --port=50051 + - name: Create hello application + run: | + kubectl create deploy hello --image=docker.io/grpc/java-example-hostname:latest --port=50051 + kubectl expose deploy hello --port=50051 -# - name: load test grpc service with protoURL -# run: | -# iter8 k launch --localChart --chartName charts/iter8 \ -# --set "tasks={ready,grpc}" \ -# --set ready.deploy=hello \ -# --set ready.service=hello \ -# --set ready.timeout=60s \ 
-# --set grpc.host="hello.default:50051" \ -# --set grpc.call="helloworld.Greeter.SayHello" \ -# --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ -# --set grpc.data.name="frodo" -# sleep 60 -# iter8 k log -# iter8 k delete + - name: load test grpc service with protoURL + run: | + iter8 k launch --localChart --chartName charts/iter8 \ + --set "tasks={ready,grpc}" \ + --set ready.deploy=hello \ + --set ready.service=hello \ + --set ready.timeout=60s \ + --set grpc.host="hello.default:50051" \ + --set grpc.call="helloworld.Greeter.SayHello" \ + --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ + --set grpc.data.name="frodo" + sleep 60 + iter8 k log + iter8 k delete -# - name: load test grpc service with proto/data/metadata URLs -# run: | -# iter8 k launch --localChart --chartName charts/iter8 \ -# --set "tasks={ready,grpc}" \ -# --set ready.deploy=hello \ -# --set ready.service=hello \ -# --set ready.timeout=60s \ -# --set grpc.host="hello.default:50051" \ -# --set grpc.call="helloworld.Greeter.SayHello" \ -# --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ -# --set grpc.dataURL="https://gist.githubusercontent.com/sriumcp/3f3178f4b698af6696c925832e51b0ba/raw/d02aa698d34aa2067f7a2f6afb4ceb616b0db822/name.json" \ -# --set grpc.metadataURL="https://gist.githubusercontent.com/sriumcp/3f3178f4b698af6696c925832e51b0ba/raw/d02aa698d34aa2067f7a2f6afb4ceb616b0db822/name.json" -# sleep 60 -# iter8 k log -# iter8 k delete + - name: load test grpc service with proto/data/metadata URLs + run: | + iter8 k launch --localChart --chartName charts/iter8 \ + --set "tasks={ready,grpc}" \ + --set ready.deploy=hello \ + --set ready.service=hello \ + --set ready.timeout=60s \ + --set grpc.host="hello.default:50051" \ + --set grpc.call="helloworld.Greeter.SayHello" \ + --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ + --set grpc.dataURL="https://gist.githubusercontent.com/sriumcp/3f3178f4b698af6696c925832e51b0ba/raw/d02aa698d34aa2067f7a2f6afb4ceb616b0db822/name.json" \ + --set grpc.metadataURL="https://gist.githubusercontent.com/sriumcp/3f3178f4b698af6696c925832e51b0ba/raw/d02aa698d34aa2067f7a2f6afb4ceb616b0db822/name.json" + sleep 60 + iter8 k log + iter8 k delete From fa69836cd94cee950379d2f65760be2059bd693a Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Thu, 24 Aug 2023 09:22:51 -0400 Subject: [PATCH 107/121] Add build-push-test-image to testperformance.yaml Signed-off-by: Alan Cha --- .github/workflows/buildpushtestimage.yaml | 73 ---------- .github/workflows/testperformance.yaml | 158 +++++++++++++++++++++- 2 files changed, 155 insertions(+), 76 deletions(-) delete mode 100644 .github/workflows/buildpushtestimage.yaml diff --git a/.github/workflows/buildpushtestimage.yaml b/.github/workflows/buildpushtestimage.yaml deleted file mode 100644 index 51e159859..000000000 --- a/.github/workflows/buildpushtestimage.yaml +++ /dev/null @@ -1,73 +0,0 @@ -name: Build and push test Docker image - -on: - pull_request: - branches: - - master - -jobs: - buildpush: - runs-on: ubuntu-latest - steps: - - name: Install Go - uses: actions/setup-go@v4 - with: - go-version: 1.19 - - uses: actions/checkout@v3 - - name: Create Dockerfile.dev - run: | - echo "# Small linux image with iter8 binary - FROM debian:buster-slim - - # 
Install curl - RUN apt-get update && apt-get install -y curl - - # Download iter8 compressed binary - # use COPY instead of wget - COPY _dist/iter8-linux-amd64.tar.gz iter8-linux-amd64.tar.gz - - # Extract iter8 - RUN tar -xvf iter8-linux-amd64.tar.gz - - # Extract iter8 - RUN mv linux-amd64/iter8 /bin/iter8" > Dockerfile.dev - - name: Get version and tag - id: versionTag - run: | - # GitHub ref name - VERSION=${GITHUB_REF_NAME} - echo "VERSION: $VERSION" - echo "VERSION=${VERSION}" >> "$GITHUB_ENV" - - name: Get owner - run: | - OWNER_REPO=${{ github.repository }} - OWNER=$(echo $OWNER_REPO | cut -f1 -d/) - if [[ "$OWNER" == "iter8-tools" ]]; then - OWNER=iter8 - fi - echo "OWNER: $OWNER" - echo "OWNER=$OWNER" >> $GITHUB_ENV - - name: Get image tag - id: imageTag - run: | - # Docker image - IMAGE_TAG=$(echo ${OWNER}/iter8-pr:${VERSION}) - echo "IMAGE_TAG: $IMAGE_TAG" - echo "IMAGE_TAG=${IMAGE_TAG}" >> "$GITHUB_ENV" - - uses: docker/setup-buildx-action@v2 - - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_SECRET }} - - uses: docker/build-push-action@v4 - with: - platforms: linux/amd64 - tags: ${{ env.IMAGE_TAG }} - push: true - file: "{context}/Dockerfile.dev" - - name: Repository dispatch to trigger tests - uses: peter-evans/repository-dispatch@v1 - with: - token: ${{ secrets.PAT }} - event-type: testimage - client-payload: '{"VERSION": "${{ env.VERSION }}", "IMAGE_TAG": "${{ env.IMAGE_TAG }}"}' \ No newline at end of file diff --git a/.github/workflows/testperformance.yaml b/.github/workflows/testperformance.yaml index cb4a52561..383042c24 100644 --- a/.github/workflows/testperformance.yaml +++ b/.github/workflows/testperformance.yaml @@ -27,8 +27,75 @@ jobs: exit 1 fi + build-push-test-image: + name: Build and push test Docker image + runs-on: ubuntu-latest + steps: + - name: Install Go + uses: actions/setup-go@v4 + with: + go-version: 1.19 + - uses: actions/checkout@v3 + - name: Create Dockerfile.dev + run: | + echo "# Small linux image with iter8 binary + FROM debian:buster-slim + + # Install curl + RUN apt-get update && apt-get install -y curl + + # Download iter8 compressed binary + # use COPY instead of wget + COPY _dist/iter8-linux-amd64.tar.gz iter8-linux-amd64.tar.gz + + # Extract iter8 + RUN tar -xvf iter8-linux-amd64.tar.gz + + # Extract iter8 + RUN mv linux-amd64/iter8 /bin/iter8" > Dockerfile.dev + - name: Get version and tag + id: versionTag + run: | + # GitHub ref name + VERSION=${GITHUB_REF_NAME} + echo "VERSION: $VERSION" + echo "VERSION=${VERSION}" >> "$GITHUB_ENV" + echo "VERSION=${VERSION}" >> "$GITHUB_OUTPUT" + - name: Get owner + run: | + OWNER_REPO=${{ github.repository }} + OWNER=$(echo $OWNER_REPO | cut -f1 -d/) + if [[ "$OWNER" == "iter8-tools" ]]; then + OWNER=iter8 + fi + echo "OWNER: $OWNER" + echo "OWNER=$OWNER" >> $GITHUB_ENV + - name: Get image tag + id: imageTag + run: | + # Docker image + IMAGE_TAG=$(echo ${OWNER}/iter8-pr:${VERSION}) + echo "IMAGE_TAG: $IMAGE_TAG" + echo "IMAGE_TAG=${IMAGE_TAG}" >> "$GITHUB_ENV" + echo "IMAGE_TAG=${IMAGE_TAG}" >> "$GITHUB_OUTPUT" + - uses: docker/setup-buildx-action@v2 + - uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_SECRET }} + - uses: docker/build-push-action@v4 + with: + platforms: linux/amd64 + tags: ${{ env.IMAGE_TAG }} + push: true + file: "{context}/Dockerfile.dev" + outputs: + VERSION: ${{ steps.versionTag.outputs.VERSION }} + IMAGE_TAG: ${{ 
steps.imageTag.outputs.IMAGE_TAG }} + kubernetes-load-test-http: name: HTTP load test (with readiness) at the edge of Kubernetes + needs: build-push-test-image runs-on: ubuntu-latest steps: - name: Install Go @@ -46,12 +113,22 @@ jobs: - name: Create httpbin application run: | -# kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 + kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 kubectl expose deploy httpbin --port=80 + + - name: Install controller + run: | + helm install iter8 charts/controller --set image=${{ needs.assets.outputs.IMAGE_TAG }} --set logLevel=trace + kubectl rollout status --watch --timeout=60s statefulset/iter8 + + - name: Expose metrics service + run: | + kubectl port-forward service/iter8 8080:8080 & - name: load-test-http in Kubernetes run: | iter8 k launch --localChart --chartName charts/iter8 \ + --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ --set "tasks={ready,http}" \ --set ready.deploy=httpbin \ --set ready.service=httpbin \ @@ -59,12 +136,20 @@ jobs: --set http.url=http://httpbin.default/get \ --set http.duration="3s" sleep 60 + + - name: Try other iter8 k commands + run: | iter8 k log iter8 k delete - + + - name: Check GET /httpDashboard + run: | + curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f + - name: load-test-http with payload in Kubernetes run: | iter8 k launch --localChart --chartName charts/iter8 \ + --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ --set "tasks={ready,http}" \ --set ready.deploy=httpbin \ --set ready.service=httpbin \ @@ -73,12 +158,20 @@ jobs: --set http.payloadStr=hello \ --set http.duration="3s" sleep 60 + + - name: Try other iter8 k commands + run: | iter8 k log iter8 k delete + - name: Check GET /httpDashboard + run: | + curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f + - name: load-test-http with multiple endpoints in Kubernetes run: | iter8 k launch --localChart --chartName charts/iter8 \ + --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ --set "tasks={ready,http}" \ --set ready.deploy=httpbin \ --set ready.service=httpbin \ @@ -89,11 +182,19 @@ jobs: --set http.endpoints.post.payloadStr=hello \ --set http.duration="3s" sleep 60 + + - name: Try other iter8 k commands + run: | iter8 k log iter8 k delete + - name: Check GET /httpDashboard + run: | + curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f + kubernetes-load-test-grpc: name: gRPC load test with various URLs + needs: build-push-test-image runs-on: ubuntu-latest steps: - name: Install Go @@ -124,9 +225,19 @@ jobs: sleep 180 kubectl logs deploy/sleep + - name: Install controller + run: | + helm install iter8 charts/controller --set image=${{ needs.assets.outputs.IMAGE_TAG }} --set logLevel=trace + kubectl rollout status --watch --timeout=60s statefulset/iter8 + + - name: Expose metrics service + run: | + kubectl port-forward service/iter8 8080:8080 & + - name: load test grpc service run: | iter8 k launch --localChart --chartName charts/iter8 \ + --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ --set "tasks={ready,grpc}" \ --set ready.deploy=routeguide \ --set ready.service=routeguide \ @@ -136,12 +247,20 @@ jobs: --set grpc.call=routeguide.RouteGuide.GetFeature \ --set grpc.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json sleep 60 + + - name: Try other iter8 k commands + run: | iter8 k log iter8 k delete + - name: Check GET /grpcDashboard + run: | + 
curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f + - name: load test grpc service with multiple endpoints run: | iter8 k launch --localChart --chartName charts/iter8 \ + --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ --set "tasks={ready,grpc}" \ --set ready.deploy=routeguide \ --set ready.service=routeguide \ @@ -153,11 +272,19 @@ jobs: --set grpc.endpoints.listFeatures.call=routeguide.RouteGuide.ListFeatures \ --set grpc.endpoints.listFeatures.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/server.json sleep 60 + + - name: Try other iter8 k commands + run: | iter8 k log iter8 k delete + - name: Check GET /grpcDashboard + run: | + curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f + kubernetes-load-test-grpc2: name: gRPC load test 2 with various URLs + needs: build-push-test-image runs-on: ubuntu-latest steps: - name: Install Go @@ -179,9 +306,19 @@ jobs: kubectl create deploy hello --image=docker.io/grpc/java-example-hostname:latest --port=50051 kubectl expose deploy hello --port=50051 + - name: Install controller + run: | + helm install iter8 charts/controller --set image=${{ needs.assets.outputs.IMAGE_TAG }} --set logLevel=trace + kubectl rollout status --watch --timeout=60s statefulset/iter8 + + - name: Expose metrics service + run: | + kubectl port-forward service/iter8 8080:8080 & + - name: load test grpc service with protoURL run: | iter8 k launch --localChart --chartName charts/iter8 \ + --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ --set "tasks={ready,grpc}" \ --set ready.deploy=hello \ --set ready.service=hello \ @@ -191,12 +328,20 @@ jobs: --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ --set grpc.data.name="frodo" sleep 60 + + - name: Try other iter8 k commands + run: | iter8 k log iter8 k delete - + + - name: Check GET /grpcDashboard + run: | + curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f + - name: load test grpc service with proto/data/metadata URLs run: | iter8 k launch --localChart --chartName charts/iter8 \ + --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ --set "tasks={ready,grpc}" \ --set ready.deploy=hello \ --set ready.service=hello \ @@ -207,5 +352,12 @@ jobs: --set grpc.dataURL="https://gist.githubusercontent.com/sriumcp/3f3178f4b698af6696c925832e51b0ba/raw/d02aa698d34aa2067f7a2f6afb4ceb616b0db822/name.json" \ --set grpc.metadataURL="https://gist.githubusercontent.com/sriumcp/3f3178f4b698af6696c925832e51b0ba/raw/d02aa698d34aa2067f7a2f6afb4ceb616b0db822/name.json" sleep 60 + + - name: Try other iter8 k commands + run: | iter8 k log iter8 k delete + + - name: Check GET /grpcDashboard + run: | + curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f From b9d4fd628d10bbf12391eff231da1dd01671e9f2 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Thu, 24 Aug 2023 09:25:02 -0400 Subject: [PATCH 108/121] Uncomment assets.yaml Signed-off-by: Alan Cha --- .github/workflows/assets.yaml | 444 +++++++++++++++++----------------- 1 file changed, 222 insertions(+), 222 deletions(-) diff --git a/.github/workflows/assets.yaml b/.github/workflows/assets.yaml index 474901f0b..730d9953e 100644 --- a/.github/workflows/assets.yaml +++ b/.github/workflows/assets.yaml @@ -1,231 +1,231 @@ -# name: Publish binaries and Docker image +name: Publish binaries and Docker image -# on: -# release: -# types: [published] +on: + 
release: + types: [published] -# jobs: -# assets: -# name: Publish binaries -# runs-on: ubuntu-latest -# steps: -# - name: Install Go -# uses: actions/setup-go@v4 -# with: -# go-version: 1.19 -# - uses: actions/checkout@v3 -# - name: Build binaries -# run: | -# VERSION=${GITHUB_REF#refs/*/} -# echo "Version: ${VERSION}" -# make dist -# - name: Upload binaries to release -# uses: svenstaro/upload-release-action@v2 -# with: -# repo_token: ${{ secrets.GITHUB_TOKEN }} -# file: _dist/iter8-*.tar.gz -# tag: ${{ github.ref }} -# overwrite: true -# file_glob: true -# - name: Create checksum -# run: | -# VERSION=${GITHUB_REF#refs/*/} -# echo "VERSION=$VERSION" >> $GITHUB_ENV -# wget https://github.com/iter8-tools/iter8/archive/refs/tags/${VERSION}.zip -# sha256sum ${VERSION}.zip > checksum.txt -# wget https://github.com/iter8-tools/iter8/archive/refs/tags/${VERSION}.tar.gz -# sha256sum ${VERSION}.tar.gz >> checksum.txt -# cd _dist -# for f in iter8-*.tar.gz -# do -# sha256sum ${f} >> ../checksum.txt -# done -# # pick up darwin checksum and export it -# echo "SHAFORMAC=$(grep darwin ../checksum.txt | awk '{print $1}')" >> $GITHUB_ENV -# - name: Upload checksum to release -# uses: svenstaro/upload-release-action@v2 -# with: -# repo_token: ${{ secrets.GITHUB_TOKEN }} -# asset_name: checksum.txt -# file: checksum.txt -# tag: ${{ github.ref }} -# overwrite: true +jobs: + assets: + name: Publish binaries + runs-on: ubuntu-latest + steps: + - name: Install Go + uses: actions/setup-go@v4 + with: + go-version: 1.19 + - uses: actions/checkout@v3 + - name: Build binaries + run: | + VERSION=${GITHUB_REF#refs/*/} + echo "Version: ${VERSION}" + make dist + - name: Upload binaries to release + uses: svenstaro/upload-release-action@v2 + with: + repo_token: ${{ secrets.GITHUB_TOKEN }} + file: _dist/iter8-*.tar.gz + tag: ${{ github.ref }} + overwrite: true + file_glob: true + - name: Create checksum + run: | + VERSION=${GITHUB_REF#refs/*/} + echo "VERSION=$VERSION" >> $GITHUB_ENV + wget https://github.com/iter8-tools/iter8/archive/refs/tags/${VERSION}.zip + sha256sum ${VERSION}.zip > checksum.txt + wget https://github.com/iter8-tools/iter8/archive/refs/tags/${VERSION}.tar.gz + sha256sum ${VERSION}.tar.gz >> checksum.txt + cd _dist + for f in iter8-*.tar.gz + do + sha256sum ${f} >> ../checksum.txt + done + # pick up darwin checksum and export it + echo "SHAFORMAC=$(grep darwin ../checksum.txt | awk '{print $1}')" >> $GITHUB_ENV + - name: Upload checksum to release + uses: svenstaro/upload-release-action@v2 + with: + repo_token: ${{ secrets.GITHUB_TOKEN }} + asset_name: checksum.txt + file: checksum.txt + tag: ${{ github.ref }} + overwrite: true -# build-and-push: -# name: Push Iter8 image to Docker Hub -# needs: assets -# runs-on: ubuntu-latest -# steps: -# - uses: actions/checkout@v3 -# with: -# fetch-depth: 0 -# - name: Get version -# run: | -# tagref=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,') -# # Strip "v" prefix from tagref -# echo "VERSION=$(echo $tagref | sed -e 's/^v//')" >> $GITHUB_ENV -# echo "MAJOR_MINOR_VERSION=$(echo $tagref | sed -e 's/^v//' -e 's,\([0-9]*\.[0-9]*\)\.\([0-9]*\),\1,')" >> $GITHUB_ENV -# - name: Get owner -# run: | -# ownerrepo=${{ github.repository }} -# owner=$(echo $ownerrepo | cut -f1 -d/) -# if [[ "$owner" == "iter8-tools" ]]; then -# owner=iter8 -# fi -# echo "OWNER=$owner" >> $GITHUB_ENV -# - uses: docker/setup-buildx-action@v2 -# - uses: docker/login-action@v2 -# with: -# username: ${{ secrets.DOCKERHUB_USERNAME }} -# password: ${{ secrets.DOCKERHUB_SECRET }} -# - 
uses: docker/build-push-action@v4 -# with: -# platforms: linux/amd64,linux/arm64 -# tags: ${{ env.OWNER }}/iter8:${{ env.VERSION }},${{ env.OWNER }}/iter8:${{ env.MAJOR_MINOR_VERSION }},${{ env.OWNER }}/iter8:latest -# push: true -# build-args: | -# TAG=v${{ env.VERSION }} + build-and-push: + name: Push Iter8 image to Docker Hub + needs: assets + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + - name: Get version + run: | + tagref=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,') + # Strip "v" prefix from tagref + echo "VERSION=$(echo $tagref | sed -e 's/^v//')" >> $GITHUB_ENV + echo "MAJOR_MINOR_VERSION=$(echo $tagref | sed -e 's/^v//' -e 's,\([0-9]*\.[0-9]*\)\.\([0-9]*\),\1,')" >> $GITHUB_ENV + - name: Get owner + run: | + ownerrepo=${{ github.repository }} + owner=$(echo $ownerrepo | cut -f1 -d/) + if [[ "$owner" == "iter8-tools" ]]; then + owner=iter8 + fi + echo "OWNER=$owner" >> $GITHUB_ENV + - uses: docker/setup-buildx-action@v2 + - uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_SECRET }} + - uses: docker/build-push-action@v4 + with: + platforms: linux/amd64,linux/arm64 + tags: ${{ env.OWNER }}/iter8:${{ env.VERSION }},${{ env.OWNER }}/iter8:${{ env.MAJOR_MINOR_VERSION }},${{ env.OWNER }}/iter8:latest + push: true + build-args: | + TAG=v${{ env.VERSION }} -# kubernetes-http-experiment: -# name: Kubernetes HTTP load test -# needs: build-and-push -# runs-on: ubuntu-latest -# steps: -# - name: Install Iter8 -# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 -# - name: Start kind cluster -# uses: helm/kind-action@v1.5.0 -# with: -# wait: 300s -# - name: Create httpbin application -# run: | -# kubectl create deployment httpbin --image=kennethreitz/httpbin -# kubectl expose deployment httpbin --type=ClusterIP --port=80 -# kubectl wait --for=condition=available --timeout=60s deploy/httpbin -# - name: iter8 k launch -# run: | -# iter8 k launch \ -# --set tasks={http} \ -# --set http.url="http://httpbin.default/get" -# - name: try other iter8 k commands -# run: | -# sleep 60 -# iter8 k log -# iter8 k delete + kubernetes-http-experiment: + name: Kubernetes HTTP load test + needs: build-and-push + runs-on: ubuntu-latest + steps: + - name: Install Iter8 + run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 + - name: Start kind cluster + uses: helm/kind-action@v1.5.0 + with: + wait: 300s + - name: Create httpbin application + run: | + kubectl create deployment httpbin --image=kennethreitz/httpbin + kubectl expose deployment httpbin --type=ClusterIP --port=80 + kubectl wait --for=condition=available --timeout=60s deploy/httpbin + - name: iter8 k launch + run: | + iter8 k launch \ + --set tasks={http} \ + --set http.url="http://httpbin.default/get" + - name: try other iter8 k commands + run: | + sleep 60 + iter8 k log + iter8 k delete -# kubernetes-grpc-experiment: -# name: Kubernetes gRPC load test -# needs: build-and-push -# runs-on: ubuntu-latest -# steps: -# - name: Install Iter8 -# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 -# - name: Start kind cluster -# uses: helm/kind-action@v1.5.0 -# with: -# wait: 300s -# - name: Create routeguide application -# run: | -# kubectl create deployment routeguide --image=golang --port=50051 \ -# -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run 
server/server.go" -# kubectl expose deployment routeguide --port=50051 -# kubectl wait --for=condition=available --timeout=60s deployment/routeguide -# - name: Test gRPC service with grpcurl -# run: | -# curl -sO https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml -# kubectl apply -f grpcurl-routeguide.yaml -# sleep 180 -# kubectl logs deploy/sleep + kubernetes-grpc-experiment: + name: Kubernetes gRPC load test + needs: build-and-push + runs-on: ubuntu-latest + steps: + - name: Install Iter8 + run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 + - name: Start kind cluster + uses: helm/kind-action@v1.5.0 + with: + wait: 300s + - name: Create routeguide application + run: | + kubectl create deployment routeguide --image=golang --port=50051 \ + -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run server/server.go" + kubectl expose deployment routeguide --port=50051 + kubectl wait --for=condition=available --timeout=60s deployment/routeguide + - name: Test gRPC service with grpcurl + run: | + curl -sO https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml + kubectl apply -f grpcurl-routeguide.yaml + sleep 180 + kubectl logs deploy/sleep -# - name: iter8 k launch -# run: | -# iter8 k launch \ -# --set tasks={grpc} \ -# --set grpc.host=routeguide.default:50051 \ -# --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ -# --set grpc.call=routeguide.RouteGuide.GetFeature \ -# --set grpc.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ -# - name: try other iter8 k commands -# run: | -# sleep 60 -# iter8 k log -# iter8 k delete + - name: iter8 k launch + run: | + iter8 k launch \ + --set tasks={grpc} \ + --set grpc.host=routeguide.default:50051 \ + --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ + --set grpc.call=routeguide.RouteGuide.GetFeature \ + --set grpc.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ + - name: try other iter8 k commands + run: | + sleep 60 + iter8 k log + iter8 k delete -# kubernetes-grpc-experiment2: -# name: Kubernetes gRPC load test 2 -# needs: build-and-push -# runs-on: ubuntu-latest -# steps: -# - name: Install Iter8 -# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 -# - name: Start kind cluster -# uses: helm/kind-action@v1.5.0 -# with: -# wait: 300s -# - name: Create hello application -# run: | -# kubectl create deploy hello --image=docker.io/grpc/java-example-hostname:latest --port=50051 -# kubectl expose deploy hello --port=50051 -# kubectl wait --for=condition=available --timeout=60s deploy/hello -# - name: iter8 k launch -# run: | -# iter8 k launch \ -# --set tasks={grpc} \ -# --set grpc.host="hello.default:50051" \ -# --set grpc.call="helloworld.Greeter.SayHello" \ -# --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" -# - name: try other iter8 k commands -# run: | -# sleep 60 -# iter8 k log -# iter8 k delete + kubernetes-grpc-experiment2: + name: Kubernetes gRPC load test 2 + needs: build-and-push + 
runs-on: ubuntu-latest + steps: + - name: Install Iter8 + run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 + - name: Start kind cluster + uses: helm/kind-action@v1.5.0 + with: + wait: 300s + - name: Create hello application + run: | + kubectl create deploy hello --image=docker.io/grpc/java-example-hostname:latest --port=50051 + kubectl expose deploy hello --port=50051 + kubectl wait --for=condition=available --timeout=60s deploy/hello + - name: iter8 k launch + run: | + iter8 k launch \ + --set tasks={grpc} \ + --set grpc.host="hello.default:50051" \ + --set grpc.call="helloworld.Greeter.SayHello" \ + --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" + - name: try other iter8 k commands + run: | + sleep 60 + iter8 k log + iter8 k delete -# readiness: -# name: Kubernetes readiness test -# needs: build-and-push -# runs-on: ubuntu-latest -# steps: -# - name: Install Iter8 -# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 -# - name: Start kind cluster -# uses: helm/kind-action@v1.5.0 -# with: -# wait: 300s -# - name: Create httpbin application -# run: | -# kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 -# kubectl expose deploy httpbin --port=80 -# - name: k launch with readiness checks -# run: | -# iter8 k launch \ -# --set "tasks={ready,http}" \ -# --set ready.deploy="httpbin" \ -# --set ready.service="httpbin" \ -# --set ready.timeout=60s \ -# --set http.url=http://httpbin.default + readiness: + name: Kubernetes readiness test + needs: build-and-push + runs-on: ubuntu-latest + steps: + - name: Install Iter8 + run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 + - name: Start kind cluster + uses: helm/kind-action@v1.5.0 + with: + wait: 300s + - name: Create httpbin application + run: | + kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 + kubectl expose deploy httpbin --port=80 + - name: k launch with readiness checks + run: | + iter8 k launch \ + --set "tasks={ready,http}" \ + --set ready.deploy="httpbin" \ + --set ready.service="httpbin" \ + --set ready.timeout=60s \ + --set http.url=http://httpbin.default -# readiness-with-namespace: -# name: Kubernetes readiness test with namespace -# needs: build-and-push -# runs-on: ubuntu-latest -# steps: -# - name: Install Iter8 -# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 -# - name: Start kind cluster -# uses: helm/kind-action@v1.5.0 -# with: -# wait: 300s -# - name: Create httpbin application -# run: | -# kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 -# kubectl expose deploy httpbin --port=80 -# kubectl create namespace experiments -# - name: k launch with readiness checks -# run: | -# iter8 k launch -n experiments \ -# --set "tasks={ready,http}" \ -# --set ready.deploy="httpbin" \ -# --set ready.service="httpbin" \ -# --set ready.timeout=60s \ -# --set ready.namespace=default \ -# --set http.url=http://httpbin.default/get \ No newline at end of file + readiness-with-namespace: + name: Kubernetes readiness test with namespace + needs: build-and-push + runs-on: ubuntu-latest + steps: + - name: Install Iter8 + run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 + - name: Start kind cluster + uses: helm/kind-action@v1.5.0 + with: + wait: 300s + - name: Create httpbin application + run: | + kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 + kubectl expose deploy httpbin --port=80 + 
kubectl create namespace experiments + - name: k launch with readiness checks + run: | + iter8 k launch -n experiments \ + --set "tasks={ready,http}" \ + --set ready.deploy="httpbin" \ + --set ready.service="httpbin" \ + --set ready.timeout=60s \ + --set ready.namespace=default \ + --set http.url=http://httpbin.default/get \ No newline at end of file From f9349ad86c68492b578e2a2b0567122e478df13a Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Thu, 24 Aug 2023 09:33:15 -0400 Subject: [PATCH 109/121] Add controller to assets Signed-off-by: Alan Cha --- .github/workflows/assets.yaml | 54 +++++++++++++++++++++++++++++------ 1 file changed, 45 insertions(+), 9 deletions(-) diff --git a/.github/workflows/assets.yaml b/.github/workflows/assets.yaml index 730d9953e..beb293355 100644 --- a/.github/workflows/assets.yaml +++ b/.github/workflows/assets.yaml @@ -102,16 +102,23 @@ jobs: kubectl create deployment httpbin --image=kennethreitz/httpbin kubectl expose deployment httpbin --type=ClusterIP --port=80 kubectl wait --for=condition=available --timeout=60s deploy/httpbin + - name: Install controller + run: | + helm install --repo https://iter8-tools.github.io/iter8 iter8 controller --set logLevel=trace + kubectl rollout status --watch --timeout=60s statefulset/iter8 - name: iter8 k launch run: | iter8 k launch \ --set tasks={http} \ --set http.url="http://httpbin.default/get" - - name: try other iter8 k commands - run: | sleep 60 + - name: Try other iter8 k commands + run: | iter8 k log iter8 k delete + - name: Check GET /httpDashboard + run: | + curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f kubernetes-grpc-experiment: name: Kubernetes gRPC load test @@ -130,13 +137,16 @@ jobs: -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run server/server.go" kubectl expose deployment routeguide --port=50051 kubectl wait --for=condition=available --timeout=60s deployment/routeguide + - name: Install controller + run: | + helm install --repo https://iter8-tools.github.io/iter8 iter8 controller --set logLevel=trace + kubectl rollout status --watch --timeout=60s statefulset/iter8 - name: Test gRPC service with grpcurl run: | curl -sO https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml kubectl apply -f grpcurl-routeguide.yaml sleep 180 kubectl logs deploy/sleep - - name: iter8 k launch run: | iter8 k launch \ @@ -145,11 +155,14 @@ jobs: --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ --set grpc.call=routeguide.RouteGuide.GetFeature \ --set grpc.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ - - name: try other iter8 k commands - run: | sleep 60 + - name: Try other iter8 k commands + run: | iter8 k log iter8 k delete + - name: Check GET /grpcDashboard + run: | + curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f kubernetes-grpc-experiment2: name: Kubernetes gRPC load test 2 @@ -167,6 +180,10 @@ jobs: kubectl create deploy hello --image=docker.io/grpc/java-example-hostname:latest --port=50051 kubectl expose deploy hello --port=50051 kubectl wait --for=condition=available --timeout=60s deploy/hello + - name: Install controller + run: | + helm install --repo https://iter8-tools.github.io/iter8 iter8 controller --set 
logLevel=trace + kubectl rollout status --watch --timeout=60s statefulset/iter8 - name: iter8 k launch run: | iter8 k launch \ @@ -174,11 +191,14 @@ jobs: --set grpc.host="hello.default:50051" \ --set grpc.call="helloworld.Greeter.SayHello" \ --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" - - name: try other iter8 k commands - run: | sleep 60 + - name: Try other iter8 k commands + run: | iter8 k log iter8 k delete + - name: Check GET /grpcDashboard + run: | + curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f readiness: name: Kubernetes readiness test @@ -195,6 +215,10 @@ jobs: run: | kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 kubectl expose deploy httpbin --port=80 + - name: Install controller + run: | + helm install --repo https://iter8-tools.github.io/iter8 iter8 controller --set logLevel=trace + kubectl rollout status --watch --timeout=60s statefulset/iter8 - name: k launch with readiness checks run: | iter8 k launch \ @@ -202,7 +226,11 @@ jobs: --set ready.deploy="httpbin" \ --set ready.service="httpbin" \ --set ready.timeout=60s \ - --set http.url=http://httpbin.default + --set http.url=http://httpbin.default/get + sleep 60 + - name: Check GET /httpDashboard + run: | + curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f readiness-with-namespace: name: Kubernetes readiness test with namespace @@ -220,6 +248,10 @@ jobs: kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 kubectl expose deploy httpbin --port=80 kubectl create namespace experiments + - name: Install controller + run: | + helm install --repo https://iter8-tools.github.io/iter8 iter8 controller --set logLevel=trace + kubectl rollout status --watch --timeout=60s statefulset/iter8 - name: k launch with readiness checks run: | iter8 k launch -n experiments \ @@ -228,4 +260,8 @@ jobs: --set ready.service="httpbin" \ --set ready.timeout=60s \ --set ready.namespace=default \ - --set http.url=http://httpbin.default/get \ No newline at end of file + --set http.url=http://httpbin.default/get + sleep 60 + - name: Check GET /httpDashboard + run: | + curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f \ No newline at end of file From c19085f33af0c4adbcd9397af6e8030a0991246f Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Thu, 24 Aug 2023 09:33:42 -0400 Subject: [PATCH 110/121] Uncomment again Signed-off-by: Alan Cha --- .github/workflows/assets.yaml | 518 ++++++------- .github/workflows/testcharts.yaml | 990 ++++++++++++------------- .github/workflows/testkustomize.yaml | 208 +++--- .github/workflows/testperformance.yaml | 714 +++++++++--------- 4 files changed, 1215 insertions(+), 1215 deletions(-) diff --git a/.github/workflows/assets.yaml b/.github/workflows/assets.yaml index beb293355..0fafd2960 100644 --- a/.github/workflows/assets.yaml +++ b/.github/workflows/assets.yaml @@ -1,267 +1,267 @@ -name: Publish binaries and Docker image +# name: Publish binaries and Docker image -on: - release: - types: [published] +# on: +# release: +# types: [published] -jobs: - assets: - name: Publish binaries - runs-on: ubuntu-latest - steps: - - name: Install Go - uses: actions/setup-go@v4 - with: - go-version: 1.19 - - uses: actions/checkout@v3 - - name: Build binaries - run: | - VERSION=${GITHUB_REF#refs/*/} - echo "Version: ${VERSION}" - make dist - - name: Upload binaries to release - uses: svenstaro/upload-release-action@v2 - with: - 
repo_token: ${{ secrets.GITHUB_TOKEN }} - file: _dist/iter8-*.tar.gz - tag: ${{ github.ref }} - overwrite: true - file_glob: true - - name: Create checksum - run: | - VERSION=${GITHUB_REF#refs/*/} - echo "VERSION=$VERSION" >> $GITHUB_ENV - wget https://github.com/iter8-tools/iter8/archive/refs/tags/${VERSION}.zip - sha256sum ${VERSION}.zip > checksum.txt - wget https://github.com/iter8-tools/iter8/archive/refs/tags/${VERSION}.tar.gz - sha256sum ${VERSION}.tar.gz >> checksum.txt - cd _dist - for f in iter8-*.tar.gz - do - sha256sum ${f} >> ../checksum.txt - done - # pick up darwin checksum and export it - echo "SHAFORMAC=$(grep darwin ../checksum.txt | awk '{print $1}')" >> $GITHUB_ENV - - name: Upload checksum to release - uses: svenstaro/upload-release-action@v2 - with: - repo_token: ${{ secrets.GITHUB_TOKEN }} - asset_name: checksum.txt - file: checksum.txt - tag: ${{ github.ref }} - overwrite: true +# jobs: +# assets: +# name: Publish binaries +# runs-on: ubuntu-latest +# steps: +# - name: Install Go +# uses: actions/setup-go@v4 +# with: +# go-version: 1.19 +# - uses: actions/checkout@v3 +# - name: Build binaries +# run: | +# VERSION=${GITHUB_REF#refs/*/} +# echo "Version: ${VERSION}" +# make dist +# - name: Upload binaries to release +# uses: svenstaro/upload-release-action@v2 +# with: +# repo_token: ${{ secrets.GITHUB_TOKEN }} +# file: _dist/iter8-*.tar.gz +# tag: ${{ github.ref }} +# overwrite: true +# file_glob: true +# - name: Create checksum +# run: | +# VERSION=${GITHUB_REF#refs/*/} +# echo "VERSION=$VERSION" >> $GITHUB_ENV +# wget https://github.com/iter8-tools/iter8/archive/refs/tags/${VERSION}.zip +# sha256sum ${VERSION}.zip > checksum.txt +# wget https://github.com/iter8-tools/iter8/archive/refs/tags/${VERSION}.tar.gz +# sha256sum ${VERSION}.tar.gz >> checksum.txt +# cd _dist +# for f in iter8-*.tar.gz +# do +# sha256sum ${f} >> ../checksum.txt +# done +# # pick up darwin checksum and export it +# echo "SHAFORMAC=$(grep darwin ../checksum.txt | awk '{print $1}')" >> $GITHUB_ENV +# - name: Upload checksum to release +# uses: svenstaro/upload-release-action@v2 +# with: +# repo_token: ${{ secrets.GITHUB_TOKEN }} +# asset_name: checksum.txt +# file: checksum.txt +# tag: ${{ github.ref }} +# overwrite: true - build-and-push: - name: Push Iter8 image to Docker Hub - needs: assets - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - name: Get version - run: | - tagref=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,') - # Strip "v" prefix from tagref - echo "VERSION=$(echo $tagref | sed -e 's/^v//')" >> $GITHUB_ENV - echo "MAJOR_MINOR_VERSION=$(echo $tagref | sed -e 's/^v//' -e 's,\([0-9]*\.[0-9]*\)\.\([0-9]*\),\1,')" >> $GITHUB_ENV - - name: Get owner - run: | - ownerrepo=${{ github.repository }} - owner=$(echo $ownerrepo | cut -f1 -d/) - if [[ "$owner" == "iter8-tools" ]]; then - owner=iter8 - fi - echo "OWNER=$owner" >> $GITHUB_ENV - - uses: docker/setup-buildx-action@v2 - - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_SECRET }} - - uses: docker/build-push-action@v4 - with: - platforms: linux/amd64,linux/arm64 - tags: ${{ env.OWNER }}/iter8:${{ env.VERSION }},${{ env.OWNER }}/iter8:${{ env.MAJOR_MINOR_VERSION }},${{ env.OWNER }}/iter8:latest - push: true - build-args: | - TAG=v${{ env.VERSION }} +# build-and-push: +# name: Push Iter8 image to Docker Hub +# needs: assets +# runs-on: ubuntu-latest +# steps: +# - uses: actions/checkout@v3 +# with: +# fetch-depth: 0 +# 
- name: Get version +# run: | +# tagref=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,') +# # Strip "v" prefix from tagref +# echo "VERSION=$(echo $tagref | sed -e 's/^v//')" >> $GITHUB_ENV +# echo "MAJOR_MINOR_VERSION=$(echo $tagref | sed -e 's/^v//' -e 's,\([0-9]*\.[0-9]*\)\.\([0-9]*\),\1,')" >> $GITHUB_ENV +# - name: Get owner +# run: | +# ownerrepo=${{ github.repository }} +# owner=$(echo $ownerrepo | cut -f1 -d/) +# if [[ "$owner" == "iter8-tools" ]]; then +# owner=iter8 +# fi +# echo "OWNER=$owner" >> $GITHUB_ENV +# - uses: docker/setup-buildx-action@v2 +# - uses: docker/login-action@v2 +# with: +# username: ${{ secrets.DOCKERHUB_USERNAME }} +# password: ${{ secrets.DOCKERHUB_SECRET }} +# - uses: docker/build-push-action@v4 +# with: +# platforms: linux/amd64,linux/arm64 +# tags: ${{ env.OWNER }}/iter8:${{ env.VERSION }},${{ env.OWNER }}/iter8:${{ env.MAJOR_MINOR_VERSION }},${{ env.OWNER }}/iter8:latest +# push: true +# build-args: | +# TAG=v${{ env.VERSION }} - kubernetes-http-experiment: - name: Kubernetes HTTP load test - needs: build-and-push - runs-on: ubuntu-latest - steps: - - name: Install Iter8 - run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 - - name: Start kind cluster - uses: helm/kind-action@v1.5.0 - with: - wait: 300s - - name: Create httpbin application - run: | - kubectl create deployment httpbin --image=kennethreitz/httpbin - kubectl expose deployment httpbin --type=ClusterIP --port=80 - kubectl wait --for=condition=available --timeout=60s deploy/httpbin - - name: Install controller - run: | - helm install --repo https://iter8-tools.github.io/iter8 iter8 controller --set logLevel=trace - kubectl rollout status --watch --timeout=60s statefulset/iter8 - - name: iter8 k launch - run: | - iter8 k launch \ - --set tasks={http} \ - --set http.url="http://httpbin.default/get" - sleep 60 - - name: Try other iter8 k commands - run: | - iter8 k log - iter8 k delete - - name: Check GET /httpDashboard - run: | - curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f +# kubernetes-http-experiment: +# name: Kubernetes HTTP load test +# needs: build-and-push +# runs-on: ubuntu-latest +# steps: +# - name: Install Iter8 +# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 +# - name: Start kind cluster +# uses: helm/kind-action@v1.5.0 +# with: +# wait: 300s +# - name: Create httpbin application +# run: | +# kubectl create deployment httpbin --image=kennethreitz/httpbin +# kubectl expose deployment httpbin --type=ClusterIP --port=80 +# kubectl wait --for=condition=available --timeout=60s deploy/httpbin +# - name: Install controller +# run: | +# helm install --repo https://iter8-tools.github.io/iter8 iter8 controller --set logLevel=trace +# kubectl rollout status --watch --timeout=60s statefulset/iter8 +# - name: iter8 k launch +# run: | +# iter8 k launch \ +# --set tasks={http} \ +# --set http.url="http://httpbin.default/get" +# sleep 60 +# - name: Try other iter8 k commands +# run: | +# iter8 k log +# iter8 k delete +# - name: Check GET /httpDashboard +# run: | +# curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f - kubernetes-grpc-experiment: - name: Kubernetes gRPC load test - needs: build-and-push - runs-on: ubuntu-latest - steps: - - name: Install Iter8 - run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 - - name: Start kind cluster - uses: helm/kind-action@v1.5.0 - with: - wait: 300s - - name: Create routeguide application - run: | - kubectl create 
deployment routeguide --image=golang --port=50051 \ - -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run server/server.go" - kubectl expose deployment routeguide --port=50051 - kubectl wait --for=condition=available --timeout=60s deployment/routeguide - - name: Install controller - run: | - helm install --repo https://iter8-tools.github.io/iter8 iter8 controller --set logLevel=trace - kubectl rollout status --watch --timeout=60s statefulset/iter8 - - name: Test gRPC service with grpcurl - run: | - curl -sO https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml - kubectl apply -f grpcurl-routeguide.yaml - sleep 180 - kubectl logs deploy/sleep - - name: iter8 k launch - run: | - iter8 k launch \ - --set tasks={grpc} \ - --set grpc.host=routeguide.default:50051 \ - --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ - --set grpc.call=routeguide.RouteGuide.GetFeature \ - --set grpc.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ - sleep 60 - - name: Try other iter8 k commands - run: | - iter8 k log - iter8 k delete - - name: Check GET /grpcDashboard - run: | - curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f +# kubernetes-grpc-experiment: +# name: Kubernetes gRPC load test +# needs: build-and-push +# runs-on: ubuntu-latest +# steps: +# - name: Install Iter8 +# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 +# - name: Start kind cluster +# uses: helm/kind-action@v1.5.0 +# with: +# wait: 300s +# - name: Create routeguide application +# run: | +# kubectl create deployment routeguide --image=golang --port=50051 \ +# -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run server/server.go" +# kubectl expose deployment routeguide --port=50051 +# kubectl wait --for=condition=available --timeout=60s deployment/routeguide +# - name: Install controller +# run: | +# helm install --repo https://iter8-tools.github.io/iter8 iter8 controller --set logLevel=trace +# kubectl rollout status --watch --timeout=60s statefulset/iter8 +# - name: Test gRPC service with grpcurl +# run: | +# curl -sO https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml +# kubectl apply -f grpcurl-routeguide.yaml +# sleep 180 +# kubectl logs deploy/sleep +# - name: iter8 k launch +# run: | +# iter8 k launch \ +# --set tasks={grpc} \ +# --set grpc.host=routeguide.default:50051 \ +# --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ +# --set grpc.call=routeguide.RouteGuide.GetFeature \ +# --set grpc.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ +# sleep 60 +# - name: Try other iter8 k commands +# run: | +# iter8 k log +# iter8 k delete +# - name: Check GET /grpcDashboard +# run: | +# curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f - kubernetes-grpc-experiment2: - name: Kubernetes gRPC load test 2 - needs: build-and-push - runs-on: ubuntu-latest - steps: - - name: Install Iter8 - run: 
GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 - - name: Start kind cluster - uses: helm/kind-action@v1.5.0 - with: - wait: 300s - - name: Create hello application - run: | - kubectl create deploy hello --image=docker.io/grpc/java-example-hostname:latest --port=50051 - kubectl expose deploy hello --port=50051 - kubectl wait --for=condition=available --timeout=60s deploy/hello - - name: Install controller - run: | - helm install --repo https://iter8-tools.github.io/iter8 iter8 controller --set logLevel=trace - kubectl rollout status --watch --timeout=60s statefulset/iter8 - - name: iter8 k launch - run: | - iter8 k launch \ - --set tasks={grpc} \ - --set grpc.host="hello.default:50051" \ - --set grpc.call="helloworld.Greeter.SayHello" \ - --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" - sleep 60 - - name: Try other iter8 k commands - run: | - iter8 k log - iter8 k delete - - name: Check GET /grpcDashboard - run: | - curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f +# kubernetes-grpc-experiment2: +# name: Kubernetes gRPC load test 2 +# needs: build-and-push +# runs-on: ubuntu-latest +# steps: +# - name: Install Iter8 +# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 +# - name: Start kind cluster +# uses: helm/kind-action@v1.5.0 +# with: +# wait: 300s +# - name: Create hello application +# run: | +# kubectl create deploy hello --image=docker.io/grpc/java-example-hostname:latest --port=50051 +# kubectl expose deploy hello --port=50051 +# kubectl wait --for=condition=available --timeout=60s deploy/hello +# - name: Install controller +# run: | +# helm install --repo https://iter8-tools.github.io/iter8 iter8 controller --set logLevel=trace +# kubectl rollout status --watch --timeout=60s statefulset/iter8 +# - name: iter8 k launch +# run: | +# iter8 k launch \ +# --set tasks={grpc} \ +# --set grpc.host="hello.default:50051" \ +# --set grpc.call="helloworld.Greeter.SayHello" \ +# --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" +# sleep 60 +# - name: Try other iter8 k commands +# run: | +# iter8 k log +# iter8 k delete +# - name: Check GET /grpcDashboard +# run: | +# curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f - readiness: - name: Kubernetes readiness test - needs: build-and-push - runs-on: ubuntu-latest - steps: - - name: Install Iter8 - run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 - - name: Start kind cluster - uses: helm/kind-action@v1.5.0 - with: - wait: 300s - - name: Create httpbin application - run: | - kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 - kubectl expose deploy httpbin --port=80 - - name: Install controller - run: | - helm install --repo https://iter8-tools.github.io/iter8 iter8 controller --set logLevel=trace - kubectl rollout status --watch --timeout=60s statefulset/iter8 - - name: k launch with readiness checks - run: | - iter8 k launch \ - --set "tasks={ready,http}" \ - --set ready.deploy="httpbin" \ - --set ready.service="httpbin" \ - --set ready.timeout=60s \ - --set http.url=http://httpbin.default/get - sleep 60 - - name: Check GET /httpDashboard - run: | - curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f +# readiness: +# name: Kubernetes readiness test +# needs: build-and-push +# runs-on: ubuntu-latest +# steps: +# - name: 
Install Iter8 +# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 +# - name: Start kind cluster +# uses: helm/kind-action@v1.5.0 +# with: +# wait: 300s +# - name: Create httpbin application +# run: | +# kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 +# kubectl expose deploy httpbin --port=80 +# - name: Install controller +# run: | +# helm install --repo https://iter8-tools.github.io/iter8 iter8 controller --set logLevel=trace +# kubectl rollout status --watch --timeout=60s statefulset/iter8 +# - name: k launch with readiness checks +# run: | +# iter8 k launch \ +# --set "tasks={ready,http}" \ +# --set ready.deploy="httpbin" \ +# --set ready.service="httpbin" \ +# --set ready.timeout=60s \ +# --set http.url=http://httpbin.default/get +# sleep 60 +# - name: Check GET /httpDashboard +# run: | +# curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f - readiness-with-namespace: - name: Kubernetes readiness test with namespace - needs: build-and-push - runs-on: ubuntu-latest - steps: - - name: Install Iter8 - run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 - - name: Start kind cluster - uses: helm/kind-action@v1.5.0 - with: - wait: 300s - - name: Create httpbin application - run: | - kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 - kubectl expose deploy httpbin --port=80 - kubectl create namespace experiments - - name: Install controller - run: | - helm install --repo https://iter8-tools.github.io/iter8 iter8 controller --set logLevel=trace - kubectl rollout status --watch --timeout=60s statefulset/iter8 - - name: k launch with readiness checks - run: | - iter8 k launch -n experiments \ - --set "tasks={ready,http}" \ - --set ready.deploy="httpbin" \ - --set ready.service="httpbin" \ - --set ready.timeout=60s \ - --set ready.namespace=default \ - --set http.url=http://httpbin.default/get - sleep 60 - - name: Check GET /httpDashboard - run: | - curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f \ No newline at end of file +# readiness-with-namespace: +# name: Kubernetes readiness test with namespace +# needs: build-and-push +# runs-on: ubuntu-latest +# steps: +# - name: Install Iter8 +# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 +# - name: Start kind cluster +# uses: helm/kind-action@v1.5.0 +# with: +# wait: 300s +# - name: Create httpbin application +# run: | +# kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 +# kubectl expose deploy httpbin --port=80 +# kubectl create namespace experiments +# - name: Install controller +# run: | +# helm install --repo https://iter8-tools.github.io/iter8 iter8 controller --set logLevel=trace +# kubectl rollout status --watch --timeout=60s statefulset/iter8 +# - name: k launch with readiness checks +# run: | +# iter8 k launch -n experiments \ +# --set "tasks={ready,http}" \ +# --set ready.deploy="httpbin" \ +# --set ready.service="httpbin" \ +# --set ready.timeout=60s \ +# --set ready.namespace=default \ +# --set http.url=http://httpbin.default/get +# sleep 60 +# - name: Check GET /httpDashboard +# run: | +# curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f \ No newline at end of file diff --git a/.github/workflows/testcharts.yaml b/.github/workflows/testcharts.yaml index a299ff3f5..9a068d8db 100644 --- a/.github/workflows/testcharts.yaml +++ b/.github/workflows/testcharts.yaml @@ -1,499 +1,499 @@ -name: Tests to ensure that changes 
to charts do not break user experience - -on: - pull_request: - -# Kind versions used to test Iter8 on different versions of Kubernetes -# From: https://github.com/kubernetes-sigs/kind/releases -env: - versions: | - kindest/node:v1.26.3@sha256:61b92f38dff6ccc29969e7aa154d34e38b89443af1a2c14e6cfbd2df6419c66f - kindest/node:v1.25.8@sha256:00d3f5314cc35327706776e95b2f8e504198ce59ac545d0200a89e69fce10b7f - kindest/node:v1.24.12@sha256:1e12918b8bc3d4253bc08f640a231bb0d3b2c5a9b28aa3f2ca1aee93e1e8db16 - kindest/node:v1.23.17@sha256:e5fd1d9cd7a9a50939f9c005684df5a6d145e8d695e78463637b79464292e66c - kindest/node:v1.22.17@sha256:c8a828709a53c25cbdc0790c8afe12f25538617c7be879083248981945c38693 - kindest/node:v1.21.14@sha256:27ef72ea623ee879a25fe6f9982690a3e370c68286f4356bf643467c552a3888 - kindest/node:v1.27.1@sha256:9915f5629ef4d29f35b478e819249e89cfaffcbfeebda4324e5c01d53d937b09 - kindest/node:v1.27.0@sha256:c6b22e613523b1af67d4bc8a0c38a4c3ea3a2b8fbc5b367ae36345c9cb844518 - -jobs: - # Get the different Kind versions - get_versions: - runs-on: ubuntu-latest - - steps: - - name: Get the different Kind versions - id: set-matrix - run: | - # Serialize versions into JSON array - jsonVersions=$(jq -ncR '[inputs]' <<< "$versions") - echo $jsonVersions - - # Output serialized jsonVersions - echo "matrix=$jsonVersions" | sed -e "s/,\"\"//" >> $GITHUB_OUTPUT - - outputs: - matrix: ${{ steps.set-matrix.outputs.matrix }} - - http-experiment: - name: HTTP load test - needs: get_versions - runs-on: ubuntu-latest - strategy: - matrix: - version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - - steps: - - name: Check out code - uses: actions/checkout@v3 - - - name: Get modified files in the charts/iter8 folder - id: modified-files - uses: tj-actions/changed-files@v35 - with: - files: charts/iter8 - - - name: Install Iter8 - run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 - if: steps.modified-files.outputs.any_modified == 'true' - - - name: Start kind cluster ${{ matrix.version }} - uses: helm/kind-action@v1.5.0 - if: steps.modified-files.outputs.any_modified == 'true' - with: - wait: 300s - node_image: ${{ matrix.version }} - - - name: Create httpbin application - if: steps.modified-files.outputs.any_modified == 'true' - run: | - kubectl create deployment httpbin --image=kennethreitz/httpbin - kubectl expose deployment httpbin --type=ClusterIP --port=80 - kubectl wait --for=condition=available --timeout=60s deploy/httpbin - - - name: Install controller - run: | - helm install iter8 charts/controller --set logLevel=trace - kubectl rollout status --watch --timeout=60s statefulset/iter8 - - - name: iter8 k launch - if: steps.modified-files.outputs.any_modified == 'true' - run: | - iter8 k launch \ - --localChart \ - --chartName charts/iter8 \ - --set "tasks={http}" \ - --set http.url="http://httpbin.default/get" \ - sleep 60 - - - name: Try other iter8 k commands - if: steps.modified-files.outputs.any_modified == 'true' - run: | - iter8 k log - iter8 k delete - - - name: Expose metrics service - run: | - kubectl port-forward service/iter8 8080:8080 & - - - name: Check GET /httpDashboard - run: | - curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f - - http-payload-experiment: - name: HTTP load test with payload - needs: get_versions - runs-on: ubuntu-latest - strategy: - matrix: - version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - - steps: - - name: Check out code - uses: actions/checkout@v3 - - - name: Get modified files in the charts/iter8 
folder - id: modified-files - uses: tj-actions/changed-files@v35 - with: - files: charts/iter8 - - - name: Install Iter8 - run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.14 - if: steps.modified-files.outputs.any_modified == 'true' - - - name: Start kind cluster ${{ matrix.version }} - uses: helm/kind-action@v1.5.0 - if: steps.modified-files.outputs.any_modified == 'true' - with: - wait: 300s - node_image: ${{ matrix.version }} - - - name: Create httpbin application - if: steps.modified-files.outputs.any_modified == 'true' - run: | - kubectl create deployment httpbin --image=kennethreitz/httpbin - kubectl expose deployment httpbin --type=ClusterIP --port=80 - kubectl wait --for=condition=available --timeout=60s deploy/httpbin - - - name: Install controller - run: | - helm install iter8 charts/controller --set logLevel=trace - kubectl rollout status --watch --timeout=60s statefulset/iter8 - - - name: iter8 k launch - if: steps.modified-files.outputs.any_modified == 'true' - run: | - iter8 k launch \ - --localChart \ - --chartName charts/iter8 \ - --set "tasks={http}" \ - --set http.url="http://httpbin.default/post" \ - --set http.payloadStr=hello \ - sleep 60 - - - name: Try other iter8 k commands - if: steps.modified-files.outputs.any_modified == 'true' - run: | - iter8 k log - iter8 k delete - - - name: Expose metrics service - run: | - kubectl port-forward service/iter8 8080:8080 & - - - name: Check GET /httpDashboard - run: | - curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f - - http-multiple-experiment: - name: HTTP load test with multiple endpoints - needs: get_versions - runs-on: ubuntu-latest - strategy: - matrix: - version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - - steps: - - name: Check out code - uses: actions/checkout@v3 - - - name: Get modified files in the charts/iter8 folder - id: modified-files - uses: tj-actions/changed-files@v35 - with: - files: charts/iter8 - - - name: Install Iter8 - run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.14 - if: steps.modified-files.outputs.any_modified == 'true' - - - name: Start kind cluster ${{ matrix.version }} - uses: helm/kind-action@v1.5.0 - if: steps.modified-files.outputs.any_modified == 'true' - with: - wait: 300s - node_image: ${{ matrix.version }} - - - name: Create httpbin application - if: steps.modified-files.outputs.any_modified == 'true' - run: | - kubectl create deployment httpbin --image=kennethreitz/httpbin - kubectl expose deployment httpbin --type=ClusterIP --port=80 - kubectl wait --for=condition=available --timeout=60s deploy/httpbin - - - name: Install controller - run: | - helm install iter8 charts/controller --set logLevel=trace - kubectl rollout status --watch --timeout=60s statefulset/iter8 - - - name: iter8 k launch - if: steps.modified-files.outputs.any_modified == 'true' - run: | - iter8 k launch \ - --localChart \ - --chartName charts/iter8 \ - --set "tasks={http}" \ - --set http.endpoints.get.url=http://httpbin.default/get \ - --set http.endpoints.getAnything.url=http://httpbin.default/anything \ - --set http.endpoints.post.url=http://httpbin.default/post \ - --set http.endpoints.post.payloadStr=hello \ - sleep 60 - - - name: Try other iter8 k commands - if: steps.modified-files.outputs.any_modified == 'true' - run: | - iter8 k log - iter8 k delete - - - name: Expose metrics service - run: | - kubectl port-forward service/iter8 8080:8080 & - - - name: Check GET /httpDashboard - run: | - curl 
"http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f - - grpc-experiment: - name: gRPC load test - needs: get_versions - runs-on: ubuntu-latest - strategy: - matrix: - version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - - steps: - - name: Check out code - uses: actions/checkout@v3 - - - name: Get modified files in the charts/iter8 folder - id: modified-files - uses: tj-actions/changed-files@v35 - with: - files: charts/iter8 +# name: Tests to ensure that changes to charts do not break user experience + +# on: +# pull_request: + +# # Kind versions used to test Iter8 on different versions of Kubernetes +# # From: https://github.com/kubernetes-sigs/kind/releases +# env: +# versions: | +# kindest/node:v1.26.3@sha256:61b92f38dff6ccc29969e7aa154d34e38b89443af1a2c14e6cfbd2df6419c66f +# kindest/node:v1.25.8@sha256:00d3f5314cc35327706776e95b2f8e504198ce59ac545d0200a89e69fce10b7f +# kindest/node:v1.24.12@sha256:1e12918b8bc3d4253bc08f640a231bb0d3b2c5a9b28aa3f2ca1aee93e1e8db16 +# kindest/node:v1.23.17@sha256:e5fd1d9cd7a9a50939f9c005684df5a6d145e8d695e78463637b79464292e66c +# kindest/node:v1.22.17@sha256:c8a828709a53c25cbdc0790c8afe12f25538617c7be879083248981945c38693 +# kindest/node:v1.21.14@sha256:27ef72ea623ee879a25fe6f9982690a3e370c68286f4356bf643467c552a3888 +# kindest/node:v1.27.1@sha256:9915f5629ef4d29f35b478e819249e89cfaffcbfeebda4324e5c01d53d937b09 +# kindest/node:v1.27.0@sha256:c6b22e613523b1af67d4bc8a0c38a4c3ea3a2b8fbc5b367ae36345c9cb844518 + +# jobs: +# # Get the different Kind versions +# get_versions: +# runs-on: ubuntu-latest + +# steps: +# - name: Get the different Kind versions +# id: set-matrix +# run: | +# # Serialize versions into JSON array +# jsonVersions=$(jq -ncR '[inputs]' <<< "$versions") +# echo $jsonVersions + +# # Output serialized jsonVersions +# echo "matrix=$jsonVersions" | sed -e "s/,\"\"//" >> $GITHUB_OUTPUT + +# outputs: +# matrix: ${{ steps.set-matrix.outputs.matrix }} + +# http-experiment: +# name: HTTP load test +# needs: get_versions +# runs-on: ubuntu-latest +# strategy: +# matrix: +# version: ${{ fromJson(needs.get_versions.outputs.matrix) }} + +# steps: +# - name: Check out code +# uses: actions/checkout@v3 + +# - name: Get modified files in the charts/iter8 folder +# id: modified-files +# uses: tj-actions/changed-files@v35 +# with: +# files: charts/iter8 + +# - name: Install Iter8 +# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 +# if: steps.modified-files.outputs.any_modified == 'true' + +# - name: Start kind cluster ${{ matrix.version }} +# uses: helm/kind-action@v1.5.0 +# if: steps.modified-files.outputs.any_modified == 'true' +# with: +# wait: 300s +# node_image: ${{ matrix.version }} + +# - name: Create httpbin application +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# kubectl create deployment httpbin --image=kennethreitz/httpbin +# kubectl expose deployment httpbin --type=ClusterIP --port=80 +# kubectl wait --for=condition=available --timeout=60s deploy/httpbin + +# - name: Install controller +# run: | +# helm install iter8 charts/controller --set logLevel=trace +# kubectl rollout status --watch --timeout=60s statefulset/iter8 + +# - name: iter8 k launch +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# iter8 k launch \ +# --localChart \ +# --chartName charts/iter8 \ +# --set "tasks={http}" \ +# --set http.url="http://httpbin.default/get" \ +# sleep 60 + +# - name: Try other iter8 k commands +# if: steps.modified-files.outputs.any_modified == 
'true' +# run: | +# iter8 k log +# iter8 k delete + +# - name: Expose metrics service +# run: | +# kubectl port-forward service/iter8 8080:8080 & + +# - name: Check GET /httpDashboard +# run: | +# curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f + +# http-payload-experiment: +# name: HTTP load test with payload +# needs: get_versions +# runs-on: ubuntu-latest +# strategy: +# matrix: +# version: ${{ fromJson(needs.get_versions.outputs.matrix) }} + +# steps: +# - name: Check out code +# uses: actions/checkout@v3 + +# - name: Get modified files in the charts/iter8 folder +# id: modified-files +# uses: tj-actions/changed-files@v35 +# with: +# files: charts/iter8 + +# - name: Install Iter8 +# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.14 +# if: steps.modified-files.outputs.any_modified == 'true' + +# - name: Start kind cluster ${{ matrix.version }} +# uses: helm/kind-action@v1.5.0 +# if: steps.modified-files.outputs.any_modified == 'true' +# with: +# wait: 300s +# node_image: ${{ matrix.version }} + +# - name: Create httpbin application +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# kubectl create deployment httpbin --image=kennethreitz/httpbin +# kubectl expose deployment httpbin --type=ClusterIP --port=80 +# kubectl wait --for=condition=available --timeout=60s deploy/httpbin + +# - name: Install controller +# run: | +# helm install iter8 charts/controller --set logLevel=trace +# kubectl rollout status --watch --timeout=60s statefulset/iter8 + +# - name: iter8 k launch +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# iter8 k launch \ +# --localChart \ +# --chartName charts/iter8 \ +# --set "tasks={http}" \ +# --set http.url="http://httpbin.default/post" \ +# --set http.payloadStr=hello \ +# sleep 60 + +# - name: Try other iter8 k commands +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# iter8 k log +# iter8 k delete + +# - name: Expose metrics service +# run: | +# kubectl port-forward service/iter8 8080:8080 & + +# - name: Check GET /httpDashboard +# run: | +# curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f + +# http-multiple-experiment: +# name: HTTP load test with multiple endpoints +# needs: get_versions +# runs-on: ubuntu-latest +# strategy: +# matrix: +# version: ${{ fromJson(needs.get_versions.outputs.matrix) }} + +# steps: +# - name: Check out code +# uses: actions/checkout@v3 + +# - name: Get modified files in the charts/iter8 folder +# id: modified-files +# uses: tj-actions/changed-files@v35 +# with: +# files: charts/iter8 + +# - name: Install Iter8 +# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.14 +# if: steps.modified-files.outputs.any_modified == 'true' + +# - name: Start kind cluster ${{ matrix.version }} +# uses: helm/kind-action@v1.5.0 +# if: steps.modified-files.outputs.any_modified == 'true' +# with: +# wait: 300s +# node_image: ${{ matrix.version }} + +# - name: Create httpbin application +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# kubectl create deployment httpbin --image=kennethreitz/httpbin +# kubectl expose deployment httpbin --type=ClusterIP --port=80 +# kubectl wait --for=condition=available --timeout=60s deploy/httpbin + +# - name: Install controller +# run: | +# helm install iter8 charts/controller --set logLevel=trace +# kubectl rollout status --watch --timeout=60s statefulset/iter8 + +# - name: iter8 k launch +# if: 
steps.modified-files.outputs.any_modified == 'true' +# run: | +# iter8 k launch \ +# --localChart \ +# --chartName charts/iter8 \ +# --set "tasks={http}" \ +# --set http.endpoints.get.url=http://httpbin.default/get \ +# --set http.endpoints.getAnything.url=http://httpbin.default/anything \ +# --set http.endpoints.post.url=http://httpbin.default/post \ +# --set http.endpoints.post.payloadStr=hello \ +# sleep 60 + +# - name: Try other iter8 k commands +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# iter8 k log +# iter8 k delete + +# - name: Expose metrics service +# run: | +# kubectl port-forward service/iter8 8080:8080 & + +# - name: Check GET /httpDashboard +# run: | +# curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f + +# grpc-experiment: +# name: gRPC load test +# needs: get_versions +# runs-on: ubuntu-latest +# strategy: +# matrix: +# version: ${{ fromJson(needs.get_versions.outputs.matrix) }} + +# steps: +# - name: Check out code +# uses: actions/checkout@v3 + +# - name: Get modified files in the charts/iter8 folder +# id: modified-files +# uses: tj-actions/changed-files@v35 +# with: +# files: charts/iter8 - - name: Install Iter8 - run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 - if: steps.modified-files.outputs.any_modified == 'true' - - - name: Start kind cluster ${{ matrix.version }} - uses: helm/kind-action@v1.5.0 - if: steps.modified-files.outputs.any_modified == 'true' - with: - wait: 300s - node_image: ${{ matrix.version }} - - - name: Create routeguide application - if: steps.modified-files.outputs.any_modified == 'true' - run: | - kubectl create deployment routeguide --image=golang --port=50051 \ - -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run server/server.go" - kubectl expose deployment routeguide --port=50051 - kubectl wait --for=condition=available --timeout=60s deployment/routeguide - - - name: Test gRPC service with grpcurl - if: steps.modified-files.outputs.any_modified == 'true' - run: | - curl -sO https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml - kubectl apply -f grpcurl-routeguide.yaml - sleep 180 - kubectl logs deploy/sleep - - - name: Install controller - run: | - helm install iter8 charts/controller --set logLevel=trace - kubectl rollout status --watch --timeout=60s statefulset/iter8 - - - name: iter8 k launch - if: steps.modified-files.outputs.any_modified == 'true' - run: | - iter8 k launch \ - --localChart \ - --chartName charts/iter8 \ - --set "tasks={ready,grpc}" \ - --set ready.deploy=routeguide \ - --set ready.service=routeguide \ - --set ready.timeout=60s \ - --set grpc.host=routeguide.default:50051 \ - --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ - --set grpc.call=routeguide.RouteGuide.GetFeature \ - --set grpc.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ - sleep 60 - - - name: Try other iter8 k commands - if: steps.modified-files.outputs.any_modified == 'true' - run: | - iter8 k log - iter8 k delete - - - name: Expose metrics service - run: | - kubectl port-forward service/iter8 8080:8080 & - - - name: Check GET /grpcDashboard - run: | - curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f - - 
grpc-multiple-experiment: - name: gRPC load test with multiple endpoints - needs: get_versions - runs-on: ubuntu-latest - strategy: - matrix: - version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - - steps: - - name: Check out code - uses: actions/checkout@v3 - - - name: Get modified files in the charts/iter8 folder - id: modified-files - uses: tj-actions/changed-files@v35 - with: - files: charts/iter8 +# - name: Install Iter8 +# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 +# if: steps.modified-files.outputs.any_modified == 'true' + +# - name: Start kind cluster ${{ matrix.version }} +# uses: helm/kind-action@v1.5.0 +# if: steps.modified-files.outputs.any_modified == 'true' +# with: +# wait: 300s +# node_image: ${{ matrix.version }} + +# - name: Create routeguide application +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# kubectl create deployment routeguide --image=golang --port=50051 \ +# -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run server/server.go" +# kubectl expose deployment routeguide --port=50051 +# kubectl wait --for=condition=available --timeout=60s deployment/routeguide + +# - name: Test gRPC service with grpcurl +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# curl -sO https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml +# kubectl apply -f grpcurl-routeguide.yaml +# sleep 180 +# kubectl logs deploy/sleep + +# - name: Install controller +# run: | +# helm install iter8 charts/controller --set logLevel=trace +# kubectl rollout status --watch --timeout=60s statefulset/iter8 + +# - name: iter8 k launch +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# iter8 k launch \ +# --localChart \ +# --chartName charts/iter8 \ +# --set "tasks={ready,grpc}" \ +# --set ready.deploy=routeguide \ +# --set ready.service=routeguide \ +# --set ready.timeout=60s \ +# --set grpc.host=routeguide.default:50051 \ +# --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ +# --set grpc.call=routeguide.RouteGuide.GetFeature \ +# --set grpc.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ +# sleep 60 + +# - name: Try other iter8 k commands +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# iter8 k log +# iter8 k delete + +# - name: Expose metrics service +# run: | +# kubectl port-forward service/iter8 8080:8080 & + +# - name: Check GET /grpcDashboard +# run: | +# curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f + +# grpc-multiple-experiment: +# name: gRPC load test with multiple endpoints +# needs: get_versions +# runs-on: ubuntu-latest +# strategy: +# matrix: +# version: ${{ fromJson(needs.get_versions.outputs.matrix) }} + +# steps: +# - name: Check out code +# uses: actions/checkout@v3 + +# - name: Get modified files in the charts/iter8 folder +# id: modified-files +# uses: tj-actions/changed-files@v35 +# with: +# files: charts/iter8 - - name: Install Iter8 - run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.14 - if: steps.modified-files.outputs.any_modified == 'true' - - - name: Start kind cluster ${{ matrix.version }} - uses: helm/kind-action@v1.5.0 - if: steps.modified-files.outputs.any_modified 
== 'true' - with: - wait: 300s - node_image: ${{ matrix.version }} - - - name: Create routeguide application - if: steps.modified-files.outputs.any_modified == 'true' - run: | - kubectl create deployment routeguide --image=golang --port=50051 \ - -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run server/server.go" - kubectl expose deployment routeguide --port=50051 - kubectl wait --for=condition=available --timeout=60s deployment/routeguide - - - name: Test gRPC service with grpcurl - if: steps.modified-files.outputs.any_modified == 'true' - run: | - curl -sO https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml - kubectl apply -f grpcurl-routeguide.yaml - sleep 180 - kubectl logs deploy/sleep - - - name: Install controller - run: | - helm install iter8 charts/controller --set logLevel=trace - kubectl rollout status --watch --timeout=60s statefulset/iter8 - - - name: iter8 k launch - if: steps.modified-files.outputs.any_modified == 'true' - run: | - iter8 k launch \ - --localChart \ - --chartName charts/iter8 \ - --set "tasks={ready,grpc}" \ - --set ready.deploy=routeguide \ - --set ready.service=routeguide \ - --set ready.timeout=60s \ - --set grpc.host=routeguide.default:50051 \ - --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ - --set grpc.endpoints.getFeature.call=routeguide.RouteGuide.GetFeature \ - --set grpc.endpoints.getFeature.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ - --set grpc.endpoints.listFeatures.call=routeguide.RouteGuide.ListFeatures \ - --set grpc.endpoints.listFeatures.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/server.json \ - sleep 60 - - - name: Try other iter8 k commands - if: steps.modified-files.outputs.any_modified == 'true' - run: | - iter8 k log - iter8 k delete - - - name: Expose metrics service - run: | - kubectl port-forward service/iter8 8080:8080 & - - - name: Check GET /grpcDashboard - run: | - curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f - - grpc-experiment2: - name: gRPC load test 2 - needs: get_versions - runs-on: ubuntu-latest - strategy: - matrix: - version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - - steps: - - name: Check out code - uses: actions/checkout@v3 - - - name: Get modified files in the charts/iter8 folder - id: modified-files - uses: tj-actions/changed-files@v35 - with: - files: charts/iter8 +# - name: Install Iter8 +# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.14 +# if: steps.modified-files.outputs.any_modified == 'true' + +# - name: Start kind cluster ${{ matrix.version }} +# uses: helm/kind-action@v1.5.0 +# if: steps.modified-files.outputs.any_modified == 'true' +# with: +# wait: 300s +# node_image: ${{ matrix.version }} + +# - name: Create routeguide application +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# kubectl create deployment routeguide --image=golang --port=50051 \ +# -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run server/server.go" +# kubectl expose deployment routeguide --port=50051 +# kubectl wait --for=condition=available 
--timeout=60s deployment/routeguide + +# - name: Test gRPC service with grpcurl +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# curl -sO https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml +# kubectl apply -f grpcurl-routeguide.yaml +# sleep 180 +# kubectl logs deploy/sleep + +# - name: Install controller +# run: | +# helm install iter8 charts/controller --set logLevel=trace +# kubectl rollout status --watch --timeout=60s statefulset/iter8 + +# - name: iter8 k launch +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# iter8 k launch \ +# --localChart \ +# --chartName charts/iter8 \ +# --set "tasks={ready,grpc}" \ +# --set ready.deploy=routeguide \ +# --set ready.service=routeguide \ +# --set ready.timeout=60s \ +# --set grpc.host=routeguide.default:50051 \ +# --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ +# --set grpc.endpoints.getFeature.call=routeguide.RouteGuide.GetFeature \ +# --set grpc.endpoints.getFeature.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ +# --set grpc.endpoints.listFeatures.call=routeguide.RouteGuide.ListFeatures \ +# --set grpc.endpoints.listFeatures.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/server.json \ +# sleep 60 + +# - name: Try other iter8 k commands +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# iter8 k log +# iter8 k delete + +# - name: Expose metrics service +# run: | +# kubectl port-forward service/iter8 8080:8080 & + +# - name: Check GET /grpcDashboard +# run: | +# curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f + +# grpc-experiment2: +# name: gRPC load test 2 +# needs: get_versions +# runs-on: ubuntu-latest +# strategy: +# matrix: +# version: ${{ fromJson(needs.get_versions.outputs.matrix) }} + +# steps: +# - name: Check out code +# uses: actions/checkout@v3 + +# - name: Get modified files in the charts/iter8 folder +# id: modified-files +# uses: tj-actions/changed-files@v35 +# with: +# files: charts/iter8 - - name: Install Iter8 - run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.14 - if: steps.modified-files.outputs.any_modified == 'true' - - - name: Start kind cluster ${{ matrix.version }} - uses: helm/kind-action@v1.5.0 - if: steps.modified-files.outputs.any_modified == 'true' - with: - wait: 300s - node_image: ${{ matrix.version }} - - - name: Create hello application - if: steps.modified-files.outputs.any_modified == 'true' - run: | - kubectl create deploy hello --image=docker.io/grpc/java-example-hostname:latest --port=50051 - kubectl expose deploy hello --port=50051 - kubectl wait --for=condition=available --timeout=60s deploy/hello - - - name: Install controller - run: | - helm install iter8 charts/controller --set logLevel=trace - kubectl rollout status --watch --timeout=60s statefulset/iter8 - - - name: iter8 k launch - if: steps.modified-files.outputs.any_modified == 'true' - run: | - iter8 k launch \ - --localChart \ - --chartName charts/iter8 \ - --set "tasks={grpc}" \ - --set grpc.host="hello.default:50051" \ - --set grpc.call="helloworld.Greeter.SayHello" \ - --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ - sleep 60 - - - name: Try other iter8 k commands - if: 
steps.modified-files.outputs.any_modified == 'true' - run: | - iter8 k log - iter8 k delete - - - name: Expose metrics service - run: | - kubectl port-forward service/iter8 8080:8080 & - - - name: Check GET /grpcDashboard - run: | - curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f - - controller: - name: Controller test - needs: get_versions - runs-on: ubuntu-latest - strategy: - matrix: - version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - - steps: - - name: Check out code - uses: actions/checkout@v3 - - - name: Get modified files in the charts/controller folder - id: modified-files - uses: tj-actions/changed-files@v35 - with: - files: charts/controller - - - name: Start kind cluster ${{ matrix.version }} - uses: helm/kind-action@v1.5.0 - if: steps.modified-files.outputs.any_modified == 'true' - with: - wait: 300s - node_image: ${{ matrix.version }} +# - name: Install Iter8 +# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.14 +# if: steps.modified-files.outputs.any_modified == 'true' + +# - name: Start kind cluster ${{ matrix.version }} +# uses: helm/kind-action@v1.5.0 +# if: steps.modified-files.outputs.any_modified == 'true' +# with: +# wait: 300s +# node_image: ${{ matrix.version }} + +# - name: Create hello application +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# kubectl create deploy hello --image=docker.io/grpc/java-example-hostname:latest --port=50051 +# kubectl expose deploy hello --port=50051 +# kubectl wait --for=condition=available --timeout=60s deploy/hello + +# - name: Install controller +# run: | +# helm install iter8 charts/controller --set logLevel=trace +# kubectl rollout status --watch --timeout=60s statefulset/iter8 + +# - name: iter8 k launch +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# iter8 k launch \ +# --localChart \ +# --chartName charts/iter8 \ +# --set "tasks={grpc}" \ +# --set grpc.host="hello.default:50051" \ +# --set grpc.call="helloworld.Greeter.SayHello" \ +# --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ +# sleep 60 + +# - name: Try other iter8 k commands +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# iter8 k log +# iter8 k delete + +# - name: Expose metrics service +# run: | +# kubectl port-forward service/iter8 8080:8080 & + +# - name: Check GET /grpcDashboard +# run: | +# curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f + +# controller: +# name: Controller test +# needs: get_versions +# runs-on: ubuntu-latest +# strategy: +# matrix: +# version: ${{ fromJson(needs.get_versions.outputs.matrix) }} + +# steps: +# - name: Check out code +# uses: actions/checkout@v3 + +# - name: Get modified files in the charts/controller folder +# id: modified-files +# uses: tj-actions/changed-files@v35 +# with: +# files: charts/controller + +# - name: Start kind cluster ${{ matrix.version }} +# uses: helm/kind-action@v1.5.0 +# if: steps.modified-files.outputs.any_modified == 'true' +# with: +# wait: 300s +# node_image: ${{ matrix.version }} - - name: Start controller - if: steps.modified-files.outputs.any_modified == 'true' - run: | - helm install controller charts/controller -f charts/controller/testdata/values.yaml - - - name: Check controller - if: steps.modified-files.outputs.any_modified == 'true' - run: | - kubectl rollout status --watch --timeout=60s statefulset.apps/controller +# - name: Start controller +# if: 
steps.modified-files.outputs.any_modified == 'true' +# run: | +# helm install controller charts/controller -f charts/controller/testdata/values.yaml + +# - name: Check controller +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# kubectl rollout status --watch --timeout=60s statefulset.apps/controller diff --git a/.github/workflows/testkustomize.yaml b/.github/workflows/testkustomize.yaml index dc17ff7fa..96b47eeb8 100644 --- a/.github/workflows/testkustomize.yaml +++ b/.github/workflows/testkustomize.yaml @@ -1,106 +1,106 @@ -name: Test kustomize experiments - -on: - pull_request: - -# Kind versions used to test Iter8 on different versions of Kubernetes -# From: https://github.com/kubernetes-sigs/kind/releases -env: - versions: | - kindest/node:v1.26.3@sha256:61b92f38dff6ccc29969e7aa154d34e38b89443af1a2c14e6cfbd2df6419c66f - kindest/node:v1.25.8@sha256:00d3f5314cc35327706776e95b2f8e504198ce59ac545d0200a89e69fce10b7f - kindest/node:v1.24.12@sha256:1e12918b8bc3d4253bc08f640a231bb0d3b2c5a9b28aa3f2ca1aee93e1e8db16 - kindest/node:v1.23.17@sha256:e5fd1d9cd7a9a50939f9c005684df5a6d145e8d695e78463637b79464292e66c - kindest/node:v1.22.17@sha256:c8a828709a53c25cbdc0790c8afe12f25538617c7be879083248981945c38693 - kindest/node:v1.21.14@sha256:27ef72ea623ee879a25fe6f9982690a3e370c68286f4356bf643467c552a3888 - kindest/node:v1.27.1@sha256:9915f5629ef4d29f35b478e819249e89cfaffcbfeebda4324e5c01d53d937b09 - kindest/node:v1.27.0@sha256:c6b22e613523b1af67d4bc8a0c38a4c3ea3a2b8fbc5b367ae36345c9cb844518 - -jobs: - # Get the paths for the Helm charts to lint - get_versions: - runs-on: ubuntu-latest - - steps: - - name: Get the paths for Helm charts to lint - id: set-matrix - run: | - # Serialize versions into JSON array - jsonVersions=$(jq -ncR '[inputs]' <<< "$versions") - echo $jsonVersions - - # Output serialized jsonVersions - echo "matrix=$jsonVersions" | sed -e "s/,\"\"//" >> $GITHUB_OUTPUT - - outputs: - matrix: ${{ steps.set-matrix.outputs.matrix }} - - controller: - name: Controller test - needs: get_versions - runs-on: ubuntu-latest - strategy: - matrix: - version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - - steps: - - name: Check out code - uses: actions/checkout@v3 - - - name: Get modified files in the charts/controller folder - id: modified-files - uses: tj-actions/changed-files@v35 - with: - files: charts/controller - - - name: Start kind cluster ${{ matrix.version }} - uses: helm/kind-action@v1.5.0 - if: steps.modified-files.outputs.any_modified == 'true' - with: - wait: 300s - node_image: ${{ matrix.version }} +# name: Test kustomize experiments + +# on: +# pull_request: + +# # Kind versions used to test Iter8 on different versions of Kubernetes +# # From: https://github.com/kubernetes-sigs/kind/releases +# env: +# versions: | +# kindest/node:v1.26.3@sha256:61b92f38dff6ccc29969e7aa154d34e38b89443af1a2c14e6cfbd2df6419c66f +# kindest/node:v1.25.8@sha256:00d3f5314cc35327706776e95b2f8e504198ce59ac545d0200a89e69fce10b7f +# kindest/node:v1.24.12@sha256:1e12918b8bc3d4253bc08f640a231bb0d3b2c5a9b28aa3f2ca1aee93e1e8db16 +# kindest/node:v1.23.17@sha256:e5fd1d9cd7a9a50939f9c005684df5a6d145e8d695e78463637b79464292e66c +# kindest/node:v1.22.17@sha256:c8a828709a53c25cbdc0790c8afe12f25538617c7be879083248981945c38693 +# kindest/node:v1.21.14@sha256:27ef72ea623ee879a25fe6f9982690a3e370c68286f4356bf643467c552a3888 +# kindest/node:v1.27.1@sha256:9915f5629ef4d29f35b478e819249e89cfaffcbfeebda4324e5c01d53d937b09 +# 
kindest/node:v1.27.0@sha256:c6b22e613523b1af67d4bc8a0c38a4c3ea3a2b8fbc5b367ae36345c9cb844518 + +# jobs: +# # Get the paths for the Helm charts to lint +# get_versions: +# runs-on: ubuntu-latest + +# steps: +# - name: Get the paths for Helm charts to lint +# id: set-matrix +# run: | +# # Serialize versions into JSON array +# jsonVersions=$(jq -ncR '[inputs]' <<< "$versions") +# echo $jsonVersions + +# # Output serialized jsonVersions +# echo "matrix=$jsonVersions" | sed -e "s/,\"\"//" >> $GITHUB_OUTPUT + +# outputs: +# matrix: ${{ steps.set-matrix.outputs.matrix }} + +# controller: +# name: Controller test +# needs: get_versions +# runs-on: ubuntu-latest +# strategy: +# matrix: +# version: ${{ fromJson(needs.get_versions.outputs.matrix) }} + +# steps: +# - name: Check out code +# uses: actions/checkout@v3 + +# - name: Get modified files in the charts/controller folder +# id: modified-files +# uses: tj-actions/changed-files@v35 +# with: +# files: charts/controller + +# - name: Start kind cluster ${{ matrix.version }} +# uses: helm/kind-action@v1.5.0 +# if: steps.modified-files.outputs.any_modified == 'true' +# with: +# wait: 300s +# node_image: ${{ matrix.version }} - - name: Start controller - if: steps.modified-files.outputs.any_modified == 'true' - run: | - kubectl apply -k kustomize/controller/namespaceScoped - - - name: Check controller - if: steps.modified-files.outputs.any_modified == 'true' - run: | - kubectl rollout status --watch --timeout=60s statefulset.apps/iter8 - - controller-clusterScoped: - name: Controller cluster scoped test - needs: get_versions - runs-on: ubuntu-latest - strategy: - matrix: - version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - - steps: - - name: Check out code - uses: actions/checkout@v3 - - - name: Get modified files in the charts/controller folder - id: modified-files - uses: tj-actions/changed-files@v35 - with: - files: charts/controller - - - name: Start kind cluster ${{ matrix.version }} - uses: helm/kind-action@v1.5.0 - if: steps.modified-files.outputs.any_modified == 'true' - with: - wait: 300s - node_image: ${{ matrix.version }} +# - name: Start controller +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# kubectl apply -k kustomize/controller/namespaceScoped + +# - name: Check controller +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# kubectl rollout status --watch --timeout=60s statefulset.apps/iter8 + +# controller-clusterScoped: +# name: Controller cluster scoped test +# needs: get_versions +# runs-on: ubuntu-latest +# strategy: +# matrix: +# version: ${{ fromJson(needs.get_versions.outputs.matrix) }} + +# steps: +# - name: Check out code +# uses: actions/checkout@v3 + +# - name: Get modified files in the charts/controller folder +# id: modified-files +# uses: tj-actions/changed-files@v35 +# with: +# files: charts/controller + +# - name: Start kind cluster ${{ matrix.version }} +# uses: helm/kind-action@v1.5.0 +# if: steps.modified-files.outputs.any_modified == 'true' +# with: +# wait: 300s +# node_image: ${{ matrix.version }} - - name: Start controller - if: steps.modified-files.outputs.any_modified == 'true' - run: | - kubectl apply -k kustomize/controller/clusterScoped - - - name: Check controller - if: steps.modified-files.outputs.any_modified == 'true' - run: | - kubectl rollout status --watch --timeout=60s statefulset.apps/iter8 \ No newline at end of file +# - name: Start controller +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# kubectl apply -k 
kustomize/controller/clusterScoped + +# - name: Check controller +# if: steps.modified-files.outputs.any_modified == 'true' +# run: | +# kubectl rollout status --watch --timeout=60s statefulset.apps/iter8 \ No newline at end of file diff --git a/.github/workflows/testperformance.yaml b/.github/workflows/testperformance.yaml index 383042c24..8d4ef6690 100644 --- a/.github/workflows/testperformance.yaml +++ b/.github/workflows/testperformance.yaml @@ -1,363 +1,363 @@ -name: Performance tests to assess the functionality of the latest version of Iter8 (master branch) - -on: - pull_request: - -jobs: - unit-test: - name: Unit test - runs-on: ubuntu-latest - steps: - - name: Install Go - uses: actions/setup-go@v4 - with: - go-version: 1.19 - - name: Check out code into the Go module directory - uses: actions/checkout@v3 - - name: Test and compute coverage - run: make coverage # includes vet and lint - - name: Enforce coverage - run: | - export COVERAGE=$(go tool cover -func coverage.out | grep total | awk '{print substr($3, 1, length($3)-1)}') - echo "code coverage is at ${COVERAGE}" - if [ 1 -eq "$(echo "${COVERAGE} > 76.0" | bc)" ]; then \ - echo "all good... coverage is above 76.0%"; - else \ - echo "not good... coverage is not above 76.0%"; - exit 1 - fi - - build-push-test-image: - name: Build and push test Docker image - runs-on: ubuntu-latest - steps: - - name: Install Go - uses: actions/setup-go@v4 - with: - go-version: 1.19 - - uses: actions/checkout@v3 - - name: Create Dockerfile.dev - run: | - echo "# Small linux image with iter8 binary - FROM debian:buster-slim +# name: Performance tests to assess the functionality of the latest version of Iter8 (master branch) + +# on: +# pull_request: + +# jobs: +# unit-test: +# name: Unit test +# runs-on: ubuntu-latest +# steps: +# - name: Install Go +# uses: actions/setup-go@v4 +# with: +# go-version: 1.19 +# - name: Check out code into the Go module directory +# uses: actions/checkout@v3 +# - name: Test and compute coverage +# run: make coverage # includes vet and lint +# - name: Enforce coverage +# run: | +# export COVERAGE=$(go tool cover -func coverage.out | grep total | awk '{print substr($3, 1, length($3)-1)}') +# echo "code coverage is at ${COVERAGE}" +# if [ 1 -eq "$(echo "${COVERAGE} > 76.0" | bc)" ]; then \ +# echo "all good... coverage is above 76.0%"; +# else \ +# echo "not good... 
coverage is not above 76.0%"; +# exit 1 +# fi + +# build-push-test-image: +# name: Build and push test Docker image +# runs-on: ubuntu-latest +# steps: +# - name: Install Go +# uses: actions/setup-go@v4 +# with: +# go-version: 1.19 +# - uses: actions/checkout@v3 +# - name: Create Dockerfile.dev +# run: | +# echo "# Small linux image with iter8 binary +# FROM debian:buster-slim - # Install curl - RUN apt-get update && apt-get install -y curl +# # Install curl +# RUN apt-get update && apt-get install -y curl - # Download iter8 compressed binary - # use COPY instead of wget - COPY _dist/iter8-linux-amd64.tar.gz iter8-linux-amd64.tar.gz +# # Download iter8 compressed binary +# # use COPY instead of wget +# COPY _dist/iter8-linux-amd64.tar.gz iter8-linux-amd64.tar.gz - # Extract iter8 - RUN tar -xvf iter8-linux-amd64.tar.gz +# # Extract iter8 +# RUN tar -xvf iter8-linux-amd64.tar.gz - # Extract iter8 - RUN mv linux-amd64/iter8 /bin/iter8" > Dockerfile.dev - - name: Get version and tag - id: versionTag - run: | - # GitHub ref name - VERSION=${GITHUB_REF_NAME} - echo "VERSION: $VERSION" - echo "VERSION=${VERSION}" >> "$GITHUB_ENV" - echo "VERSION=${VERSION}" >> "$GITHUB_OUTPUT" - - name: Get owner - run: | - OWNER_REPO=${{ github.repository }} - OWNER=$(echo $OWNER_REPO | cut -f1 -d/) - if [[ "$OWNER" == "iter8-tools" ]]; then - OWNER=iter8 - fi - echo "OWNER: $OWNER" - echo "OWNER=$OWNER" >> $GITHUB_ENV - - name: Get image tag - id: imageTag - run: | - # Docker image - IMAGE_TAG=$(echo ${OWNER}/iter8-pr:${VERSION}) - echo "IMAGE_TAG: $IMAGE_TAG" - echo "IMAGE_TAG=${IMAGE_TAG}" >> "$GITHUB_ENV" - echo "IMAGE_TAG=${IMAGE_TAG}" >> "$GITHUB_OUTPUT" - - uses: docker/setup-buildx-action@v2 - - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_SECRET }} - - uses: docker/build-push-action@v4 - with: - platforms: linux/amd64 - tags: ${{ env.IMAGE_TAG }} - push: true - file: "{context}/Dockerfile.dev" - outputs: - VERSION: ${{ steps.versionTag.outputs.VERSION }} - IMAGE_TAG: ${{ steps.imageTag.outputs.IMAGE_TAG }} - - kubernetes-load-test-http: - name: HTTP load test (with readiness) at the edge of Kubernetes - needs: build-push-test-image - runs-on: ubuntu-latest - steps: - - name: Install Go - uses: actions/setup-go@v4 - with: - go-version: 1.19 - - name: Check out code into the Go module directory - uses: actions/checkout@v3 - - name: Build and install Iter8 - run: make install - - name: Start kind cluster - uses: helm/kind-action@v1.5.0 - with: - wait: 300s - - - name: Create httpbin application - run: | - kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 - kubectl expose deploy httpbin --port=80 - - - name: Install controller - run: | - helm install iter8 charts/controller --set image=${{ needs.assets.outputs.IMAGE_TAG }} --set logLevel=trace - kubectl rollout status --watch --timeout=60s statefulset/iter8 - - - name: Expose metrics service - run: | - kubectl port-forward service/iter8 8080:8080 & +# # Extract iter8 +# RUN mv linux-amd64/iter8 /bin/iter8" > Dockerfile.dev +# - name: Get version and tag +# id: versionTag +# run: | +# # GitHub ref name +# VERSION=${GITHUB_REF_NAME} +# echo "VERSION: $VERSION" +# echo "VERSION=${VERSION}" >> "$GITHUB_ENV" +# echo "VERSION=${VERSION}" >> "$GITHUB_OUTPUT" +# - name: Get owner +# run: | +# OWNER_REPO=${{ github.repository }} +# OWNER=$(echo $OWNER_REPO | cut -f1 -d/) +# if [[ "$OWNER" == "iter8-tools" ]]; then +# OWNER=iter8 +# fi +# echo "OWNER: $OWNER" +# echo 
"OWNER=$OWNER" >> $GITHUB_ENV +# - name: Get image tag +# id: imageTag +# run: | +# # Docker image +# IMAGE_TAG=$(echo ${OWNER}/iter8-pr:${VERSION}) +# echo "IMAGE_TAG: $IMAGE_TAG" +# echo "IMAGE_TAG=${IMAGE_TAG}" >> "$GITHUB_ENV" +# echo "IMAGE_TAG=${IMAGE_TAG}" >> "$GITHUB_OUTPUT" +# - uses: docker/setup-buildx-action@v2 +# - uses: docker/login-action@v2 +# with: +# username: ${{ secrets.DOCKERHUB_USERNAME }} +# password: ${{ secrets.DOCKERHUB_SECRET }} +# - uses: docker/build-push-action@v4 +# with: +# platforms: linux/amd64 +# tags: ${{ env.IMAGE_TAG }} +# push: true +# file: "{context}/Dockerfile.dev" +# outputs: +# VERSION: ${{ steps.versionTag.outputs.VERSION }} +# IMAGE_TAG: ${{ steps.imageTag.outputs.IMAGE_TAG }} + +# kubernetes-load-test-http: +# name: HTTP load test (with readiness) at the edge of Kubernetes +# needs: build-push-test-image +# runs-on: ubuntu-latest +# steps: +# - name: Install Go +# uses: actions/setup-go@v4 +# with: +# go-version: 1.19 +# - name: Check out code into the Go module directory +# uses: actions/checkout@v3 +# - name: Build and install Iter8 +# run: make install +# - name: Start kind cluster +# uses: helm/kind-action@v1.5.0 +# with: +# wait: 300s + +# - name: Create httpbin application +# run: | +# kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 +# kubectl expose deploy httpbin --port=80 + +# - name: Install controller +# run: | +# helm install iter8 charts/controller --set image=${{ needs.assets.outputs.IMAGE_TAG }} --set logLevel=trace +# kubectl rollout status --watch --timeout=60s statefulset/iter8 + +# - name: Expose metrics service +# run: | +# kubectl port-forward service/iter8 8080:8080 & - - name: load-test-http in Kubernetes - run: | - iter8 k launch --localChart --chartName charts/iter8 \ - --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ - --set "tasks={ready,http}" \ - --set ready.deploy=httpbin \ - --set ready.service=httpbin \ - --set ready.timeout=60s \ - --set http.url=http://httpbin.default/get \ - --set http.duration="3s" - sleep 60 - - - name: Try other iter8 k commands - run: | - iter8 k log - iter8 k delete - - - name: Check GET /httpDashboard - run: | - curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f - - - name: load-test-http with payload in Kubernetes - run: | - iter8 k launch --localChart --chartName charts/iter8 \ - --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ - --set "tasks={ready,http}" \ - --set ready.deploy=httpbin \ - --set ready.service=httpbin \ - --set ready.timeout=60s \ - --set http.url=http://httpbin.default/post \ - --set http.payloadStr=hello \ - --set http.duration="3s" - sleep 60 - - - name: Try other iter8 k commands - run: | - iter8 k log - iter8 k delete - - - name: Check GET /httpDashboard - run: | - curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f - - - name: load-test-http with multiple endpoints in Kubernetes - run: | - iter8 k launch --localChart --chartName charts/iter8 \ - --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ - --set "tasks={ready,http}" \ - --set ready.deploy=httpbin \ - --set ready.service=httpbin \ - --set ready.timeout=60s \ - --set http.endpoints.get.url=http://httpbin.default/get \ - --set http.endpoints.getAnything.url=http://httpbin.default/anything \ - --set http.endpoints.post.url=http://httpbin.default/post \ - --set http.endpoints.post.payloadStr=hello \ - --set http.duration="3s" - sleep 60 - - - name: Try other iter8 k commands - run: | - iter8 k log - iter8 
k delete - - - name: Check GET /httpDashboard - run: | - curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f - - kubernetes-load-test-grpc: - name: gRPC load test with various URLs - needs: build-push-test-image - runs-on: ubuntu-latest - steps: - - name: Install Go - uses: actions/setup-go@v4 - with: - go-version: 1.19 - - name: Check out code into the Go module directory - uses: actions/checkout@v3 - - name: Build and install Iter8 - run: make install - - - name: Start kind cluster - uses: helm/kind-action@v1.5.0 - with: - wait: 300s - - - name: Create routeguide application - run: | - kubectl create deployment routeguide --image=golang --port=50051 \ - -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run server/server.go" - kubectl expose deployment routeguide --port=50051 - kubectl wait --for=condition=available --timeout=60s deployment/routeguide - - - name: Test gRPC service with grpcurl - run: | - curl -sO https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml - kubectl apply -f grpcurl-routeguide.yaml - sleep 180 - kubectl logs deploy/sleep +# - name: load-test-http in Kubernetes +# run: | +# iter8 k launch --localChart --chartName charts/iter8 \ +# --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ +# --set "tasks={ready,http}" \ +# --set ready.deploy=httpbin \ +# --set ready.service=httpbin \ +# --set ready.timeout=60s \ +# --set http.url=http://httpbin.default/get \ +# --set http.duration="3s" +# sleep 60 + +# - name: Try other iter8 k commands +# run: | +# iter8 k log +# iter8 k delete + +# - name: Check GET /httpDashboard +# run: | +# curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f + +# - name: load-test-http with payload in Kubernetes +# run: | +# iter8 k launch --localChart --chartName charts/iter8 \ +# --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ +# --set "tasks={ready,http}" \ +# --set ready.deploy=httpbin \ +# --set ready.service=httpbin \ +# --set ready.timeout=60s \ +# --set http.url=http://httpbin.default/post \ +# --set http.payloadStr=hello \ +# --set http.duration="3s" +# sleep 60 + +# - name: Try other iter8 k commands +# run: | +# iter8 k log +# iter8 k delete + +# - name: Check GET /httpDashboard +# run: | +# curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f + +# - name: load-test-http with multiple endpoints in Kubernetes +# run: | +# iter8 k launch --localChart --chartName charts/iter8 \ +# --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ +# --set "tasks={ready,http}" \ +# --set ready.deploy=httpbin \ +# --set ready.service=httpbin \ +# --set ready.timeout=60s \ +# --set http.endpoints.get.url=http://httpbin.default/get \ +# --set http.endpoints.getAnything.url=http://httpbin.default/anything \ +# --set http.endpoints.post.url=http://httpbin.default/post \ +# --set http.endpoints.post.payloadStr=hello \ +# --set http.duration="3s" +# sleep 60 + +# - name: Try other iter8 k commands +# run: | +# iter8 k log +# iter8 k delete + +# - name: Check GET /httpDashboard +# run: | +# curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f + +# kubernetes-load-test-grpc: +# name: gRPC load test with various URLs +# needs: build-push-test-image +# runs-on: ubuntu-latest +# steps: +# - name: Install Go +# uses: 
actions/setup-go@v4 +# with: +# go-version: 1.19 +# - name: Check out code into the Go module directory +# uses: actions/checkout@v3 +# - name: Build and install Iter8 +# run: make install + +# - name: Start kind cluster +# uses: helm/kind-action@v1.5.0 +# with: +# wait: 300s + +# - name: Create routeguide application +# run: | +# kubectl create deployment routeguide --image=golang --port=50051 \ +# -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run server/server.go" +# kubectl expose deployment routeguide --port=50051 +# kubectl wait --for=condition=available --timeout=60s deployment/routeguide + +# - name: Test gRPC service with grpcurl +# run: | +# curl -sO https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml +# kubectl apply -f grpcurl-routeguide.yaml +# sleep 180 +# kubectl logs deploy/sleep - - name: Install controller - run: | - helm install iter8 charts/controller --set image=${{ needs.assets.outputs.IMAGE_TAG }} --set logLevel=trace - kubectl rollout status --watch --timeout=60s statefulset/iter8 - - - name: Expose metrics service - run: | - kubectl port-forward service/iter8 8080:8080 & - - - name: load test grpc service - run: | - iter8 k launch --localChart --chartName charts/iter8 \ - --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ - --set "tasks={ready,grpc}" \ - --set ready.deploy=routeguide \ - --set ready.service=routeguide \ - --set ready.timeout=60s \ - --set grpc.host=routeguide.default:50051 \ - --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ - --set grpc.call=routeguide.RouteGuide.GetFeature \ - --set grpc.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json - sleep 60 - - - name: Try other iter8 k commands - run: | - iter8 k log - iter8 k delete - - - name: Check GET /grpcDashboard - run: | - curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f - - - name: load test grpc service with multiple endpoints - run: | - iter8 k launch --localChart --chartName charts/iter8 \ - --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ - --set "tasks={ready,grpc}" \ - --set ready.deploy=routeguide \ - --set ready.service=routeguide \ - --set ready.timeout=60s \ - --set grpc.host=routeguide.default:50051 \ - --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ - --set grpc.endpoints.getFeature.call=routeguide.RouteGuide.GetFeature \ - --set grpc.endpoints.getFeature.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ - --set grpc.endpoints.listFeatures.call=routeguide.RouteGuide.ListFeatures \ - --set grpc.endpoints.listFeatures.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/server.json - sleep 60 - - - name: Try other iter8 k commands - run: | - iter8 k log - iter8 k delete - - - name: Check GET /grpcDashboard - run: | - curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f - - kubernetes-load-test-grpc2: - name: gRPC load test 2 with various URLs - needs: build-push-test-image - runs-on: ubuntu-latest - steps: - - name: Install Go - uses: actions/setup-go@v4 - with: - go-version: 1.19 - - name: Check out code into the 
Go module directory - uses: actions/checkout@v3 - - name: Build and install Iter8 - run: make install - - - name: Start kind cluster - uses: helm/kind-action@v1.5.0 - with: - wait: 300s - - - name: Create hello application - run: | - kubectl create deploy hello --image=docker.io/grpc/java-example-hostname:latest --port=50051 - kubectl expose deploy hello --port=50051 - - - name: Install controller - run: | - helm install iter8 charts/controller --set image=${{ needs.assets.outputs.IMAGE_TAG }} --set logLevel=trace - kubectl rollout status --watch --timeout=60s statefulset/iter8 - - - name: Expose metrics service - run: | - kubectl port-forward service/iter8 8080:8080 & - - - name: load test grpc service with protoURL - run: | - iter8 k launch --localChart --chartName charts/iter8 \ - --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ - --set "tasks={ready,grpc}" \ - --set ready.deploy=hello \ - --set ready.service=hello \ - --set ready.timeout=60s \ - --set grpc.host="hello.default:50051" \ - --set grpc.call="helloworld.Greeter.SayHello" \ - --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ - --set grpc.data.name="frodo" - sleep 60 - - - name: Try other iter8 k commands - run: | - iter8 k log - iter8 k delete - - - name: Check GET /grpcDashboard - run: | - curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f - - - name: load test grpc service with proto/data/metadata URLs - run: | - iter8 k launch --localChart --chartName charts/iter8 \ - --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ - --set "tasks={ready,grpc}" \ - --set ready.deploy=hello \ - --set ready.service=hello \ - --set ready.timeout=60s \ - --set grpc.host="hello.default:50051" \ - --set grpc.call="helloworld.Greeter.SayHello" \ - --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ - --set grpc.dataURL="https://gist.githubusercontent.com/sriumcp/3f3178f4b698af6696c925832e51b0ba/raw/d02aa698d34aa2067f7a2f6afb4ceb616b0db822/name.json" \ - --set grpc.metadataURL="https://gist.githubusercontent.com/sriumcp/3f3178f4b698af6696c925832e51b0ba/raw/d02aa698d34aa2067f7a2f6afb4ceb616b0db822/name.json" - sleep 60 - - - name: Try other iter8 k commands - run: | - iter8 k log - iter8 k delete - - - name: Check GET /grpcDashboard - run: | - curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f +# - name: Install controller +# run: | +# helm install iter8 charts/controller --set image=${{ needs.assets.outputs.IMAGE_TAG }} --set logLevel=trace +# kubectl rollout status --watch --timeout=60s statefulset/iter8 + +# - name: Expose metrics service +# run: | +# kubectl port-forward service/iter8 8080:8080 & + +# - name: load test grpc service +# run: | +# iter8 k launch --localChart --chartName charts/iter8 \ +# --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ +# --set "tasks={ready,grpc}" \ +# --set ready.deploy=routeguide \ +# --set ready.service=routeguide \ +# --set ready.timeout=60s \ +# --set grpc.host=routeguide.default:50051 \ +# --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ +# --set grpc.call=routeguide.RouteGuide.GetFeature \ +# --set grpc.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json +# sleep 60 + +# - name: Try other iter8 k commands +# run: | +# iter8 k log +# iter8 k delete + 
+# - name: Check GET /grpcDashboard +# run: | +# curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f + +# - name: load test grpc service with multiple endpoints +# run: | +# iter8 k launch --localChart --chartName charts/iter8 \ +# --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ +# --set "tasks={ready,grpc}" \ +# --set ready.deploy=routeguide \ +# --set ready.service=routeguide \ +# --set ready.timeout=60s \ +# --set grpc.host=routeguide.default:50051 \ +# --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ +# --set grpc.endpoints.getFeature.call=routeguide.RouteGuide.GetFeature \ +# --set grpc.endpoints.getFeature.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ +# --set grpc.endpoints.listFeatures.call=routeguide.RouteGuide.ListFeatures \ +# --set grpc.endpoints.listFeatures.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/server.json +# sleep 60 + +# - name: Try other iter8 k commands +# run: | +# iter8 k log +# iter8 k delete + +# - name: Check GET /grpcDashboard +# run: | +# curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f + +# kubernetes-load-test-grpc2: +# name: gRPC load test 2 with various URLs +# needs: build-push-test-image +# runs-on: ubuntu-latest +# steps: +# - name: Install Go +# uses: actions/setup-go@v4 +# with: +# go-version: 1.19 +# - name: Check out code into the Go module directory +# uses: actions/checkout@v3 +# - name: Build and install Iter8 +# run: make install + +# - name: Start kind cluster +# uses: helm/kind-action@v1.5.0 +# with: +# wait: 300s + +# - name: Create hello application +# run: | +# kubectl create deploy hello --image=docker.io/grpc/java-example-hostname:latest --port=50051 +# kubectl expose deploy hello --port=50051 + +# - name: Install controller +# run: | +# helm install iter8 charts/controller --set image=${{ needs.assets.outputs.IMAGE_TAG }} --set logLevel=trace +# kubectl rollout status --watch --timeout=60s statefulset/iter8 + +# - name: Expose metrics service +# run: | +# kubectl port-forward service/iter8 8080:8080 & + +# - name: load test grpc service with protoURL +# run: | +# iter8 k launch --localChart --chartName charts/iter8 \ +# --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ +# --set "tasks={ready,grpc}" \ +# --set ready.deploy=hello \ +# --set ready.service=hello \ +# --set ready.timeout=60s \ +# --set grpc.host="hello.default:50051" \ +# --set grpc.call="helloworld.Greeter.SayHello" \ +# --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ +# --set grpc.data.name="frodo" +# sleep 60 + +# - name: Try other iter8 k commands +# run: | +# iter8 k log +# iter8 k delete + +# - name: Check GET /grpcDashboard +# run: | +# curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f + +# - name: load test grpc service with proto/data/metadata URLs +# run: | +# iter8 k launch --localChart --chartName charts/iter8 \ +# --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ +# --set "tasks={ready,grpc}" \ +# --set ready.deploy=hello \ +# --set ready.service=hello \ +# --set ready.timeout=60s \ +# --set grpc.host="hello.default:50051" \ +# --set grpc.call="helloworld.Greeter.SayHello" \ +# --set 
grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ +# --set grpc.dataURL="https://gist.githubusercontent.com/sriumcp/3f3178f4b698af6696c925832e51b0ba/raw/d02aa698d34aa2067f7a2f6afb4ceb616b0db822/name.json" \ +# --set grpc.metadataURL="https://gist.githubusercontent.com/sriumcp/3f3178f4b698af6696c925832e51b0ba/raw/d02aa698d34aa2067f7a2f6afb4ceb616b0db822/name.json" +# sleep 60 + +# - name: Try other iter8 k commands +# run: | +# iter8 k log +# iter8 k delete + +# - name: Check GET /grpcDashboard +# run: | +# curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f From 8218348a38c27f06a13d0da242b1c720a499f0da Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Thu, 24 Aug 2023 10:51:16 -0400 Subject: [PATCH 111/121] Temp changes Signed-off-by: Alan Cha --- .github/workflows/testperformance.yaml | 721 +++++++++++++------------ 1 file changed, 364 insertions(+), 357 deletions(-) diff --git a/.github/workflows/testperformance.yaml b/.github/workflows/testperformance.yaml index 8d4ef6690..36424a0cb 100644 --- a/.github/workflows/testperformance.yaml +++ b/.github/workflows/testperformance.yaml @@ -1,363 +1,370 @@ -# name: Performance tests to assess the functionality of the latest version of Iter8 (master branch) - -# on: -# pull_request: - -# jobs: -# unit-test: -# name: Unit test -# runs-on: ubuntu-latest -# steps: -# - name: Install Go -# uses: actions/setup-go@v4 -# with: -# go-version: 1.19 -# - name: Check out code into the Go module directory -# uses: actions/checkout@v3 -# - name: Test and compute coverage -# run: make coverage # includes vet and lint -# - name: Enforce coverage -# run: | -# export COVERAGE=$(go tool cover -func coverage.out | grep total | awk '{print substr($3, 1, length($3)-1)}') -# echo "code coverage is at ${COVERAGE}" -# if [ 1 -eq "$(echo "${COVERAGE} > 76.0" | bc)" ]; then \ -# echo "all good... coverage is above 76.0%"; -# else \ -# echo "not good... coverage is not above 76.0%"; -# exit 1 -# fi - -# build-push-test-image: -# name: Build and push test Docker image -# runs-on: ubuntu-latest -# steps: -# - name: Install Go -# uses: actions/setup-go@v4 -# with: -# go-version: 1.19 -# - uses: actions/checkout@v3 -# - name: Create Dockerfile.dev -# run: | -# echo "# Small linux image with iter8 binary -# FROM debian:buster-slim +name: Performance tests to assess the functionality of the latest version of Iter8 (master branch) + +# Only runs when golang code (or test cases) have changed + +on: + pull_request: + branches: + - master + paths: + - **.go + - testdata/** + +jobs: + unit-test: + name: Unit test + runs-on: ubuntu-latest + steps: + - name: Install Go + uses: actions/setup-go@v4 + with: + go-version: 1.19 + - name: Check out code into the Go module directory + uses: actions/checkout@v3 + - name: Test and compute coverage + run: make coverage # includes vet and lint + - name: Enforce coverage + run: | + export COVERAGE=$(go tool cover -func coverage.out | grep total | awk '{print substr($3, 1, length($3)-1)}') + echo "code coverage is at ${COVERAGE}" + if [ 1 -eq "$(echo "${COVERAGE} > 76.0" | bc)" ]; then \ + echo "all good... coverage is above 76.0%"; + else \ + echo "not good... 
coverage is not above 76.0%"; + exit 1 + fi + + build-push-test-image: + name: Build and push test Docker image + runs-on: ubuntu-latest + steps: + - name: Install Go + uses: actions/setup-go@v4 + with: + go-version: 1.19 + - uses: actions/checkout@v3 + - name: Create Dockerfile.dev + run: | + echo "# Small linux image with iter8 binary + FROM debian:buster-slim -# # Install curl -# RUN apt-get update && apt-get install -y curl + # Install curl + RUN apt-get update && apt-get install -y curl -# # Download iter8 compressed binary -# # use COPY instead of wget -# COPY _dist/iter8-linux-amd64.tar.gz iter8-linux-amd64.tar.gz + # Download iter8 compressed binary + # use COPY instead of wget + COPY _dist/iter8-linux-amd64.tar.gz iter8-linux-amd64.tar.gz -# # Extract iter8 -# RUN tar -xvf iter8-linux-amd64.tar.gz + # Extract iter8 + RUN tar -xvf iter8-linux-amd64.tar.gz -# # Extract iter8 -# RUN mv linux-amd64/iter8 /bin/iter8" > Dockerfile.dev -# - name: Get version and tag -# id: versionTag -# run: | -# # GitHub ref name -# VERSION=${GITHUB_REF_NAME} -# echo "VERSION: $VERSION" -# echo "VERSION=${VERSION}" >> "$GITHUB_ENV" -# echo "VERSION=${VERSION}" >> "$GITHUB_OUTPUT" -# - name: Get owner -# run: | -# OWNER_REPO=${{ github.repository }} -# OWNER=$(echo $OWNER_REPO | cut -f1 -d/) -# if [[ "$OWNER" == "iter8-tools" ]]; then -# OWNER=iter8 -# fi -# echo "OWNER: $OWNER" -# echo "OWNER=$OWNER" >> $GITHUB_ENV -# - name: Get image tag -# id: imageTag -# run: | -# # Docker image -# IMAGE_TAG=$(echo ${OWNER}/iter8-pr:${VERSION}) -# echo "IMAGE_TAG: $IMAGE_TAG" -# echo "IMAGE_TAG=${IMAGE_TAG}" >> "$GITHUB_ENV" -# echo "IMAGE_TAG=${IMAGE_TAG}" >> "$GITHUB_OUTPUT" -# - uses: docker/setup-buildx-action@v2 -# - uses: docker/login-action@v2 -# with: -# username: ${{ secrets.DOCKERHUB_USERNAME }} -# password: ${{ secrets.DOCKERHUB_SECRET }} -# - uses: docker/build-push-action@v4 -# with: -# platforms: linux/amd64 -# tags: ${{ env.IMAGE_TAG }} -# push: true -# file: "{context}/Dockerfile.dev" -# outputs: -# VERSION: ${{ steps.versionTag.outputs.VERSION }} -# IMAGE_TAG: ${{ steps.imageTag.outputs.IMAGE_TAG }} - -# kubernetes-load-test-http: -# name: HTTP load test (with readiness) at the edge of Kubernetes -# needs: build-push-test-image -# runs-on: ubuntu-latest -# steps: -# - name: Install Go -# uses: actions/setup-go@v4 -# with: -# go-version: 1.19 -# - name: Check out code into the Go module directory -# uses: actions/checkout@v3 -# - name: Build and install Iter8 -# run: make install -# - name: Start kind cluster -# uses: helm/kind-action@v1.5.0 -# with: -# wait: 300s - -# - name: Create httpbin application -# run: | -# kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 -# kubectl expose deploy httpbin --port=80 - -# - name: Install controller -# run: | -# helm install iter8 charts/controller --set image=${{ needs.assets.outputs.IMAGE_TAG }} --set logLevel=trace -# kubectl rollout status --watch --timeout=60s statefulset/iter8 - -# - name: Expose metrics service -# run: | -# kubectl port-forward service/iter8 8080:8080 & + # Extract iter8 + RUN mv linux-amd64/iter8 /bin/iter8" > Dockerfile.dev + - name: Get version and tag + id: versionTag + run: | + # GitHub ref name + VERSION=${GITHUB_REF_NAME} + echo "VERSION: $VERSION" + echo "VERSION=${VERSION}" >> "$GITHUB_ENV" + echo "VERSION=${VERSION}" >> "$GITHUB_OUTPUT" + - name: Get owner + run: | + OWNER_REPO=${{ github.repository }} + OWNER=$(echo $OWNER_REPO | cut -f1 -d/) + if [[ "$OWNER" == "iter8-tools" ]]; then + OWNER=iter8 + fi + 
echo "OWNER: $OWNER" + echo "OWNER=$OWNER" >> $GITHUB_ENV + - name: Get image tag + id: imageTag + run: | + # Docker image + IMAGE_TAG=$(echo ${OWNER}/iter8-pr:${VERSION}) + echo "IMAGE_TAG: $IMAGE_TAG" + echo "IMAGE_TAG=${IMAGE_TAG}" >> "$GITHUB_ENV" + echo "IMAGE_TAG=${IMAGE_TAG}" >> "$GITHUB_OUTPUT" + - uses: docker/setup-buildx-action@v2 + - uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_SECRET }} + - uses: docker/build-push-action@v4 + with: + platforms: linux/amd64 + tags: ${{ env.IMAGE_TAG }} + push: true + file: "{context}/Dockerfile.dev" + outputs: + VERSION: ${{ steps.versionTag.outputs.VERSION }} + IMAGE_TAG: ${{ steps.imageTag.outputs.IMAGE_TAG }} + + kubernetes-load-test-http: + name: HTTP load test (with readiness) at the edge of Kubernetes + needs: build-push-test-image + runs-on: ubuntu-latest + steps: + - name: Install Go + uses: actions/setup-go@v4 + with: + go-version: 1.19 + - name: Check out code into the Go module directory + uses: actions/checkout@v3 + - name: Build and install Iter8 + run: make install + - name: Start kind cluster + uses: helm/kind-action@v1.5.0 + with: + wait: 300s + + - name: Create httpbin application + run: | + kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 + kubectl expose deploy httpbin --port=80 + + - name: Install controller + run: | + helm install iter8 charts/controller --set image=${{ needs.assets.outputs.IMAGE_TAG }} --set logLevel=trace + kubectl rollout status --watch --timeout=60s statefulset/iter8 + + - name: Expose metrics service + run: | + kubectl port-forward service/iter8 8080:8080 & -# - name: load-test-http in Kubernetes -# run: | -# iter8 k launch --localChart --chartName charts/iter8 \ -# --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ -# --set "tasks={ready,http}" \ -# --set ready.deploy=httpbin \ -# --set ready.service=httpbin \ -# --set ready.timeout=60s \ -# --set http.url=http://httpbin.default/get \ -# --set http.duration="3s" -# sleep 60 - -# - name: Try other iter8 k commands -# run: | -# iter8 k log -# iter8 k delete - -# - name: Check GET /httpDashboard -# run: | -# curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f - -# - name: load-test-http with payload in Kubernetes -# run: | -# iter8 k launch --localChart --chartName charts/iter8 \ -# --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ -# --set "tasks={ready,http}" \ -# --set ready.deploy=httpbin \ -# --set ready.service=httpbin \ -# --set ready.timeout=60s \ -# --set http.url=http://httpbin.default/post \ -# --set http.payloadStr=hello \ -# --set http.duration="3s" -# sleep 60 - -# - name: Try other iter8 k commands -# run: | -# iter8 k log -# iter8 k delete - -# - name: Check GET /httpDashboard -# run: | -# curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f - -# - name: load-test-http with multiple endpoints in Kubernetes -# run: | -# iter8 k launch --localChart --chartName charts/iter8 \ -# --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ -# --set "tasks={ready,http}" \ -# --set ready.deploy=httpbin \ -# --set ready.service=httpbin \ -# --set ready.timeout=60s \ -# --set http.endpoints.get.url=http://httpbin.default/get \ -# --set http.endpoints.getAnything.url=http://httpbin.default/anything \ -# --set http.endpoints.post.url=http://httpbin.default/post \ -# --set http.endpoints.post.payloadStr=hello \ -# --set http.duration="3s" -# sleep 60 - -# - name: Try other iter8 k commands 
-# run: | -# iter8 k log -# iter8 k delete - -# - name: Check GET /httpDashboard -# run: | -# curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f - -# kubernetes-load-test-grpc: -# name: gRPC load test with various URLs -# needs: build-push-test-image -# runs-on: ubuntu-latest -# steps: -# - name: Install Go -# uses: actions/setup-go@v4 -# with: -# go-version: 1.19 -# - name: Check out code into the Go module directory -# uses: actions/checkout@v3 -# - name: Build and install Iter8 -# run: make install - -# - name: Start kind cluster -# uses: helm/kind-action@v1.5.0 -# with: -# wait: 300s - -# - name: Create routeguide application -# run: | -# kubectl create deployment routeguide --image=golang --port=50051 \ -# -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run server/server.go" -# kubectl expose deployment routeguide --port=50051 -# kubectl wait --for=condition=available --timeout=60s deployment/routeguide - -# - name: Test gRPC service with grpcurl -# run: | -# curl -sO https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml -# kubectl apply -f grpcurl-routeguide.yaml -# sleep 180 -# kubectl logs deploy/sleep + - name: load-test-http in Kubernetes + run: | + iter8 k launch --localChart --chartName charts/iter8 \ + --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ + --set "tasks={ready,http}" \ + --set ready.deploy=httpbin \ + --set ready.service=httpbin \ + --set ready.timeout=60s \ + --set http.url=http://httpbin.default/get \ + --set http.duration="3s" + sleep 60 + + - name: Try other iter8 k commands + run: | + iter8 k log + iter8 k delete + + - name: Check GET /httpDashboard + run: | + curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f + + - name: load-test-http with payload in Kubernetes + run: | + iter8 k launch --localChart --chartName charts/iter8 \ + --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ + --set "tasks={ready,http}" \ + --set ready.deploy=httpbin \ + --set ready.service=httpbin \ + --set ready.timeout=60s \ + --set http.url=http://httpbin.default/post \ + --set http.payloadStr=hello \ + --set http.duration="3s" + sleep 60 + + - name: Try other iter8 k commands + run: | + iter8 k log + iter8 k delete + + - name: Check GET /httpDashboard + run: | + curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f + + - name: load-test-http with multiple endpoints in Kubernetes + run: | + iter8 k launch --localChart --chartName charts/iter8 \ + --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ + --set "tasks={ready,http}" \ + --set ready.deploy=httpbin \ + --set ready.service=httpbin \ + --set ready.timeout=60s \ + --set http.endpoints.get.url=http://httpbin.default/get \ + --set http.endpoints.getAnything.url=http://httpbin.default/anything \ + --set http.endpoints.post.url=http://httpbin.default/post \ + --set http.endpoints.post.payloadStr=hello \ + --set http.duration="3s" + sleep 60 + + - name: Try other iter8 k commands + run: | + iter8 k log + iter8 k delete + + - name: Check GET /httpDashboard + run: | + curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f + + kubernetes-load-test-grpc: + name: gRPC load test with various URLs + needs: build-push-test-image + runs-on: ubuntu-latest + steps: + - name: Install Go + uses: 
actions/setup-go@v4 + with: + go-version: 1.19 + - name: Check out code into the Go module directory + uses: actions/checkout@v3 + - name: Build and install Iter8 + run: make install + + - name: Start kind cluster + uses: helm/kind-action@v1.5.0 + with: + wait: 300s + + - name: Create routeguide application + run: | + kubectl create deployment routeguide --image=golang --port=50051 \ + -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run server/server.go" + kubectl expose deployment routeguide --port=50051 + kubectl wait --for=condition=available --timeout=60s deployment/routeguide + + - name: Test gRPC service with grpcurl + run: | + curl -sO https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml + kubectl apply -f grpcurl-routeguide.yaml + sleep 180 + kubectl logs deploy/sleep -# - name: Install controller -# run: | -# helm install iter8 charts/controller --set image=${{ needs.assets.outputs.IMAGE_TAG }} --set logLevel=trace -# kubectl rollout status --watch --timeout=60s statefulset/iter8 - -# - name: Expose metrics service -# run: | -# kubectl port-forward service/iter8 8080:8080 & - -# - name: load test grpc service -# run: | -# iter8 k launch --localChart --chartName charts/iter8 \ -# --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ -# --set "tasks={ready,grpc}" \ -# --set ready.deploy=routeguide \ -# --set ready.service=routeguide \ -# --set ready.timeout=60s \ -# --set grpc.host=routeguide.default:50051 \ -# --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ -# --set grpc.call=routeguide.RouteGuide.GetFeature \ -# --set grpc.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json -# sleep 60 - -# - name: Try other iter8 k commands -# run: | -# iter8 k log -# iter8 k delete - -# - name: Check GET /grpcDashboard -# run: | -# curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f - -# - name: load test grpc service with multiple endpoints -# run: | -# iter8 k launch --localChart --chartName charts/iter8 \ -# --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ -# --set "tasks={ready,grpc}" \ -# --set ready.deploy=routeguide \ -# --set ready.service=routeguide \ -# --set ready.timeout=60s \ -# --set grpc.host=routeguide.default:50051 \ -# --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ -# --set grpc.endpoints.getFeature.call=routeguide.RouteGuide.GetFeature \ -# --set grpc.endpoints.getFeature.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ -# --set grpc.endpoints.listFeatures.call=routeguide.RouteGuide.ListFeatures \ -# --set grpc.endpoints.listFeatures.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/server.json -# sleep 60 - -# - name: Try other iter8 k commands -# run: | -# iter8 k log -# iter8 k delete - -# - name: Check GET /grpcDashboard -# run: | -# curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f - -# kubernetes-load-test-grpc2: -# name: gRPC load test 2 with various URLs -# needs: build-push-test-image -# runs-on: ubuntu-latest -# steps: -# - name: Install Go -# uses: actions/setup-go@v4 -# with: -# go-version: 1.19 
-# - name: Check out code into the Go module directory -# uses: actions/checkout@v3 -# - name: Build and install Iter8 -# run: make install - -# - name: Start kind cluster -# uses: helm/kind-action@v1.5.0 -# with: -# wait: 300s - -# - name: Create hello application -# run: | -# kubectl create deploy hello --image=docker.io/grpc/java-example-hostname:latest --port=50051 -# kubectl expose deploy hello --port=50051 - -# - name: Install controller -# run: | -# helm install iter8 charts/controller --set image=${{ needs.assets.outputs.IMAGE_TAG }} --set logLevel=trace -# kubectl rollout status --watch --timeout=60s statefulset/iter8 - -# - name: Expose metrics service -# run: | -# kubectl port-forward service/iter8 8080:8080 & - -# - name: load test grpc service with protoURL -# run: | -# iter8 k launch --localChart --chartName charts/iter8 \ -# --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ -# --set "tasks={ready,grpc}" \ -# --set ready.deploy=hello \ -# --set ready.service=hello \ -# --set ready.timeout=60s \ -# --set grpc.host="hello.default:50051" \ -# --set grpc.call="helloworld.Greeter.SayHello" \ -# --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ -# --set grpc.data.name="frodo" -# sleep 60 - -# - name: Try other iter8 k commands -# run: | -# iter8 k log -# iter8 k delete - -# - name: Check GET /grpcDashboard -# run: | -# curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f - -# - name: load test grpc service with proto/data/metadata URLs -# run: | -# iter8 k launch --localChart --chartName charts/iter8 \ -# --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ -# --set "tasks={ready,grpc}" \ -# --set ready.deploy=hello \ -# --set ready.service=hello \ -# --set ready.timeout=60s \ -# --set grpc.host="hello.default:50051" \ -# --set grpc.call="helloworld.Greeter.SayHello" \ -# --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ -# --set grpc.dataURL="https://gist.githubusercontent.com/sriumcp/3f3178f4b698af6696c925832e51b0ba/raw/d02aa698d34aa2067f7a2f6afb4ceb616b0db822/name.json" \ -# --set grpc.metadataURL="https://gist.githubusercontent.com/sriumcp/3f3178f4b698af6696c925832e51b0ba/raw/d02aa698d34aa2067f7a2f6afb4ceb616b0db822/name.json" -# sleep 60 - -# - name: Try other iter8 k commands -# run: | -# iter8 k log -# iter8 k delete - -# - name: Check GET /grpcDashboard -# run: | -# curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f + - name: Install controller + run: | + helm install iter8 charts/controller --set image=${{ needs.assets.outputs.IMAGE_TAG }} --set logLevel=trace + kubectl rollout status --watch --timeout=60s statefulset/iter8 + + - name: Expose metrics service + run: | + kubectl port-forward service/iter8 8080:8080 & + + - name: load test grpc service + run: | + iter8 k launch --localChart --chartName charts/iter8 \ + --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ + --set "tasks={ready,grpc}" \ + --set ready.deploy=routeguide \ + --set ready.service=routeguide \ + --set ready.timeout=60s \ + --set grpc.host=routeguide.default:50051 \ + --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ + --set grpc.call=routeguide.RouteGuide.GetFeature \ + --set grpc.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json + sleep 60 + + - 
name: Try other iter8 k commands + run: | + iter8 k log + iter8 k delete + + - name: Check GET /grpcDashboard + run: | + curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f + + - name: load test grpc service with multiple endpoints + run: | + iter8 k launch --localChart --chartName charts/iter8 \ + --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ + --set "tasks={ready,grpc}" \ + --set ready.deploy=routeguide \ + --set ready.service=routeguide \ + --set ready.timeout=60s \ + --set grpc.host=routeguide.default:50051 \ + --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ + --set grpc.endpoints.getFeature.call=routeguide.RouteGuide.GetFeature \ + --set grpc.endpoints.getFeature.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ + --set grpc.endpoints.listFeatures.call=routeguide.RouteGuide.ListFeatures \ + --set grpc.endpoints.listFeatures.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/server.json + sleep 60 + + - name: Try other iter8 k commands + run: | + iter8 k log + iter8 k delete + + - name: Check GET /grpcDashboard + run: | + curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f + + kubernetes-load-test-grpc2: + name: gRPC load test 2 with various URLs + needs: build-push-test-image + runs-on: ubuntu-latest + steps: + - name: Install Go + uses: actions/setup-go@v4 + with: + go-version: 1.19 + - name: Check out code into the Go module directory + uses: actions/checkout@v3 + - name: Build and install Iter8 + run: make install + + - name: Start kind cluster + uses: helm/kind-action@v1.5.0 + with: + wait: 300s + + - name: Create hello application + run: | + kubectl create deploy hello --image=docker.io/grpc/java-example-hostname:latest --port=50051 + kubectl expose deploy hello --port=50051 + + - name: Install controller + run: | + helm install iter8 charts/controller --set image=${{ needs.assets.outputs.IMAGE_TAG }} --set logLevel=trace + kubectl rollout status --watch --timeout=60s statefulset/iter8 + + - name: Expose metrics service + run: | + kubectl port-forward service/iter8 8080:8080 & + + - name: load test grpc service with protoURL + run: | + iter8 k launch --localChart --chartName charts/iter8 \ + --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ + --set "tasks={ready,grpc}" \ + --set ready.deploy=hello \ + --set ready.service=hello \ + --set ready.timeout=60s \ + --set grpc.host="hello.default:50051" \ + --set grpc.call="helloworld.Greeter.SayHello" \ + --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ + --set grpc.data.name="frodo" + sleep 60 + + - name: Try other iter8 k commands + run: | + iter8 k log + iter8 k delete + + - name: Check GET /grpcDashboard + run: | + curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f + + - name: load test grpc service with proto/data/metadata URLs + run: | + iter8 k launch --localChart --chartName charts/iter8 \ + --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ + --set "tasks={ready,grpc}" \ + --set ready.deploy=hello \ + --set ready.service=hello \ + --set ready.timeout=60s \ + --set grpc.host="hello.default:50051" \ + --set grpc.call="helloworld.Greeter.SayHello" \ + --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" 
\ + --set grpc.dataURL="https://gist.githubusercontent.com/sriumcp/3f3178f4b698af6696c925832e51b0ba/raw/d02aa698d34aa2067f7a2f6afb4ceb616b0db822/name.json" \ + --set grpc.metadataURL="https://gist.githubusercontent.com/sriumcp/3f3178f4b698af6696c925832e51b0ba/raw/d02aa698d34aa2067f7a2f6afb4ceb616b0db822/name.json" + sleep 60 + + - name: Try other iter8 k commands + run: | + iter8 k log + iter8 k delete + + - name: Check GET /grpcDashboard + run: | + curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f From aad6f4e1bf38a342f5c308fbf106a135a90ead9d Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Thu, 24 Aug 2023 13:47:27 -0400 Subject: [PATCH 112/121] Respond to Sri's comments about workflows Signed-off-by: Alan Cha --- .github/workflows/assets.yaml | 267 ------- .github/workflows/draftrelease.yaml | 33 +- .github/workflows/golangci-lint.yml | 88 ++- .github/workflows/linkcheck.yaml | 56 +- .github/workflows/lintcharts.yaml | 114 +-- .github/workflows/lintcharts2.yaml | 135 ++-- .github/workflows/releaseassets.yaml | 272 +++++++ .github/workflows/releasecharts.yaml | 434 ++++++++++- .github/workflows/spellcheck.yaml | 40 +- .github/workflows/test.yaml | 122 ---- .github/workflows/testcharts.yaml | 966 ++++++++++++------------- .github/workflows/testcode.yaml | 129 ++++ .github/workflows/testkustomize.yaml | 219 +++--- .github/workflows/testperformance.yaml | 370 ---------- .github/workflows/versionbump.yaml | 182 ++--- 15 files changed, 1729 insertions(+), 1698 deletions(-) delete mode 100644 .github/workflows/assets.yaml create mode 100644 .github/workflows/releaseassets.yaml delete mode 100644 .github/workflows/test.yaml create mode 100644 .github/workflows/testcode.yaml delete mode 100644 .github/workflows/testperformance.yaml diff --git a/.github/workflows/assets.yaml b/.github/workflows/assets.yaml deleted file mode 100644 index 0fafd2960..000000000 --- a/.github/workflows/assets.yaml +++ /dev/null @@ -1,267 +0,0 @@ -# name: Publish binaries and Docker image - -# on: -# release: -# types: [published] - -# jobs: -# assets: -# name: Publish binaries -# runs-on: ubuntu-latest -# steps: -# - name: Install Go -# uses: actions/setup-go@v4 -# with: -# go-version: 1.19 -# - uses: actions/checkout@v3 -# - name: Build binaries -# run: | -# VERSION=${GITHUB_REF#refs/*/} -# echo "Version: ${VERSION}" -# make dist -# - name: Upload binaries to release -# uses: svenstaro/upload-release-action@v2 -# with: -# repo_token: ${{ secrets.GITHUB_TOKEN }} -# file: _dist/iter8-*.tar.gz -# tag: ${{ github.ref }} -# overwrite: true -# file_glob: true -# - name: Create checksum -# run: | -# VERSION=${GITHUB_REF#refs/*/} -# echo "VERSION=$VERSION" >> $GITHUB_ENV -# wget https://github.com/iter8-tools/iter8/archive/refs/tags/${VERSION}.zip -# sha256sum ${VERSION}.zip > checksum.txt -# wget https://github.com/iter8-tools/iter8/archive/refs/tags/${VERSION}.tar.gz -# sha256sum ${VERSION}.tar.gz >> checksum.txt -# cd _dist -# for f in iter8-*.tar.gz -# do -# sha256sum ${f} >> ../checksum.txt -# done -# # pick up darwin checksum and export it -# echo "SHAFORMAC=$(grep darwin ../checksum.txt | awk '{print $1}')" >> $GITHUB_ENV -# - name: Upload checksum to release -# uses: svenstaro/upload-release-action@v2 -# with: -# repo_token: ${{ secrets.GITHUB_TOKEN }} -# asset_name: checksum.txt -# file: checksum.txt -# tag: ${{ github.ref }} -# overwrite: true - -# build-and-push: -# name: Push Iter8 image to Docker Hub -# needs: assets -# runs-on: ubuntu-latest -# steps: -# - uses: actions/checkout@v3 
-# with: -# fetch-depth: 0 -# - name: Get version -# run: | -# tagref=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,') -# # Strip "v" prefix from tagref -# echo "VERSION=$(echo $tagref | sed -e 's/^v//')" >> $GITHUB_ENV -# echo "MAJOR_MINOR_VERSION=$(echo $tagref | sed -e 's/^v//' -e 's,\([0-9]*\.[0-9]*\)\.\([0-9]*\),\1,')" >> $GITHUB_ENV -# - name: Get owner -# run: | -# ownerrepo=${{ github.repository }} -# owner=$(echo $ownerrepo | cut -f1 -d/) -# if [[ "$owner" == "iter8-tools" ]]; then -# owner=iter8 -# fi -# echo "OWNER=$owner" >> $GITHUB_ENV -# - uses: docker/setup-buildx-action@v2 -# - uses: docker/login-action@v2 -# with: -# username: ${{ secrets.DOCKERHUB_USERNAME }} -# password: ${{ secrets.DOCKERHUB_SECRET }} -# - uses: docker/build-push-action@v4 -# with: -# platforms: linux/amd64,linux/arm64 -# tags: ${{ env.OWNER }}/iter8:${{ env.VERSION }},${{ env.OWNER }}/iter8:${{ env.MAJOR_MINOR_VERSION }},${{ env.OWNER }}/iter8:latest -# push: true -# build-args: | -# TAG=v${{ env.VERSION }} - -# kubernetes-http-experiment: -# name: Kubernetes HTTP load test -# needs: build-and-push -# runs-on: ubuntu-latest -# steps: -# - name: Install Iter8 -# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 -# - name: Start kind cluster -# uses: helm/kind-action@v1.5.0 -# with: -# wait: 300s -# - name: Create httpbin application -# run: | -# kubectl create deployment httpbin --image=kennethreitz/httpbin -# kubectl expose deployment httpbin --type=ClusterIP --port=80 -# kubectl wait --for=condition=available --timeout=60s deploy/httpbin -# - name: Install controller -# run: | -# helm install --repo https://iter8-tools.github.io/iter8 iter8 controller --set logLevel=trace -# kubectl rollout status --watch --timeout=60s statefulset/iter8 -# - name: iter8 k launch -# run: | -# iter8 k launch \ -# --set tasks={http} \ -# --set http.url="http://httpbin.default/get" -# sleep 60 -# - name: Try other iter8 k commands -# run: | -# iter8 k log -# iter8 k delete -# - name: Check GET /httpDashboard -# run: | -# curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f - -# kubernetes-grpc-experiment: -# name: Kubernetes gRPC load test -# needs: build-and-push -# runs-on: ubuntu-latest -# steps: -# - name: Install Iter8 -# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 -# - name: Start kind cluster -# uses: helm/kind-action@v1.5.0 -# with: -# wait: 300s -# - name: Create routeguide application -# run: | -# kubectl create deployment routeguide --image=golang --port=50051 \ -# -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run server/server.go" -# kubectl expose deployment routeguide --port=50051 -# kubectl wait --for=condition=available --timeout=60s deployment/routeguide -# - name: Install controller -# run: | -# helm install --repo https://iter8-tools.github.io/iter8 iter8 controller --set logLevel=trace -# kubectl rollout status --watch --timeout=60s statefulset/iter8 -# - name: Test gRPC service with grpcurl -# run: | -# curl -sO https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml -# kubectl apply -f grpcurl-routeguide.yaml -# sleep 180 -# kubectl logs deploy/sleep -# - name: iter8 k launch -# run: | -# iter8 k launch \ -# --set tasks={grpc} \ -# --set grpc.host=routeguide.default:50051 \ -# --set 
grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ -# --set grpc.call=routeguide.RouteGuide.GetFeature \ -# --set grpc.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ -# sleep 60 -# - name: Try other iter8 k commands -# run: | -# iter8 k log -# iter8 k delete -# - name: Check GET /grpcDashboard -# run: | -# curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f - -# kubernetes-grpc-experiment2: -# name: Kubernetes gRPC load test 2 -# needs: build-and-push -# runs-on: ubuntu-latest -# steps: -# - name: Install Iter8 -# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 -# - name: Start kind cluster -# uses: helm/kind-action@v1.5.0 -# with: -# wait: 300s -# - name: Create hello application -# run: | -# kubectl create deploy hello --image=docker.io/grpc/java-example-hostname:latest --port=50051 -# kubectl expose deploy hello --port=50051 -# kubectl wait --for=condition=available --timeout=60s deploy/hello -# - name: Install controller -# run: | -# helm install --repo https://iter8-tools.github.io/iter8 iter8 controller --set logLevel=trace -# kubectl rollout status --watch --timeout=60s statefulset/iter8 -# - name: iter8 k launch -# run: | -# iter8 k launch \ -# --set tasks={grpc} \ -# --set grpc.host="hello.default:50051" \ -# --set grpc.call="helloworld.Greeter.SayHello" \ -# --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" -# sleep 60 -# - name: Try other iter8 k commands -# run: | -# iter8 k log -# iter8 k delete -# - name: Check GET /grpcDashboard -# run: | -# curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f - -# readiness: -# name: Kubernetes readiness test -# needs: build-and-push -# runs-on: ubuntu-latest -# steps: -# - name: Install Iter8 -# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 -# - name: Start kind cluster -# uses: helm/kind-action@v1.5.0 -# with: -# wait: 300s -# - name: Create httpbin application -# run: | -# kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 -# kubectl expose deploy httpbin --port=80 -# - name: Install controller -# run: | -# helm install --repo https://iter8-tools.github.io/iter8 iter8 controller --set logLevel=trace -# kubectl rollout status --watch --timeout=60s statefulset/iter8 -# - name: k launch with readiness checks -# run: | -# iter8 k launch \ -# --set "tasks={ready,http}" \ -# --set ready.deploy="httpbin" \ -# --set ready.service="httpbin" \ -# --set ready.timeout=60s \ -# --set http.url=http://httpbin.default/get -# sleep 60 -# - name: Check GET /httpDashboard -# run: | -# curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f - -# readiness-with-namespace: -# name: Kubernetes readiness test with namespace -# needs: build-and-push -# runs-on: ubuntu-latest -# steps: -# - name: Install Iter8 -# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 -# - name: Start kind cluster -# uses: helm/kind-action@v1.5.0 -# with: -# wait: 300s -# - name: Create httpbin application -# run: | -# kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 -# kubectl expose deploy httpbin --port=80 -# kubectl create namespace experiments -# - name: Install controller -# run: | -# helm install --repo https://iter8-tools.github.io/iter8 iter8 controller --set logLevel=trace -# kubectl 
rollout status --watch --timeout=60s statefulset/iter8 -# - name: k launch with readiness checks -# run: | -# iter8 k launch -n experiments \ -# --set "tasks={ready,http}" \ -# --set ready.deploy="httpbin" \ -# --set ready.service="httpbin" \ -# --set ready.timeout=60s \ -# --set ready.namespace=default \ -# --set http.url=http://httpbin.default/get -# sleep 60 -# - name: Check GET /httpDashboard -# run: | -# curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f \ No newline at end of file diff --git a/.github/workflows/draftrelease.yaml b/.github/workflows/draftrelease.yaml index 5236cebf8..9db7a6cee 100644 --- a/.github/workflows/draftrelease.yaml +++ b/.github/workflows/draftrelease.yaml @@ -1,18 +1,19 @@ -# name: Release drafter +name: Release drafter -# on: -# push: -# # branches to consider in the event; optional, defaults to all -# branches: -# - master +# Runs when changes are pushed -# jobs: -# update_release_draft: -# runs-on: ubuntu-latest -# steps: -# # Drafts your next Release notes as Pull Requests are merged into any tracked branch -# - uses: release-drafter/release-drafter@v5 -# with: -# config-name: release-config.yaml -# env: -# GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file +on: + push: + branches: + - master + +jobs: + update_release_draft: + runs-on: ubuntu-latest + steps: + # Drafts your next Release notes as Pull Requests are merged into any tracked branch + - uses: release-drafter/release-drafter@v5 + with: + config-name: release-config.yaml + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 3916deceb..a62cf3c45 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -1,43 +1,51 @@ -# name: golangci-lint +name: golangci-lint -# on: -# pull_request: +# Only runs when there are golang code changes -# permissions: -# contents: read -# # Optional: allow read access to pull request. Use with `only-new-issues` option. -# # pull-requests: read +# Lint golang files + +on: + pull_request: + branches: + - master + paths: + - **.go + +permissions: + contents: read + # Optional: allow read access to pull request. Use with `only-new-issues` option. + # pull-requests: read -# jobs: -# golangci: -# name: lint -# runs-on: ubuntu-latest -# steps: -# - uses: actions/setup-go@v4 -# with: -# go-version: 1.19 -# - uses: actions/checkout@v3 -# - name: golangci-lint -# uses: golangci/golangci-lint-action@v3 -# with: -# # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version -# version: v1.50.1 - -# # Optional: working directory, useful for monorepos -# # working-directory: somedir - -# # Optional: golangci-lint command line arguments. -# # args: --issues-exit-code=0 - -# # Optional: show only new issues if it's a pull request. The default value is `false`. -# # only-new-issues: true - -# # Optional: if set to true then the all caching functionality will be complete disabled, -# # takes precedence over all other caching options. -# # skip-cache: true - -# # Optional: if set to true then the action don't cache or restore ~/go/pkg. -# # skip-pkg-cache: true - -# # Optional: if set to true then the action don't cache or restore ~/.cache/go-build. 
-# # skip-build-cache: true \ No newline at end of file +jobs: + golangci: + name: lint + runs-on: ubuntu-latest + steps: + - uses: actions/setup-go@v4 + with: + go-version: 1.19 + - uses: actions/checkout@v3 + - name: golangci-lint + uses: golangci/golangci-lint-action@v3 + with: + # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version + version: v1.50.1 + + # Optional: working directory, useful for monorepos + # working-directory: somedir + + # Optional: golangci-lint command line arguments. + # args: --issues-exit-code=0 + + # Optional: show only new issues if it's a pull request. The default value is `false`. + # only-new-issues: true + + # Optional: if set to true then the all caching functionality will be complete disabled, + # takes precedence over all other caching options. + # skip-cache: true + + # Optional: if set to true then the action don't cache or restore ~/go/pkg. + # skip-pkg-cache: true + + # Optional: if set to true then the action don't cache or restore ~/.cache/go-build. + # skip-build-cache: true \ No newline at end of file diff --git a/.github/workflows/linkcheck.yaml b/.github/workflows/linkcheck.yaml index 2cbb7f145..9057cf83f 100644 --- a/.github/workflows/linkcheck.yaml +++ b/.github/workflows/linkcheck.yaml @@ -1,29 +1,35 @@ -# name: Link checker +name: Link checker -# on: -# pull_request: -# branches: -# - master -# schedule: -# - cron: "0 0 1 * *" +# Only runs when there are markdown changes and intermittently -# # A workflow run is made up of one or more jobs that can run sequentially or in parallel -# jobs: -# # This workflow contains a single job called "build" -# build: -# # The type of runner that the job will run on -# runs-on: ubuntu-latest +# Check links across markdown files -# # Steps represent a sequence of tasks that will be executed as part of the job -# steps: -# # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it -# - uses: actions/checkout@v3 +on: + pull_request: + branches: + - master + paths: + - **.md + schedule: + - cron: "0 0 1 * *" -# - name: Link checker -# id: lychee -# uses: lycheeverse/lychee-action@v1.6.1 -# env: -# GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} -# with: -# fail: true -# args: -v '**/*.md' \ No newline at end of file +# A workflow run is made up of one or more jobs that can run sequentially or in parallel +jobs: + # This workflow contains a single job called "build" + build: + # The type of runner that the job will run on + runs-on: ubuntu-latest + + # Steps represent a sequence of tasks that will be executed as part of the job + steps: + # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it + - uses: actions/checkout@v3 + + - name: Link checker + id: lychee + uses: lycheeverse/lychee-action@v1.6.1 + env: + GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} + with: + fail: true + args: -v '**/*.md' \ No newline at end of file diff --git a/.github/workflows/lintcharts.yaml b/.github/workflows/lintcharts.yaml index d99e31be7..a121cb095 100644 --- a/.github/workflows/lintcharts.yaml +++ b/.github/workflows/lintcharts.yaml @@ -1,65 +1,71 @@ -# name: Lint Helm charts +name: Lint Helm charts -# on: -# pull_request: -# branches: -# - master +# Only runs when charts have changed -# jobs: -# # Get the paths for the Helm charts to lint -# get_paths: -# runs-on: ubuntu-latest +# Lint Helm charts -# steps: -# - uses: actions/checkout@v3 -# with: -# fetch-depth: 0 +on: + pull_request: + branches: + - master + paths: + - charts/** -# - name: 
Get the paths for Helm charts to lint -# id: set-matrix -# run: | -# # Get paths (in string form) -# stringPaths=$(find -maxdepth 2 -path './charts/*') +jobs: + # Get the paths for the Helm charts to lint + get_paths: + runs-on: ubuntu-latest -# # Check paths (length greater than 0) -# stringPathsLength=$(echo ${#stringPaths}) -# if (( stringPathsLength == 0 )); -# then -# echo "No paths to check" -# exit 1 -# fi + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 -# # Serialize paths into JSON array -# paths=$(jq -ncR '[inputs]' <<< "$stringPaths") + - name: Get the paths for Helm charts to lint + id: set-matrix + run: | + # Get paths (in string form) + stringPaths=$(find -maxdepth 2 -path './charts/*') -# # Output serialized paths -# echo "matrix=$paths" >> $GITHUB_OUTPUT -# echo $paths + # Check paths (length greater than 0) + stringPathsLength=$(echo ${#stringPaths}) + if (( stringPathsLength == 0 )); + then + echo "No paths to check" + exit 1 + fi -# outputs: -# matrix: ${{ steps.set-matrix.outputs.matrix }} + # Serialize paths into JSON array + paths=$(jq -ncR '[inputs]' <<< "$stringPaths") -# # Lint Helm charts based on paths provided by previous job -# lint: -# name: Test changed-files -# needs: get_paths -# runs-on: ubuntu-latest -# strategy: -# matrix: -# version: ${{ fromJson(needs.get_paths.outputs.matrix) }} -# steps: -# - uses: actions/checkout@v3 -# with: -# fetch-depth: 0 + # Output serialized paths + echo "matrix=$paths" >> $GITHUB_OUTPUT + echo $paths -# - name: Get modified files in the ${{ matrix.version }} folder -# id: modified-files -# uses: tj-actions/changed-files@v35 -# with: -# files: ${{ matrix.version }} + outputs: + matrix: ${{ steps.set-matrix.outputs.matrix }} -# - name: Lint Helm charts in the ${{ matrix.version }} folder -# uses: stackrox/kube-linter-action@v1 -# if: steps.modified-files.outputs.any_modified == 'true' -# with: -# directory: ${{ matrix.version }} \ No newline at end of file + # Lint Helm charts based on paths provided by previous job + lint: + name: Test changed-files + needs: get_paths + runs-on: ubuntu-latest + strategy: + matrix: + version: ${{ fromJson(needs.get_paths.outputs.matrix) }} + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Get modified files in the ${{ matrix.version }} folder + id: modified-files + uses: tj-actions/changed-files@v35 + with: + files: ${{ matrix.version }} + + - name: Lint Helm charts in the ${{ matrix.version }} folder + uses: stackrox/kube-linter-action@v1 + if: steps.modified-files.outputs.any_modified == 'true' + with: + directory: ${{ matrix.version }} \ No newline at end of file diff --git a/.github/workflows/lintcharts2.yaml b/.github/workflows/lintcharts2.yaml index 89f13bc2e..18b9df2a1 100644 --- a/.github/workflows/lintcharts2.yaml +++ b/.github/workflows/lintcharts2.yaml @@ -1,77 +1,84 @@ -# name: Additional Helm chart linting -# # Like lintcharts.yaml, the other lint Helm chart workflow, this workflow uses kube-linter -# # kube-linter checks Helm templates but it does not check what is contained in {{ define ... }} blocks -# # This workflow builds on the other workflow by producing Kubernetes YAML files from the templates and running kube-linter on those files -# # See iter8-tools/iter8#1452 +name: Additional Helm chart linting +# Like lintcharts.yaml, the other lint Helm chart workflow, this workflow uses kube-linter +# kube-linter checks Helm templates but it does not check what is contained in {{ define ... 
}} blocks +# This workflow builds on the other workflow by producing Kubernetes YAML files from the templates and running kube-linter on those files +# See iter8-tools/iter8#1452 -# on: -# pull_request: -# branches: -# - master +# Only runs when charts have changed -# jobs: -# http-experiment: -# name: Lint HTTP experiment -# runs-on: ubuntu-latest +# Lint Helm charts +# Use templates to create Kubernetes YAML files and lint them -# steps: -# - name: Check out code -# uses: actions/checkout@v3 +on: + pull_request: + branches: + - master + paths: + - charts/** -# - name: Get modified files in the charts/iter8 folder -# id: modified-files -# uses: tj-actions/changed-files@v35 -# with: -# files: charts/iter8 +jobs: + http-experiment: + name: Lint HTTP experiment + runs-on: ubuntu-latest -# - uses: azure/setup-helm@v3 -# if: steps.modified-files.outputs.any_modified == 'true' -# with: -# token: ${{ secrets.GITHUB_TOKEN }} + steps: + - name: Check out code + uses: actions/checkout@v3 -# - name: Create Kubernetes YAML file -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# helm template charts/iter8 \ -# --set tasks={http} \ -# --set http.url=http://httpbin.default/get >> iter8.yaml + - name: Get modified files in the charts/iter8 folder + id: modified-files + uses: tj-actions/changed-files@v35 + with: + files: charts/iter8 -# - name: Lint Kubernetes YAML file -# if: steps.modified-files.outputs.any_modified == 'true' -# uses: stackrox/kube-linter-action@v1 -# with: -# directory: iter8.yaml + - uses: azure/setup-helm@v3 + if: steps.modified-files.outputs.any_modified == 'true' + with: + token: ${{ secrets.GITHUB_TOKEN }} -# grpc-experiment: -# name: Lint gRPC experiment -# runs-on: ubuntu-latest + - name: Create Kubernetes YAML file + if: steps.modified-files.outputs.any_modified == 'true' + run: | + helm template charts/iter8 \ + --set tasks={http} \ + --set http.url=http://httpbin.default/get >> iter8.yaml -# steps: -# - name: Check out code -# uses: actions/checkout@v3 + - name: Lint Kubernetes YAML file + if: steps.modified-files.outputs.any_modified == 'true' + uses: stackrox/kube-linter-action@v1 + with: + directory: iter8.yaml -# - name: Get modified files in the charts/iter8 folder -# id: modified-files -# uses: tj-actions/changed-files@v35 -# with: -# files: charts/iter8 + grpc-experiment: + name: Lint gRPC experiment + runs-on: ubuntu-latest -# - uses: azure/setup-helm@v3 -# if: steps.modified-files.outputs.any_modified == 'true' -# with: -# token: ${{ secrets.GITHUB_TOKEN }} + steps: + - name: Check out code + uses: actions/checkout@v3 -# - name: Create Kubernetes YAML file -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# helm template charts/iter8 \ -# --set tasks={grpc} \ -# --set grpc.host="hello.default:50051" \ -# --set grpc.call="helloworld.Greeter.SayHello" \ -# --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" >> iter8.yaml + - name: Get modified files in the charts/iter8 folder + id: modified-files + uses: tj-actions/changed-files@v35 + with: + files: charts/iter8 -# - name: Lint Kubernetes YAML file -# if: steps.modified-files.outputs.any_modified == 'true' -# uses: stackrox/kube-linter-action@v1 -# with: -# directory: iter8.yaml + - uses: azure/setup-helm@v3 + if: steps.modified-files.outputs.any_modified == 'true' + with: + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Create Kubernetes YAML file + if: steps.modified-files.outputs.any_modified == 'true' + 
run: | + helm template charts/iter8 \ + --set tasks={grpc} \ + --set grpc.host="hello.default:50051" \ + --set grpc.call="helloworld.Greeter.SayHello" \ + --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" >> iter8.yaml + + - name: Lint Kubernetes YAML file + if: steps.modified-files.outputs.any_modified == 'true' + uses: stackrox/kube-linter-action@v1 + with: + directory: iter8.yaml diff --git a/.github/workflows/releaseassets.yaml b/.github/workflows/releaseassets.yaml new file mode 100644 index 000000000..530f115ac --- /dev/null +++ b/.github/workflows/releaseassets.yaml @@ -0,0 +1,272 @@ +name: Publish binaries and Docker image + +# Runs when a release is published + +# Build and publish binaries and release Docker image +# Test Docker image + +on: + release: + types: [published] + +jobs: + assets: + name: Publish binaries + runs-on: ubuntu-latest + steps: + - name: Install Go + uses: actions/setup-go@v4 + with: + go-version: 1.19 + - uses: actions/checkout@v3 + - name: Build binaries + run: | + VERSION=${GITHUB_REF#refs/*/} + echo "Version: ${VERSION}" + make dist + - name: Upload binaries to release + uses: svenstaro/upload-release-action@v2 + with: + repo_token: ${{ secrets.GITHUB_TOKEN }} + file: _dist/iter8-*.tar.gz + tag: ${{ github.ref }} + overwrite: true + file_glob: true + - name: Create checksum + run: | + VERSION=${GITHUB_REF#refs/*/} + echo "VERSION=$VERSION" >> $GITHUB_ENV + wget https://github.com/iter8-tools/iter8/archive/refs/tags/${VERSION}.zip + sha256sum ${VERSION}.zip > checksum.txt + wget https://github.com/iter8-tools/iter8/archive/refs/tags/${VERSION}.tar.gz + sha256sum ${VERSION}.tar.gz >> checksum.txt + cd _dist + for f in iter8-*.tar.gz + do + sha256sum ${f} >> ../checksum.txt + done + # pick up darwin checksum and export it + echo "SHAFORMAC=$(grep darwin ../checksum.txt | awk '{print $1}')" >> $GITHUB_ENV + - name: Upload checksum to release + uses: svenstaro/upload-release-action@v2 + with: + repo_token: ${{ secrets.GITHUB_TOKEN }} + asset_name: checksum.txt + file: checksum.txt + tag: ${{ github.ref }} + overwrite: true + + build-and-push: + name: Push Iter8 image to Docker Hub + needs: assets + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + - name: Get version + run: | + tagref=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,') + # Strip "v" prefix from tagref + echo "VERSION=$(echo $tagref | sed -e 's/^v//')" >> $GITHUB_ENV + echo "MAJOR_MINOR_VERSION=$(echo $tagref | sed -e 's/^v//' -e 's,\([0-9]*\.[0-9]*\)\.\([0-9]*\),\1,')" >> $GITHUB_ENV + - name: Get owner + run: | + ownerrepo=${{ github.repository }} + owner=$(echo $ownerrepo | cut -f1 -d/) + if [[ "$owner" == "iter8-tools" ]]; then + owner=iter8 + fi + echo "OWNER=$owner" >> $GITHUB_ENV + - uses: docker/setup-buildx-action@v2 + - uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_SECRET }} + - uses: docker/build-push-action@v4 + with: + platforms: linux/amd64,linux/arm64 + tags: ${{ env.OWNER }}/iter8:${{ env.VERSION }},${{ env.OWNER }}/iter8:${{ env.MAJOR_MINOR_VERSION }},${{ env.OWNER }}/iter8:latest + push: true + build-args: | + TAG=v${{ env.VERSION }} + + kubernetes-http-experiment: + name: Kubernetes HTTP load test + needs: build-and-push + runs-on: ubuntu-latest + steps: + - name: Install Iter8 + run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 + - name: Start kind cluster + uses: 
helm/kind-action@v1.5.0 + with: + wait: 300s + - name: Create httpbin application + run: | + kubectl create deployment httpbin --image=kennethreitz/httpbin + kubectl expose deployment httpbin --type=ClusterIP --port=80 + kubectl wait --for=condition=available --timeout=60s deploy/httpbin + - name: Install controller + run: | + helm install --repo https://iter8-tools.github.io/iter8 iter8 controller --set logLevel=trace + kubectl rollout status --watch --timeout=60s statefulset/iter8 + - name: iter8 k launch + run: | + iter8 k launch \ + --set tasks={http} \ + --set http.url="http://httpbin.default/get" + sleep 60 + - name: Try other iter8 k commands + run: | + iter8 k log + iter8 k delete + - name: Check GET /httpDashboard + run: | + curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f + + kubernetes-grpc-experiment: + name: Kubernetes gRPC load test + needs: build-and-push + runs-on: ubuntu-latest + steps: + - name: Install Iter8 + run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 + - name: Start kind cluster + uses: helm/kind-action@v1.5.0 + with: + wait: 300s + - name: Create routeguide application + run: | + kubectl create deployment routeguide --image=golang --port=50051 \ + -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run server/server.go" + kubectl expose deployment routeguide --port=50051 + kubectl wait --for=condition=available --timeout=60s deployment/routeguide + - name: Install controller + run: | + helm install --repo https://iter8-tools.github.io/iter8 iter8 controller --set logLevel=trace + kubectl rollout status --watch --timeout=60s statefulset/iter8 + - name: Test gRPC service with grpcurl + run: | + curl -sO https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml + kubectl apply -f grpcurl-routeguide.yaml + sleep 180 + kubectl logs deploy/sleep + - name: iter8 k launch + run: | + iter8 k launch \ + --set tasks={grpc} \ + --set grpc.host=routeguide.default:50051 \ + --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ + --set grpc.call=routeguide.RouteGuide.GetFeature \ + --set grpc.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ + sleep 60 + - name: Try other iter8 k commands + run: | + iter8 k log + iter8 k delete + - name: Check GET /grpcDashboard + run: | + curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f + + kubernetes-grpc-experiment2: + name: Kubernetes gRPC load test 2 + needs: build-and-push + runs-on: ubuntu-latest + steps: + - name: Install Iter8 + run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 + - name: Start kind cluster + uses: helm/kind-action@v1.5.0 + with: + wait: 300s + - name: Create hello application + run: | + kubectl create deploy hello --image=docker.io/grpc/java-example-hostname:latest --port=50051 + kubectl expose deploy hello --port=50051 + kubectl wait --for=condition=available --timeout=60s deploy/hello + - name: Install controller + run: | + helm install --repo https://iter8-tools.github.io/iter8 iter8 controller --set logLevel=trace + kubectl rollout status --watch --timeout=60s statefulset/iter8 + - name: iter8 k launch + run: | + iter8 k launch \ + --set tasks={grpc} \ + --set 
grpc.host="hello.default:50051" \ + --set grpc.call="helloworld.Greeter.SayHello" \ + --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" + sleep 60 + - name: Try other iter8 k commands + run: | + iter8 k log + iter8 k delete + - name: Check GET /grpcDashboard + run: | + curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f + + readiness: + name: Kubernetes readiness test + needs: build-and-push + runs-on: ubuntu-latest + steps: + - name: Install Iter8 + run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 + - name: Start kind cluster + uses: helm/kind-action@v1.5.0 + with: + wait: 300s + - name: Create httpbin application + run: | + kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 + kubectl expose deploy httpbin --port=80 + - name: Install controller + run: | + helm install --repo https://iter8-tools.github.io/iter8 iter8 controller --set logLevel=trace + kubectl rollout status --watch --timeout=60s statefulset/iter8 + - name: k launch with readiness checks + run: | + iter8 k launch \ + --set "tasks={ready,http}" \ + --set ready.deploy="httpbin" \ + --set ready.service="httpbin" \ + --set ready.timeout=60s \ + --set http.url=http://httpbin.default/get + sleep 60 + - name: Check GET /httpDashboard + run: | + curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f + + readiness-with-namespace: + name: Kubernetes readiness test with namespace + needs: build-and-push + runs-on: ubuntu-latest + steps: + - name: Install Iter8 + run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 + - name: Start kind cluster + uses: helm/kind-action@v1.5.0 + with: + wait: 300s + - name: Create httpbin application + run: | + kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 + kubectl expose deploy httpbin --port=80 + kubectl create namespace experiments + - name: Install controller + run: | + helm install --repo https://iter8-tools.github.io/iter8 iter8 controller --set logLevel=trace + kubectl rollout status --watch --timeout=60s statefulset/iter8 + - name: k launch with readiness checks + run: | + iter8 k launch -n experiments \ + --set "tasks={ready,http}" \ + --set ready.deploy="httpbin" \ + --set ready.service="httpbin" \ + --set ready.timeout=60s \ + --set ready.namespace=default \ + --set http.url=http://httpbin.default/get + sleep 60 + - name: Check GET /httpDashboard + run: | + curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f \ No newline at end of file diff --git a/.github/workflows/releasecharts.yaml b/.github/workflows/releasecharts.yaml index e926d7465..ea7a8a581 100644 --- a/.github/workflows/releasecharts.yaml +++ b/.github/workflows/releasecharts.yaml @@ -1,34 +1,400 @@ -# name: Release charts - -# on: -# push: -# branches: -# - master - -# jobs: -# release: -# permissions: -# contents: write -# runs-on: ubuntu-latest -# steps: -# - name: Checkout -# uses: actions/checkout@v3 -# with: -# fetch-depth: 0 - -# - name: Configure Git -# run: | -# git config user.name "$GITHUB_ACTOR" -# git config user.email "$GITHUB_ACTOR@users.noreply.github.com" - -# - name: Install Helm -# uses: azure/setup-helm@v3 -# with: -# token: ${{ secrets.GITHUB_TOKEN }} - -# - name: Run chart-releaser -# uses: helm/chart-releaser-action@v1.5.0 -# with: -# config: config.yaml -# env: -# CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}" +name: Publish charts + +# Only runs when charts are pushed + +# 
Release charts and test them
+
+on:
+  push:
+    branches:
+      - master
+    paths:
+      - charts/**
+
+jobs:
+  release-charts:
+    permissions:
+      contents: write
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+
+      - name: Configure Git
+        run: |
+          git config user.name "$GITHUB_ACTOR"
+          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
+
+      - name: Install Helm
+        uses: azure/setup-helm@v3
+        with:
+          token: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Run chart-releaser
+        uses: helm/chart-releaser-action@v1.5.0
+        with:
+          config: config.yaml
+        env:
+          CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
+
+  http-experiment:
+    name: HTTP load test
+    needs: release-charts
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Check out code
+        uses: actions/checkout@v3
+
+      - name: Get modified files in the charts/iter8 folder
+        id: modified-files
+        uses: tj-actions/changed-files@v35
+        with:
+          files: charts/iter8
+
+      - name: Install Iter8
+        run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15
+
+      - name: Start kind cluster
+        uses: helm/kind-action@v1.5.0
+        with:
+          wait: 300s
+
+      - name: Create httpbin application
+        run: |
+          kubectl create deployment httpbin --image=kennethreitz/httpbin
+          kubectl expose deployment httpbin --type=ClusterIP --port=80
+          kubectl wait --for=condition=available --timeout=60s deploy/httpbin
+
+      - name: Install controller
+        run: |
+          helm install --repo https://iter8-tools.github.io/iter8 iter8 controller --set logLevel=trace
+          kubectl rollout status --watch --timeout=60s statefulset/iter8
+
+      - name: iter8 k launch
+        run: |
+          iter8 k launch \
+          --set "tasks={http}" \
+          --set http.url="http://httpbin.default/get"
+          sleep 60
+
+      - name: Try other iter8 k commands
+        run: |
+          iter8 k log
+          iter8 k delete
+
+      - name: Expose metrics service
+        run: |
+          kubectl port-forward service/iter8 8080:8080 &
+
+      - name: Check GET /httpDashboard
+        run: |
+          curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f
+
+  http-payload-experiment:
+    name: HTTP load test with payload
+    needs: release-charts
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Check out code
+        uses: actions/checkout@v3
+
+      - name: Get modified files in the charts/iter8 folder
+        id: modified-files
+        uses: tj-actions/changed-files@v35
+        with:
+          files: charts/iter8
+
+      - name: Install Iter8
+        run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.14
+
+      - name: Start kind cluster
+        uses: helm/kind-action@v1.5.0
+        with:
+          wait: 300s
+
+      - name: Create httpbin application
+        run: |
+          kubectl create deployment httpbin --image=kennethreitz/httpbin
+          kubectl expose deployment httpbin --type=ClusterIP --port=80
+          kubectl wait --for=condition=available --timeout=60s deploy/httpbin
+
+      - name: Install controller
+        run: |
+          helm install --repo https://iter8-tools.github.io/iter8 iter8 controller --set logLevel=trace
+          kubectl rollout status --watch --timeout=60s statefulset/iter8
+
+      - name: iter8 k launch
+        run: |
+          iter8 k launch \
+          --set "tasks={http}" \
+          --set http.url="http://httpbin.default/post" \
+          --set http.payloadStr=hello
+          sleep 60
+
+      - name: Try other iter8 k commands
+        run: |
+          iter8 k log
+          iter8 k delete
+
+      - name: Expose metrics service
+        run: |
+          kubectl port-forward service/iter8 8080:8080 &
+
+      - name: Check GET /httpDashboard
+        run: |
+          curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f
+
+  http-multiple-experiment:
+    name: HTTP load test with multiple endpoints
+    needs: release-charts
+
runs-on: ubuntu-latest + + steps: + - name: Check out code + uses: actions/checkout@v3 + + - name: Get modified files in the charts/iter8 folder + id: modified-files + uses: tj-actions/changed-files@v35 + with: + files: charts/iter8 + + - name: Install Iter8 + run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.14 + + - name: Start kind cluster + uses: helm/kind-action@v1.5.0 + with: + wait: 300s + + - name: Create httpbin application + run: | + kubectl create deployment httpbin --image=kennethreitz/httpbin + kubectl expose deployment httpbin --type=ClusterIP --port=80 + kubectl wait --for=condition=available --timeout=60s deploy/httpbin + + - name: Install controller + run: | + helm install --repo https://iter8-tools.github.io/iter8 iter8 controller --set logLevel=trace + kubectl rollout status --watch --timeout=60s statefulset/iter8 + + - name: iter8 k launch + run: | + iter8 k launch \ + --set "tasks={http}" \ + --set http.endpoints.get.url=http://httpbin.default/get \ + --set http.endpoints.getAnything.url=http://httpbin.default/anything \ + --set http.endpoints.post.url=http://httpbin.default/post \ + --set http.endpoints.post.payloadStr=hello \ + sleep 60 + + - name: Try other iter8 k commands + run: | + iter8 k log + iter8 k delete + + - name: Expose metrics service + run: | + kubectl port-forward service/iter8 8080:8080 & + + - name: Check GET /httpDashboard + run: | + curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f + + grpc-experiment: + name: gRPC load test + needs: release-charts + runs-on: ubuntu-latest + + steps: + - name: Check out code + uses: actions/checkout@v3 + + - name: Get modified files in the charts/iter8 folder + id: modified-files + uses: tj-actions/changed-files@v35 + with: + files: charts/iter8 + + - name: Install Iter8 + run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 + + - name: Start kind cluster + uses: helm/kind-action@v1.5.0 + with: + wait: 300s + + - name: Create routeguide application + run: | + kubectl create deployment routeguide --image=golang --port=50051 \ + -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run server/server.go" + kubectl expose deployment routeguide --port=50051 + kubectl wait --for=condition=available --timeout=60s deployment/routeguide + + - name: Test gRPC service with grpcurl + run: | + curl -sO https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml + kubectl apply -f grpcurl-routeguide.yaml + sleep 180 + kubectl logs deploy/sleep + + - name: Install controller + run: | + helm install --repo https://iter8-tools.github.io/iter8 iter8 controller --set logLevel=trace + kubectl rollout status --watch --timeout=60s statefulset/iter8 + + - name: iter8 k launch + run: | + iter8 k launch \ + --set "tasks={ready,grpc}" \ + --set ready.deploy=routeguide \ + --set ready.service=routeguide \ + --set ready.timeout=60s \ + --set grpc.host=routeguide.default:50051 \ + --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ + --set grpc.call=routeguide.RouteGuide.GetFeature \ + --set grpc.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ + sleep 60 + + - name: Try other iter8 k commands + run: | + iter8 k log + iter8 k delete + + - name: Expose 
metrics service + run: | + kubectl port-forward service/iter8 8080:8080 & + + - name: Check GET /grpcDashboard + run: | + curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f + + grpc-multiple-experiment: + name: gRPC load test with multiple endpoints + needs: release-charts + runs-on: ubuntu-latest + + steps: + - name: Check out code + uses: actions/checkout@v3 + + - name: Get modified files in the charts/iter8 folder + id: modified-files + uses: tj-actions/changed-files@v35 + with: + files: charts/iter8 + + - name: Install Iter8 + run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.14 + + - name: Start kind cluster + uses: helm/kind-action@v1.5.0 + with: + wait: 300s + + - name: Create routeguide application + run: | + kubectl create deployment routeguide --image=golang --port=50051 \ + -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run server/server.go" + kubectl expose deployment routeguide --port=50051 + kubectl wait --for=condition=available --timeout=60s deployment/routeguide + + - name: Test gRPC service with grpcurl + run: | + curl -sO https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml + kubectl apply -f grpcurl-routeguide.yaml + sleep 180 + kubectl logs deploy/sleep + + - name: Install controller + run: | + helm install --repo https://iter8-tools.github.io/iter8 iter8 controller --set logLevel=trace + kubectl rollout status --watch --timeout=60s statefulset/iter8 + + - name: iter8 k launch + run: | + iter8 k launch \ + --set "tasks={ready,grpc}" \ + --set ready.deploy=routeguide \ + --set ready.service=routeguide \ + --set ready.timeout=60s \ + --set grpc.host=routeguide.default:50051 \ + --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ + --set grpc.endpoints.getFeature.call=routeguide.RouteGuide.GetFeature \ + --set grpc.endpoints.getFeature.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ + --set grpc.endpoints.listFeatures.call=routeguide.RouteGuide.ListFeatures \ + --set grpc.endpoints.listFeatures.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/server.json \ + sleep 60 + + - name: Try other iter8 k commands + run: | + iter8 k log + iter8 k delete + + - name: Expose metrics service + run: | + kubectl port-forward service/iter8 8080:8080 & + + - name: Check GET /grpcDashboard + run: | + curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f + + grpc-experiment2: + name: gRPC load test 2 + needs: release-charts + runs-on: ubuntu-latest + + steps: + - name: Check out code + uses: actions/checkout@v3 + + - name: Get modified files in the charts/iter8 folder + id: modified-files + uses: tj-actions/changed-files@v35 + with: + files: charts/iter8 + + - name: Install Iter8 + run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.14 + + - name: Start kind cluster + uses: helm/kind-action@v1.5.0 + with: + wait: 300s + + - name: Create hello application + run: | + kubectl create deploy hello --image=docker.io/grpc/java-example-hostname:latest --port=50051 + kubectl expose deploy hello --port=50051 + kubectl wait --for=condition=available --timeout=60s deploy/hello + + - name: Install controller + run: | + helm install --repo 
https://iter8-tools.github.io/iter8 iter8 controller --set logLevel=trace + kubectl rollout status --watch --timeout=60s statefulset/iter8 + + - name: iter8 k launch + run: | + iter8 k launch \ + --set "tasks={grpc}" \ + --set grpc.host="hello.default:50051" \ + --set grpc.call="helloworld.Greeter.SayHello" \ + --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ + sleep 60 + + - name: Try other iter8 k commands + run: | + iter8 k log + iter8 k delete + + - name: Expose metrics service + run: | + kubectl port-forward service/iter8 8080:8080 & + + - name: Check GET /grpcDashboard + run: | + curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f \ No newline at end of file diff --git a/.github/workflows/spellcheck.yaml b/.github/workflows/spellcheck.yaml index a9f572a7f..716556c1c 100644 --- a/.github/workflows/spellcheck.yaml +++ b/.github/workflows/spellcheck.yaml @@ -1,20 +1,24 @@ -# name: Spell check markdown +name: Spell check markdown -# on: -# pull_request: -# branches: -# - master +# Runs during pull request -# jobs: -# spell-check: -# runs-on: ubuntu-latest -# steps: -# - uses: actions/checkout@v3 -# with: -# fetch-depth: 0 -# - run: | -# pwd -# ls -l -# - uses: rojopolis/spellcheck-github-actions@0.29.0 -# with: -# config_path: .github/spellcheck.yml +# Spell check markdown + +on: + pull_request: + branches: + - master + +jobs: + spell-check: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + - run: | + pwd + ls -l + - uses: rojopolis/spellcheck-github-actions@0.29.0 + with: + config_path: .github/spellcheck.yml diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml deleted file mode 100644 index 251475e38..000000000 --- a/.github/workflows/test.yaml +++ /dev/null @@ -1,122 +0,0 @@ -name: Test - -on: - pull_request: - branches: - - master - -jobs: - assets: - name: Build and push test Docker image - runs-on: ubuntu-latest - steps: - - name: Install Go - uses: actions/setup-go@v4 - with: - go-version: 1.19 - - uses: actions/checkout@v3 - - name: Create Dockerfile.dev - run: | - echo "# Small linux image with iter8 binary - FROM debian:buster-slim - - # Install curl - RUN apt-get update && apt-get install -y curl - - # Download iter8 compressed binary - # use COPY instead of wget - COPY _dist/iter8-linux-amd64.tar.gz iter8-linux-amd64.tar.gz - - # Extract iter8 - RUN tar -xvf iter8-linux-amd64.tar.gz - - # Extract iter8 - RUN mv linux-amd64/iter8 /bin/iter8" > Dockerfile.dev - - name: Get version and tag - id: versionTag - run: | - # GitHub ref name - VERSION=${GITHUB_REF_NAME} - echo "VERSION: $VERSION" - echo "VERSION=${VERSION}" >> "$GITHUB_ENV" - echo "VERSION=${VERSION}" >> "$GITHUB_OUTPUT" - - name: Get owner - run: | - OWNER_REPO=${{ github.repository }} - OWNER=$(echo $OWNER_REPO | cut -f1 -d/) - if [[ "$OWNER" == "iter8-tools" ]]; then - OWNER=iter8 - fi - echo "OWNER: $OWNER" - echo "OWNER=$OWNER" >> $GITHUB_ENV - - name: Get image tag - id: imageTag - run: | - # Docker image - IMAGE_TAG=$(echo ${OWNER}/iter8-pr:${VERSION}) - echo "IMAGE_TAG: $IMAGE_TAG" - echo "IMAGE_TAG=${IMAGE_TAG}" >> "$GITHUB_ENV" - echo "IMAGE_TAG=${IMAGE_TAG}" >> "$GITHUB_OUTPUT" - - uses: docker/setup-buildx-action@v2 - - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_SECRET }} - - uses: docker/build-push-action@v4 - with: - platforms: linux/amd64 - tags: ${{ env.IMAGE_TAG }} 
- push: true - file: "{context}/Dockerfile.dev" - outputs: - VERSION: ${{ steps.versionTag.outputs.VERSION }} - IMAGE_TAG: ${{ steps.imageTag.outputs.IMAGE_TAG }} - - kubernetes-load-test-http: - name: HTTP load test (with readiness) at the edge of Kubernetes - needs: assets - runs-on: ubuntu-latest - steps: - - name: Install Go - uses: actions/setup-go@v4 - with: - go-version: 1.19 - - name: Check out code into the Go module directory - uses: actions/checkout@v3 - - name: Build and install Iter8 - run: make install - - name: Start kind cluster - uses: helm/kind-action@v1.5.0 - with: - wait: 300s - - name: Create httpbin application - run: | - kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 - kubectl expose deploy httpbin --port=80 - kubectl wait --for=condition=available --timeout=60s deploy/httpbin - - name: Install controller - run: | - helm install iter8 charts/controller --set image=${{needs.assets.outputs.IMAGE_TAG}} --set logLevel=trace - kubectl rollout status --watch --timeout=60s statefulset/iter8 - - name: load-test-http in Kubernetes - run: | - iter8 k launch --localChart --chartName charts/iter8 \ - --set iter8Image=${{needs.assets.outputs.IMAGE_TAG}} \ - --set "tasks={ready,http}" \ - --set ready.deploy=httpbin \ - --set ready.service=httpbin \ - --set ready.timeout=60s \ - --set http.url=http://httpbin.default/get \ - --set http.duration="3s" \ - --set logLevel=trace - sleep 60 - - name: Test additional Iter8 commands - run: | - iter8 k log - iter8 k delete - - name: Expose metrics service - run: | - kubectl port-forward service/iter8 8080:8080 & - - name: Check GET /httpDashboard - run: | - curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f \ No newline at end of file diff --git a/.github/workflows/testcharts.yaml b/.github/workflows/testcharts.yaml index 9a068d8db..aafd78012 100644 --- a/.github/workflows/testcharts.yaml +++ b/.github/workflows/testcharts.yaml @@ -1,499 +1,473 @@ -# name: Tests to ensure that changes to charts do not break user experience - -# on: -# pull_request: - -# # Kind versions used to test Iter8 on different versions of Kubernetes -# # From: https://github.com/kubernetes-sigs/kind/releases -# env: -# versions: | -# kindest/node:v1.26.3@sha256:61b92f38dff6ccc29969e7aa154d34e38b89443af1a2c14e6cfbd2df6419c66f -# kindest/node:v1.25.8@sha256:00d3f5314cc35327706776e95b2f8e504198ce59ac545d0200a89e69fce10b7f -# kindest/node:v1.24.12@sha256:1e12918b8bc3d4253bc08f640a231bb0d3b2c5a9b28aa3f2ca1aee93e1e8db16 -# kindest/node:v1.23.17@sha256:e5fd1d9cd7a9a50939f9c005684df5a6d145e8d695e78463637b79464292e66c -# kindest/node:v1.22.17@sha256:c8a828709a53c25cbdc0790c8afe12f25538617c7be879083248981945c38693 -# kindest/node:v1.21.14@sha256:27ef72ea623ee879a25fe6f9982690a3e370c68286f4356bf643467c552a3888 -# kindest/node:v1.27.1@sha256:9915f5629ef4d29f35b478e819249e89cfaffcbfeebda4324e5c01d53d937b09 -# kindest/node:v1.27.0@sha256:c6b22e613523b1af67d4bc8a0c38a4c3ea3a2b8fbc5b367ae36345c9cb844518 - -# jobs: -# # Get the different Kind versions -# get_versions: -# runs-on: ubuntu-latest - -# steps: -# - name: Get the different Kind versions -# id: set-matrix -# run: | -# # Serialize versions into JSON array -# jsonVersions=$(jq -ncR '[inputs]' <<< "$versions") -# echo $jsonVersions - -# # Output serialized jsonVersions -# echo "matrix=$jsonVersions" | sed -e "s/,\"\"//" >> $GITHUB_OUTPUT - -# outputs: -# matrix: ${{ steps.set-matrix.outputs.matrix }} - -# http-experiment: -# name: HTTP load test -# needs: get_versions -# 
runs-on: ubuntu-latest -# strategy: -# matrix: -# version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - -# steps: -# - name: Check out code -# uses: actions/checkout@v3 - -# - name: Get modified files in the charts/iter8 folder -# id: modified-files -# uses: tj-actions/changed-files@v35 -# with: -# files: charts/iter8 - -# - name: Install Iter8 -# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 -# if: steps.modified-files.outputs.any_modified == 'true' - -# - name: Start kind cluster ${{ matrix.version }} -# uses: helm/kind-action@v1.5.0 -# if: steps.modified-files.outputs.any_modified == 'true' -# with: -# wait: 300s -# node_image: ${{ matrix.version }} - -# - name: Create httpbin application -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# kubectl create deployment httpbin --image=kennethreitz/httpbin -# kubectl expose deployment httpbin --type=ClusterIP --port=80 -# kubectl wait --for=condition=available --timeout=60s deploy/httpbin - -# - name: Install controller -# run: | -# helm install iter8 charts/controller --set logLevel=trace -# kubectl rollout status --watch --timeout=60s statefulset/iter8 - -# - name: iter8 k launch -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# iter8 k launch \ -# --localChart \ -# --chartName charts/iter8 \ -# --set "tasks={http}" \ -# --set http.url="http://httpbin.default/get" \ -# sleep 60 - -# - name: Try other iter8 k commands -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# iter8 k log -# iter8 k delete - -# - name: Expose metrics service -# run: | -# kubectl port-forward service/iter8 8080:8080 & - -# - name: Check GET /httpDashboard -# run: | -# curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f - -# http-payload-experiment: -# name: HTTP load test with payload -# needs: get_versions -# runs-on: ubuntu-latest -# strategy: -# matrix: -# version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - -# steps: -# - name: Check out code -# uses: actions/checkout@v3 - -# - name: Get modified files in the charts/iter8 folder -# id: modified-files -# uses: tj-actions/changed-files@v35 -# with: -# files: charts/iter8 - -# - name: Install Iter8 -# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.14 -# if: steps.modified-files.outputs.any_modified == 'true' - -# - name: Start kind cluster ${{ matrix.version }} -# uses: helm/kind-action@v1.5.0 -# if: steps.modified-files.outputs.any_modified == 'true' -# with: -# wait: 300s -# node_image: ${{ matrix.version }} - -# - name: Create httpbin application -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# kubectl create deployment httpbin --image=kennethreitz/httpbin -# kubectl expose deployment httpbin --type=ClusterIP --port=80 -# kubectl wait --for=condition=available --timeout=60s deploy/httpbin - -# - name: Install controller -# run: | -# helm install iter8 charts/controller --set logLevel=trace -# kubectl rollout status --watch --timeout=60s statefulset/iter8 - -# - name: iter8 k launch -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# iter8 k launch \ -# --localChart \ -# --chartName charts/iter8 \ -# --set "tasks={http}" \ -# --set http.url="http://httpbin.default/post" \ -# --set http.payloadStr=hello \ -# sleep 60 - -# - name: Try other iter8 k commands -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# iter8 k log -# iter8 k delete - -# - name: Expose metrics service -# run: | -# kubectl 
port-forward service/iter8 8080:8080 & - -# - name: Check GET /httpDashboard -# run: | -# curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f - -# http-multiple-experiment: -# name: HTTP load test with multiple endpoints -# needs: get_versions -# runs-on: ubuntu-latest -# strategy: -# matrix: -# version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - -# steps: -# - name: Check out code -# uses: actions/checkout@v3 - -# - name: Get modified files in the charts/iter8 folder -# id: modified-files -# uses: tj-actions/changed-files@v35 -# with: -# files: charts/iter8 - -# - name: Install Iter8 -# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.14 -# if: steps.modified-files.outputs.any_modified == 'true' - -# - name: Start kind cluster ${{ matrix.version }} -# uses: helm/kind-action@v1.5.0 -# if: steps.modified-files.outputs.any_modified == 'true' -# with: -# wait: 300s -# node_image: ${{ matrix.version }} - -# - name: Create httpbin application -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# kubectl create deployment httpbin --image=kennethreitz/httpbin -# kubectl expose deployment httpbin --type=ClusterIP --port=80 -# kubectl wait --for=condition=available --timeout=60s deploy/httpbin - -# - name: Install controller -# run: | -# helm install iter8 charts/controller --set logLevel=trace -# kubectl rollout status --watch --timeout=60s statefulset/iter8 - -# - name: iter8 k launch -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# iter8 k launch \ -# --localChart \ -# --chartName charts/iter8 \ -# --set "tasks={http}" \ -# --set http.endpoints.get.url=http://httpbin.default/get \ -# --set http.endpoints.getAnything.url=http://httpbin.default/anything \ -# --set http.endpoints.post.url=http://httpbin.default/post \ -# --set http.endpoints.post.payloadStr=hello \ -# sleep 60 - -# - name: Try other iter8 k commands -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# iter8 k log -# iter8 k delete - -# - name: Expose metrics service -# run: | -# kubectl port-forward service/iter8 8080:8080 & - -# - name: Check GET /httpDashboard -# run: | -# curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f - -# grpc-experiment: -# name: gRPC load test -# needs: get_versions -# runs-on: ubuntu-latest -# strategy: -# matrix: -# version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - -# steps: -# - name: Check out code -# uses: actions/checkout@v3 - -# - name: Get modified files in the charts/iter8 folder -# id: modified-files -# uses: tj-actions/changed-files@v35 -# with: -# files: charts/iter8 +name: Check changes to charts do not break user experience + +# Only runs when charts have changed + +# Test changes to charts against released images +# TODO: test if changes to kustomize have been replicated for charts +# NOTE: charts/controller is being tested in all of the http and grpc tests + +on: + pull_request: + branches: + - master + paths: + - charts/** + +# Kind versions used to test Iter8 on different versions of Kubernetes +# From: https://github.com/kubernetes-sigs/kind/releases +env: + versions: | + kindest/node:v1.28.0@sha256:b7a4cad12c197af3ba43202d3efe03246b3f0793f162afb40a33c923952d5b31 + kindest/node:v1.27.3@sha256:3966ac761ae0136263ffdb6cfd4db23ef8a83cba8a463690e98317add2c9ba72 + kindest/node:v1.26.6@sha256:6e2d8b28a5b601defe327b98bd1c2d1930b49e5d8c512e1895099e4504007adb + 
kindest/node:v1.25.11@sha256:227fa11ce74ea76a0474eeefb84cb75d8dad1b08638371ecf0e86259b35be0c8 + kindest/node:v1.24.15@sha256:7db4f8bea3e14b82d12e044e25e34bd53754b7f2b0e9d56df21774e6f66a70ab + +jobs: + # Get the different Kind versions + get_versions: + runs-on: ubuntu-latest + + steps: + - name: Get the different Kind versions + id: set-matrix + run: | + # Serialize versions into JSON array + jsonVersions=$(jq -ncR '[inputs]' <<< "$versions") + echo $jsonVersions + + # Output serialized jsonVersions + echo "matrix=$jsonVersions" | sed -e "s/,\"\"//" >> $GITHUB_OUTPUT + + outputs: + matrix: ${{ steps.set-matrix.outputs.matrix }} + + # TODO: add check to verify when a change is made to kustomize, a similar change is made to charts + + http-experiment: + name: HTTP load test + needs: get_versions + runs-on: ubuntu-latest + strategy: + matrix: + version: ${{ fromJson(needs.get_versions.outputs.matrix) }} + + steps: + - name: Check out code + uses: actions/checkout@v3 + + - name: Get modified files in the charts/iter8 folder + id: modified-files + uses: tj-actions/changed-files@v35 + with: + files: charts/iter8 + + - name: Install Iter8 + run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 + if: steps.modified-files.outputs.any_modified == 'true' + + - name: Start kind cluster ${{ matrix.version }} + uses: helm/kind-action@v1.5.0 + if: steps.modified-files.outputs.any_modified == 'true' + with: + wait: 300s + node_image: ${{ matrix.version }} + + - name: Create httpbin application + if: steps.modified-files.outputs.any_modified == 'true' + run: | + kubectl create deployment httpbin --image=kennethreitz/httpbin + kubectl expose deployment httpbin --type=ClusterIP --port=80 + kubectl wait --for=condition=available --timeout=60s deploy/httpbin + + - name: Install controller + run: | + helm install iter8 charts/controller --set logLevel=trace + kubectl rollout status --watch --timeout=60s statefulset/iter8 + + - name: iter8 k launch + if: steps.modified-files.outputs.any_modified == 'true' + run: | + iter8 k launch \ + --localChart \ + --chartName charts/iter8 \ + --set "tasks={http}" \ + --set http.url="http://httpbin.default/get" \ + sleep 60 + + - name: Try other iter8 k commands + if: steps.modified-files.outputs.any_modified == 'true' + run: | + iter8 k log + iter8 k delete + + - name: Expose metrics service + run: | + kubectl port-forward service/iter8 8080:8080 & + + - name: Check GET /httpDashboard + run: | + curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f + + http-payload-experiment: + name: HTTP load test with payload + needs: get_versions + runs-on: ubuntu-latest + strategy: + matrix: + version: ${{ fromJson(needs.get_versions.outputs.matrix) }} + + steps: + - name: Check out code + uses: actions/checkout@v3 + + - name: Get modified files in the charts/iter8 folder + id: modified-files + uses: tj-actions/changed-files@v35 + with: + files: charts/iter8 + + - name: Install Iter8 + run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.14 + if: steps.modified-files.outputs.any_modified == 'true' + + - name: Start kind cluster ${{ matrix.version }} + uses: helm/kind-action@v1.5.0 + if: steps.modified-files.outputs.any_modified == 'true' + with: + wait: 300s + node_image: ${{ matrix.version }} + + - name: Create httpbin application + if: steps.modified-files.outputs.any_modified == 'true' + run: | + kubectl create deployment httpbin --image=kennethreitz/httpbin + kubectl expose deployment httpbin --type=ClusterIP --port=80 + 
kubectl wait --for=condition=available --timeout=60s deploy/httpbin + + - name: Install controller + run: | + helm install iter8 charts/controller --set logLevel=trace + kubectl rollout status --watch --timeout=60s statefulset/iter8 + + - name: iter8 k launch + if: steps.modified-files.outputs.any_modified == 'true' + run: | + iter8 k launch \ + --localChart \ + --chartName charts/iter8 \ + --set "tasks={http}" \ + --set http.url="http://httpbin.default/post" \ + --set http.payloadStr=hello \ + sleep 60 + + - name: Try other iter8 k commands + if: steps.modified-files.outputs.any_modified == 'true' + run: | + iter8 k log + iter8 k delete + + - name: Expose metrics service + run: | + kubectl port-forward service/iter8 8080:8080 & + + - name: Check GET /httpDashboard + run: | + curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f + + http-multiple-experiment: + name: HTTP load test with multiple endpoints + needs: get_versions + runs-on: ubuntu-latest + strategy: + matrix: + version: ${{ fromJson(needs.get_versions.outputs.matrix) }} + + steps: + - name: Check out code + uses: actions/checkout@v3 + + - name: Get modified files in the charts/iter8 folder + id: modified-files + uses: tj-actions/changed-files@v35 + with: + files: charts/iter8 + + - name: Install Iter8 + run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.14 + if: steps.modified-files.outputs.any_modified == 'true' + + - name: Start kind cluster ${{ matrix.version }} + uses: helm/kind-action@v1.5.0 + if: steps.modified-files.outputs.any_modified == 'true' + with: + wait: 300s + node_image: ${{ matrix.version }} + + - name: Create httpbin application + if: steps.modified-files.outputs.any_modified == 'true' + run: | + kubectl create deployment httpbin --image=kennethreitz/httpbin + kubectl expose deployment httpbin --type=ClusterIP --port=80 + kubectl wait --for=condition=available --timeout=60s deploy/httpbin + + - name: Install controller + run: | + helm install iter8 charts/controller --set logLevel=trace + kubectl rollout status --watch --timeout=60s statefulset/iter8 + + - name: iter8 k launch + if: steps.modified-files.outputs.any_modified == 'true' + run: | + iter8 k launch \ + --localChart \ + --chartName charts/iter8 \ + --set "tasks={http}" \ + --set http.endpoints.get.url=http://httpbin.default/get \ + --set http.endpoints.getAnything.url=http://httpbin.default/anything \ + --set http.endpoints.post.url=http://httpbin.default/post \ + --set http.endpoints.post.payloadStr=hello \ + sleep 60 + + - name: Try other iter8 k commands + if: steps.modified-files.outputs.any_modified == 'true' + run: | + iter8 k log + iter8 k delete + + - name: Expose metrics service + run: | + kubectl port-forward service/iter8 8080:8080 & + + - name: Check GET /httpDashboard + run: | + curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f + + grpc-experiment: + name: gRPC load test + needs: get_versions + runs-on: ubuntu-latest + strategy: + matrix: + version: ${{ fromJson(needs.get_versions.outputs.matrix) }} + + steps: + - name: Check out code + uses: actions/checkout@v3 + + - name: Get modified files in the charts/iter8 folder + id: modified-files + uses: tj-actions/changed-files@v35 + with: + files: charts/iter8 -# - name: Install Iter8 -# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 -# if: steps.modified-files.outputs.any_modified == 'true' - -# - name: Start kind cluster ${{ matrix.version }} -# uses: helm/kind-action@v1.5.0 -# if: 
steps.modified-files.outputs.any_modified == 'true' -# with: -# wait: 300s -# node_image: ${{ matrix.version }} - -# - name: Create routeguide application -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# kubectl create deployment routeguide --image=golang --port=50051 \ -# -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run server/server.go" -# kubectl expose deployment routeguide --port=50051 -# kubectl wait --for=condition=available --timeout=60s deployment/routeguide - -# - name: Test gRPC service with grpcurl -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# curl -sO https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml -# kubectl apply -f grpcurl-routeguide.yaml -# sleep 180 -# kubectl logs deploy/sleep - -# - name: Install controller -# run: | -# helm install iter8 charts/controller --set logLevel=trace -# kubectl rollout status --watch --timeout=60s statefulset/iter8 - -# - name: iter8 k launch -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# iter8 k launch \ -# --localChart \ -# --chartName charts/iter8 \ -# --set "tasks={ready,grpc}" \ -# --set ready.deploy=routeguide \ -# --set ready.service=routeguide \ -# --set ready.timeout=60s \ -# --set grpc.host=routeguide.default:50051 \ -# --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ -# --set grpc.call=routeguide.RouteGuide.GetFeature \ -# --set grpc.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ -# sleep 60 - -# - name: Try other iter8 k commands -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# iter8 k log -# iter8 k delete - -# - name: Expose metrics service -# run: | -# kubectl port-forward service/iter8 8080:8080 & - -# - name: Check GET /grpcDashboard -# run: | -# curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f - -# grpc-multiple-experiment: -# name: gRPC load test with multiple endpoints -# needs: get_versions -# runs-on: ubuntu-latest -# strategy: -# matrix: -# version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - -# steps: -# - name: Check out code -# uses: actions/checkout@v3 - -# - name: Get modified files in the charts/iter8 folder -# id: modified-files -# uses: tj-actions/changed-files@v35 -# with: -# files: charts/iter8 + - name: Install Iter8 + run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.15 + if: steps.modified-files.outputs.any_modified == 'true' + + - name: Start kind cluster ${{ matrix.version }} + uses: helm/kind-action@v1.5.0 + if: steps.modified-files.outputs.any_modified == 'true' + with: + wait: 300s + node_image: ${{ matrix.version }} + + - name: Create routeguide application + if: steps.modified-files.outputs.any_modified == 'true' + run: | + kubectl create deployment routeguide --image=golang --port=50051 \ + -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run server/server.go" + kubectl expose deployment routeguide --port=50051 + kubectl wait --for=condition=available --timeout=60s deployment/routeguide + + - name: Test gRPC service with grpcurl + if: steps.modified-files.outputs.any_modified == 'true' + run: 
| + curl -sO https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml + kubectl apply -f grpcurl-routeguide.yaml + sleep 180 + kubectl logs deploy/sleep + + - name: Install controller + run: | + helm install iter8 charts/controller --set logLevel=trace + kubectl rollout status --watch --timeout=60s statefulset/iter8 + + - name: iter8 k launch + if: steps.modified-files.outputs.any_modified == 'true' + run: | + iter8 k launch \ + --localChart \ + --chartName charts/iter8 \ + --set "tasks={ready,grpc}" \ + --set ready.deploy=routeguide \ + --set ready.service=routeguide \ + --set ready.timeout=60s \ + --set grpc.host=routeguide.default:50051 \ + --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ + --set grpc.call=routeguide.RouteGuide.GetFeature \ + --set grpc.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ + sleep 60 + + - name: Try other iter8 k commands + if: steps.modified-files.outputs.any_modified == 'true' + run: | + iter8 k log + iter8 k delete + + - name: Expose metrics service + run: | + kubectl port-forward service/iter8 8080:8080 & + + - name: Check GET /grpcDashboard + run: | + curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f + + grpc-multiple-experiment: + name: gRPC load test with multiple endpoints + needs: get_versions + runs-on: ubuntu-latest + strategy: + matrix: + version: ${{ fromJson(needs.get_versions.outputs.matrix) }} + + steps: + - name: Check out code + uses: actions/checkout@v3 + + - name: Get modified files in the charts/iter8 folder + id: modified-files + uses: tj-actions/changed-files@v35 + with: + files: charts/iter8 -# - name: Install Iter8 -# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.14 -# if: steps.modified-files.outputs.any_modified == 'true' - -# - name: Start kind cluster ${{ matrix.version }} -# uses: helm/kind-action@v1.5.0 -# if: steps.modified-files.outputs.any_modified == 'true' -# with: -# wait: 300s -# node_image: ${{ matrix.version }} - -# - name: Create routeguide application -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# kubectl create deployment routeguide --image=golang --port=50051 \ -# -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run server/server.go" -# kubectl expose deployment routeguide --port=50051 -# kubectl wait --for=condition=available --timeout=60s deployment/routeguide - -# - name: Test gRPC service with grpcurl -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# curl -sO https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml -# kubectl apply -f grpcurl-routeguide.yaml -# sleep 180 -# kubectl logs deploy/sleep - -# - name: Install controller -# run: | -# helm install iter8 charts/controller --set logLevel=trace -# kubectl rollout status --watch --timeout=60s statefulset/iter8 - -# - name: iter8 k launch -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# iter8 k launch \ -# --localChart \ -# --chartName charts/iter8 \ -# --set "tasks={ready,grpc}" \ -# --set ready.deploy=routeguide \ -# --set ready.service=routeguide \ -# --set ready.timeout=60s \ -# --set 
grpc.host=routeguide.default:50051 \ -# --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ -# --set grpc.endpoints.getFeature.call=routeguide.RouteGuide.GetFeature \ -# --set grpc.endpoints.getFeature.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ -# --set grpc.endpoints.listFeatures.call=routeguide.RouteGuide.ListFeatures \ -# --set grpc.endpoints.listFeatures.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/server.json \ -# sleep 60 - -# - name: Try other iter8 k commands -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# iter8 k log -# iter8 k delete - -# - name: Expose metrics service -# run: | -# kubectl port-forward service/iter8 8080:8080 & - -# - name: Check GET /grpcDashboard -# run: | -# curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f - -# grpc-experiment2: -# name: gRPC load test 2 -# needs: get_versions -# runs-on: ubuntu-latest -# strategy: -# matrix: -# version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - -# steps: -# - name: Check out code -# uses: actions/checkout@v3 - -# - name: Get modified files in the charts/iter8 folder -# id: modified-files -# uses: tj-actions/changed-files@v35 -# with: -# files: charts/iter8 + - name: Install Iter8 + run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.14 + if: steps.modified-files.outputs.any_modified == 'true' + + - name: Start kind cluster ${{ matrix.version }} + uses: helm/kind-action@v1.5.0 + if: steps.modified-files.outputs.any_modified == 'true' + with: + wait: 300s + node_image: ${{ matrix.version }} + + - name: Create routeguide application + if: steps.modified-files.outputs.any_modified == 'true' + run: | + kubectl create deployment routeguide --image=golang --port=50051 \ + -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run server/server.go" + kubectl expose deployment routeguide --port=50051 + kubectl wait --for=condition=available --timeout=60s deployment/routeguide + + - name: Test gRPC service with grpcurl + if: steps.modified-files.outputs.any_modified == 'true' + run: | + curl -sO https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml + kubectl apply -f grpcurl-routeguide.yaml + sleep 180 + kubectl logs deploy/sleep + + - name: Install controller + run: | + helm install iter8 charts/controller --set logLevel=trace + kubectl rollout status --watch --timeout=60s statefulset/iter8 + + - name: iter8 k launch + if: steps.modified-files.outputs.any_modified == 'true' + run: | + iter8 k launch \ + --localChart \ + --chartName charts/iter8 \ + --set "tasks={ready,grpc}" \ + --set ready.deploy=routeguide \ + --set ready.service=routeguide \ + --set ready.timeout=60s \ + --set grpc.host=routeguide.default:50051 \ + --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ + --set grpc.endpoints.getFeature.call=routeguide.RouteGuide.GetFeature \ + --set grpc.endpoints.getFeature.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ + --set grpc.endpoints.listFeatures.call=routeguide.RouteGuide.ListFeatures \ + --set 
grpc.endpoints.listFeatures.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/server.json \ + sleep 60 + + - name: Try other iter8 k commands + if: steps.modified-files.outputs.any_modified == 'true' + run: | + iter8 k log + iter8 k delete + + - name: Expose metrics service + run: | + kubectl port-forward service/iter8 8080:8080 & + + - name: Check GET /grpcDashboard + run: | + curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f + + grpc-experiment2: + name: gRPC load test 2 + needs: get_versions + runs-on: ubuntu-latest + strategy: + matrix: + version: ${{ fromJson(needs.get_versions.outputs.matrix) }} + + steps: + - name: Check out code + uses: actions/checkout@v3 + + - name: Get modified files in the charts/iter8 folder + id: modified-files + uses: tj-actions/changed-files@v35 + with: + files: charts/iter8 -# - name: Install Iter8 -# run: GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.14 -# if: steps.modified-files.outputs.any_modified == 'true' - -# - name: Start kind cluster ${{ matrix.version }} -# uses: helm/kind-action@v1.5.0 -# if: steps.modified-files.outputs.any_modified == 'true' -# with: -# wait: 300s -# node_image: ${{ matrix.version }} - -# - name: Create hello application -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# kubectl create deploy hello --image=docker.io/grpc/java-example-hostname:latest --port=50051 -# kubectl expose deploy hello --port=50051 -# kubectl wait --for=condition=available --timeout=60s deploy/hello - -# - name: Install controller -# run: | -# helm install iter8 charts/controller --set logLevel=trace -# kubectl rollout status --watch --timeout=60s statefulset/iter8 - -# - name: iter8 k launch -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# iter8 k launch \ -# --localChart \ -# --chartName charts/iter8 \ -# --set "tasks={grpc}" \ -# --set grpc.host="hello.default:50051" \ -# --set grpc.call="helloworld.Greeter.SayHello" \ -# --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ -# sleep 60 - -# - name: Try other iter8 k commands -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# iter8 k log -# iter8 k delete - -# - name: Expose metrics service -# run: | -# kubectl port-forward service/iter8 8080:8080 & - -# - name: Check GET /grpcDashboard -# run: | -# curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f - -# controller: -# name: Controller test -# needs: get_versions -# runs-on: ubuntu-latest -# strategy: -# matrix: -# version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - -# steps: -# - name: Check out code -# uses: actions/checkout@v3 - -# - name: Get modified files in the charts/controller folder -# id: modified-files -# uses: tj-actions/changed-files@v35 -# with: -# files: charts/controller - -# - name: Start kind cluster ${{ matrix.version }} -# uses: helm/kind-action@v1.5.0 -# if: steps.modified-files.outputs.any_modified == 'true' -# with: -# wait: 300s -# node_image: ${{ matrix.version }} - -# - name: Start controller -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# helm install controller charts/controller -f charts/controller/testdata/values.yaml - -# - name: Check controller -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# kubectl rollout status --watch --timeout=60s statefulset.apps/controller + - name: Install Iter8 + run: 
GOBIN=/usr/local/bin go install github.com/iter8-tools/iter8@v0.14 + if: steps.modified-files.outputs.any_modified == 'true' + + - name: Start kind cluster ${{ matrix.version }} + uses: helm/kind-action@v1.5.0 + if: steps.modified-files.outputs.any_modified == 'true' + with: + wait: 300s + node_image: ${{ matrix.version }} + + - name: Create hello application + if: steps.modified-files.outputs.any_modified == 'true' + run: | + kubectl create deploy hello --image=docker.io/grpc/java-example-hostname:latest --port=50051 + kubectl expose deploy hello --port=50051 + kubectl wait --for=condition=available --timeout=60s deploy/hello + + - name: Install controller + run: | + helm install iter8 charts/controller --set logLevel=trace + kubectl rollout status --watch --timeout=60s statefulset/iter8 + + - name: iter8 k launch + if: steps.modified-files.outputs.any_modified == 'true' + run: | + iter8 k launch \ + --localChart \ + --chartName charts/iter8 \ + --set "tasks={grpc}" \ + --set grpc.host="hello.default:50051" \ + --set grpc.call="helloworld.Greeter.SayHello" \ + --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ + sleep 60 + + - name: Try other iter8 k commands + if: steps.modified-files.outputs.any_modified == 'true' + run: | + iter8 k log + iter8 k delete + + - name: Expose metrics service + run: | + kubectl port-forward service/iter8 8080:8080 & + + - name: Check GET /grpcDashboard + run: | + curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f \ No newline at end of file diff --git a/.github/workflows/testcode.yaml b/.github/workflows/testcode.yaml new file mode 100644 index 000000000..03279b720 --- /dev/null +++ b/.github/workflows/testcode.yaml @@ -0,0 +1,129 @@ +name: Performance tests to assess the functionality of the latest version of Iter8 (master branch) + +# Only runs when golang code (or test cases) have changed + +# Only tests if iter8 CLI works after golang code changes + +on: + pull_request: + branches: + - master + paths: + - **.go + - testdata/** + +jobs: + unit-test: + name: Unit test + runs-on: ubuntu-latest + steps: + - name: Install Go + uses: actions/setup-go@v4 + with: + go-version: 1.19 + - name: Check out code into the Go module directory + uses: actions/checkout@v3 + - name: Test and compute coverage + run: make coverage # includes vet and lint + - name: Enforce coverage + run: | + export COVERAGE=$(go tool cover -func coverage.out | grep total | awk '{print substr($3, 1, length($3)-1)}') + echo "code coverage is at ${COVERAGE}" + if [ 1 -eq "$(echo "${COVERAGE} > 76.0" | bc)" ]; then \ + echo "all good... coverage is above 76.0%"; + else \ + echo "not good... 
coverage is not above 76.0%"; + exit 1 + fi + + cli-test-http: + name: CLI test with http task + runs-on: ubuntu-latest + steps: + - name: Install Go + uses: actions/setup-go@v4 + with: + go-version: 1.19 + - name: Check out code into the Go module directory + uses: actions/checkout@v3 + - name: Build and install Iter8 + run: make install + - name: Start kind cluster + uses: helm/kind-action@v1.5.0 + with: + wait: 300s + + # # No need to create httpbin application because only testing CLI + # + # - name: Create httpbin application + # run: | + # kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 + # kubectl expose deploy httpbin --port=80 + + - name: CLI test with http task + run: | + iter8 k launch \ + --set "tasks={ready,http}" \ + --set ready.deploy=httpbin \ + --set ready.service=httpbin \ + --set ready.timeout=60s \ + --set http.url=http://httpbin.default/get \ + --set http.duration="3s" + sleep 60 + + - name: Try other iter8 k commands + run: | + iter8 k log + iter8 k delete + + cli-test-grpc: + name: CLI test with grpc task + runs-on: ubuntu-latest + steps: + - name: Install Go + uses: actions/setup-go@v4 + with: + go-version: 1.19 + - name: Check out code into the Go module directory + uses: actions/checkout@v3 + - name: Build and install Iter8 + run: make install + + - name: Start kind cluster + uses: helm/kind-action@v1.5.0 + with: + wait: 300s + + # # No need to create routeguide application because only testing CLI + # + # - name: Create routeguide application + # run: | + # kubectl create deployment routeguide --image=golang --port=50051 \ + # -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run server/server.go" + # kubectl expose deployment routeguide --port=50051 + # kubectl wait --for=condition=available --timeout=60s deployment/routeguide + + # - name: Test gRPC service with grpcurl + # run: | + # curl -sO https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml + # kubectl apply -f grpcurl-routeguide.yaml + # sleep 180 + # kubectl logs deploy/sleep + + - name: CLI test with grpc task + run: | + iter8 k launch \ + --set "tasks={ready,grpc}" \ + --set ready.deploy=routeguide \ + --set ready.service=routeguide \ + --set ready.timeout=60s \ + --set grpc.host=routeguide.default:50051 \ + --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ + --set grpc.call=routeguide.RouteGuide.GetFeature \ + --set grpc.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json + sleep 60 + + - name: Try other iter8 k commands + run: | + iter8 k log + iter8 k delete \ No newline at end of file diff --git a/.github/workflows/testkustomize.yaml b/.github/workflows/testkustomize.yaml index 96b47eeb8..ceb29602c 100644 --- a/.github/workflows/testkustomize.yaml +++ b/.github/workflows/testkustomize.yaml @@ -1,106 +1,117 @@ -# name: Test kustomize experiments - -# on: -# pull_request: - -# # Kind versions used to test Iter8 on different versions of Kubernetes -# # From: https://github.com/kubernetes-sigs/kind/releases -# env: -# versions: | -# kindest/node:v1.26.3@sha256:61b92f38dff6ccc29969e7aa154d34e38b89443af1a2c14e6cfbd2df6419c66f -# kindest/node:v1.25.8@sha256:00d3f5314cc35327706776e95b2f8e504198ce59ac545d0200a89e69fce10b7f -# 
kindest/node:v1.24.12@sha256:1e12918b8bc3d4253bc08f640a231bb0d3b2c5a9b28aa3f2ca1aee93e1e8db16
-#     kindest/node:v1.23.17@sha256:e5fd1d9cd7a9a50939f9c005684df5a6d145e8d695e78463637b79464292e66c
-#     kindest/node:v1.22.17@sha256:c8a828709a53c25cbdc0790c8afe12f25538617c7be879083248981945c38693
-#     kindest/node:v1.21.14@sha256:27ef72ea623ee879a25fe6f9982690a3e370c68286f4356bf643467c552a3888
-#     kindest/node:v1.27.1@sha256:9915f5629ef4d29f35b478e819249e89cfaffcbfeebda4324e5c01d53d937b09
-#     kindest/node:v1.27.0@sha256:c6b22e613523b1af67d4bc8a0c38a4c3ea3a2b8fbc5b367ae36345c9cb844518
-
-# jobs:
-#   # Get the paths for the Helm charts to lint
-#   get_versions:
-#     runs-on: ubuntu-latest
-
-#     steps:
-#       - name: Get the paths for Helm charts to lint
-#         id: set-matrix
-#         run: |
-#           # Serialize versions into JSON array
-#           jsonVersions=$(jq -ncR '[inputs]' <<< "$versions")
-#           echo $jsonVersions
-
-#           # Output serialized jsonVersions
-#           echo "matrix=$jsonVersions" | sed -e "s/,\"\"//" >> $GITHUB_OUTPUT
-
-#     outputs:
-#       matrix: ${{ steps.set-matrix.outputs.matrix }}
-
-#   controller:
-#     name: Controller test
-#     needs: get_versions
-#     runs-on: ubuntu-latest
-#     strategy:
-#       matrix:
-#         version: ${{ fromJson(needs.get_versions.outputs.matrix) }}
-
-#     steps:
-#       - name: Check out code
-#         uses: actions/checkout@v3
-
-#       - name: Get modified files in the charts/controller folder
-#         id: modified-files
-#         uses: tj-actions/changed-files@v35
-#         with:
-#           files: charts/controller
-
-#       - name: Start kind cluster ${{ matrix.version }}
-#         uses: helm/kind-action@v1.5.0
-#         if: steps.modified-files.outputs.any_modified == 'true'
-#         with:
-#           wait: 300s
-#           node_image: ${{ matrix.version }}
+name: Check changes to kustomize do not break user experience
+
+# Only runs when kustomize has changed
+
+# Test changes to kustomize against released images
+# TODO: test if changes to charts have been replicated for kustomize
+
+on:
+  pull_request:
+    branches:
+      - master
+    paths:
+      - kustomize/**
+
+# Kind versions used to test Iter8 on different versions of Kubernetes
+# From: https://github.com/kubernetes-sigs/kind/releases
+env:
+  versions: |
+    kindest/node:v1.26.3@sha256:61b92f38dff6ccc29969e7aa154d34e38b89443af1a2c14e6cfbd2df6419c66f
+    kindest/node:v1.25.8@sha256:00d3f5314cc35327706776e95b2f8e504198ce59ac545d0200a89e69fce10b7f
+    kindest/node:v1.24.12@sha256:1e12918b8bc3d4253bc08f640a231bb0d3b2c5a9b28aa3f2ca1aee93e1e8db16
+    kindest/node:v1.23.17@sha256:e5fd1d9cd7a9a50939f9c005684df5a6d145e8d695e78463637b79464292e66c
+    kindest/node:v1.22.17@sha256:c8a828709a53c25cbdc0790c8afe12f25538617c7be879083248981945c38693
+    kindest/node:v1.21.14@sha256:27ef72ea623ee879a25fe6f9982690a3e370c68286f4356bf643467c552a3888
+    kindest/node:v1.27.1@sha256:9915f5629ef4d29f35b478e819249e89cfaffcbfeebda4324e5c01d53d937b09
+    kindest/node:v1.27.0@sha256:c6b22e613523b1af67d4bc8a0c38a4c3ea3a2b8fbc5b367ae36345c9cb844518
+
+jobs:
+  # Get the different Kind versions
+  get_versions:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Get the different Kind versions
+        id: set-matrix
+        run: |
+          # Serialize versions into JSON array
+          jsonVersions=$(jq -ncR '[inputs]' <<< "$versions")
+          echo $jsonVersions
+
+          # Output serialized jsonVersions
+          echo "matrix=$jsonVersions" | sed -e "s/,\"\"//" >> $GITHUB_OUTPUT
+
+    outputs:
+      matrix: ${{ steps.set-matrix.outputs.matrix }}
+
+# TODO: add check to verify when a change is made to charts, a similar change is made to kustomize
+
+  controller-namespaceScoped:
+    name: Controller namespace scoped test
+    needs: get_versions
+
runs-on: ubuntu-latest + strategy: + matrix: + version: ${{ fromJson(needs.get_versions.outputs.matrix) }} + + steps: + - name: Check out code + uses: actions/checkout@v3 + + - name: Get modified files in the kustomize/controller folder + id: modified-files + uses: tj-actions/changed-files@v35 + with: + files: kustomize/controller + + - name: Start kind cluster ${{ matrix.version }} + uses: helm/kind-action@v1.5.0 + if: steps.modified-files.outputs.any_modified == 'true' + with: + wait: 300s + node_image: ${{ matrix.version }} -# - name: Start controller -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# kubectl apply -k kustomize/controller/namespaceScoped - -# - name: Check controller -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# kubectl rollout status --watch --timeout=60s statefulset.apps/iter8 - -# controller-clusterScoped: -# name: Controller cluster scoped test -# needs: get_versions -# runs-on: ubuntu-latest -# strategy: -# matrix: -# version: ${{ fromJson(needs.get_versions.outputs.matrix) }} - -# steps: -# - name: Check out code -# uses: actions/checkout@v3 - -# - name: Get modified files in the charts/controller folder -# id: modified-files -# uses: tj-actions/changed-files@v35 -# with: -# files: charts/controller - -# - name: Start kind cluster ${{ matrix.version }} -# uses: helm/kind-action@v1.5.0 -# if: steps.modified-files.outputs.any_modified == 'true' -# with: -# wait: 300s -# node_image: ${{ matrix.version }} + - name: Start controller + if: steps.modified-files.outputs.any_modified == 'true' + run: | + kubectl apply -k kustomize/controller/namespaceScoped + + - name: Check controller + if: steps.modified-files.outputs.any_modified == 'true' + run: | + kubectl rollout status --watch --timeout=60s statefulset.apps/iter8 + + controller-clusterScoped: + name: Controller cluster scoped test + needs: get_versions + runs-on: ubuntu-latest + strategy: + matrix: + version: ${{ fromJson(needs.get_versions.outputs.matrix) }} + + steps: + - name: Check out code + uses: actions/checkout@v3 + + - name: Get modified files in the kustomize/controller folder + id: modified-files + uses: tj-actions/changed-files@v35 + with: + files: kustomize/controller + + - name: Start kind cluster ${{ matrix.version }} + uses: helm/kind-action@v1.5.0 + if: steps.modified-files.outputs.any_modified == 'true' + with: + wait: 300s + node_image: ${{ matrix.version }} -# - name: Start controller -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# kubectl apply -k kustomize/controller/clusterScoped - -# - name: Check controller -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# kubectl rollout status --watch --timeout=60s statefulset.apps/iter8 \ No newline at end of file + - name: Start controller + if: steps.modified-files.outputs.any_modified == 'true' + run: | + kubectl apply -k kustomize/controller/clusterScoped + + - name: Check controller + if: steps.modified-files.outputs.any_modified == 'true' + run: | + kubectl rollout status --watch --timeout=60s statefulset.apps/iter8 \ No newline at end of file diff --git a/.github/workflows/testperformance.yaml b/.github/workflows/testperformance.yaml deleted file mode 100644 index 36424a0cb..000000000 --- a/.github/workflows/testperformance.yaml +++ /dev/null @@ -1,370 +0,0 @@ -name: Performance tests to assess the functionality of the latest version of Iter8 (master branch) - -# Only runs when golang code (or test cases) have changed - -on: - pull_request: - branches: - - 
master - paths: - - **.go - - testdata/** - -jobs: - unit-test: - name: Unit test - runs-on: ubuntu-latest - steps: - - name: Install Go - uses: actions/setup-go@v4 - with: - go-version: 1.19 - - name: Check out code into the Go module directory - uses: actions/checkout@v3 - - name: Test and compute coverage - run: make coverage # includes vet and lint - - name: Enforce coverage - run: | - export COVERAGE=$(go tool cover -func coverage.out | grep total | awk '{print substr($3, 1, length($3)-1)}') - echo "code coverage is at ${COVERAGE}" - if [ 1 -eq "$(echo "${COVERAGE} > 76.0" | bc)" ]; then \ - echo "all good... coverage is above 76.0%"; - else \ - echo "not good... coverage is not above 76.0%"; - exit 1 - fi - - build-push-test-image: - name: Build and push test Docker image - runs-on: ubuntu-latest - steps: - - name: Install Go - uses: actions/setup-go@v4 - with: - go-version: 1.19 - - uses: actions/checkout@v3 - - name: Create Dockerfile.dev - run: | - echo "# Small linux image with iter8 binary - FROM debian:buster-slim - - # Install curl - RUN apt-get update && apt-get install -y curl - - # Download iter8 compressed binary - # use COPY instead of wget - COPY _dist/iter8-linux-amd64.tar.gz iter8-linux-amd64.tar.gz - - # Extract iter8 - RUN tar -xvf iter8-linux-amd64.tar.gz - - # Extract iter8 - RUN mv linux-amd64/iter8 /bin/iter8" > Dockerfile.dev - - name: Get version and tag - id: versionTag - run: | - # GitHub ref name - VERSION=${GITHUB_REF_NAME} - echo "VERSION: $VERSION" - echo "VERSION=${VERSION}" >> "$GITHUB_ENV" - echo "VERSION=${VERSION}" >> "$GITHUB_OUTPUT" - - name: Get owner - run: | - OWNER_REPO=${{ github.repository }} - OWNER=$(echo $OWNER_REPO | cut -f1 -d/) - if [[ "$OWNER" == "iter8-tools" ]]; then - OWNER=iter8 - fi - echo "OWNER: $OWNER" - echo "OWNER=$OWNER" >> $GITHUB_ENV - - name: Get image tag - id: imageTag - run: | - # Docker image - IMAGE_TAG=$(echo ${OWNER}/iter8-pr:${VERSION}) - echo "IMAGE_TAG: $IMAGE_TAG" - echo "IMAGE_TAG=${IMAGE_TAG}" >> "$GITHUB_ENV" - echo "IMAGE_TAG=${IMAGE_TAG}" >> "$GITHUB_OUTPUT" - - uses: docker/setup-buildx-action@v2 - - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_SECRET }} - - uses: docker/build-push-action@v4 - with: - platforms: linux/amd64 - tags: ${{ env.IMAGE_TAG }} - push: true - file: "{context}/Dockerfile.dev" - outputs: - VERSION: ${{ steps.versionTag.outputs.VERSION }} - IMAGE_TAG: ${{ steps.imageTag.outputs.IMAGE_TAG }} - - kubernetes-load-test-http: - name: HTTP load test (with readiness) at the edge of Kubernetes - needs: build-push-test-image - runs-on: ubuntu-latest - steps: - - name: Install Go - uses: actions/setup-go@v4 - with: - go-version: 1.19 - - name: Check out code into the Go module directory - uses: actions/checkout@v3 - - name: Build and install Iter8 - run: make install - - name: Start kind cluster - uses: helm/kind-action@v1.5.0 - with: - wait: 300s - - - name: Create httpbin application - run: | - kubectl create deploy httpbin --image=kennethreitz/httpbin --port=80 - kubectl expose deploy httpbin --port=80 - - - name: Install controller - run: | - helm install iter8 charts/controller --set image=${{ needs.assets.outputs.IMAGE_TAG }} --set logLevel=trace - kubectl rollout status --watch --timeout=60s statefulset/iter8 - - - name: Expose metrics service - run: | - kubectl port-forward service/iter8 8080:8080 & - - - name: load-test-http in Kubernetes - run: | - iter8 k launch --localChart --chartName charts/iter8 \ - --set 
iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ - --set "tasks={ready,http}" \ - --set ready.deploy=httpbin \ - --set ready.service=httpbin \ - --set ready.timeout=60s \ - --set http.url=http://httpbin.default/get \ - --set http.duration="3s" - sleep 60 - - - name: Try other iter8 k commands - run: | - iter8 k log - iter8 k delete - - - name: Check GET /httpDashboard - run: | - curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f - - - name: load-test-http with payload in Kubernetes - run: | - iter8 k launch --localChart --chartName charts/iter8 \ - --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ - --set "tasks={ready,http}" \ - --set ready.deploy=httpbin \ - --set ready.service=httpbin \ - --set ready.timeout=60s \ - --set http.url=http://httpbin.default/post \ - --set http.payloadStr=hello \ - --set http.duration="3s" - sleep 60 - - - name: Try other iter8 k commands - run: | - iter8 k log - iter8 k delete - - - name: Check GET /httpDashboard - run: | - curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f - - - name: load-test-http with multiple endpoints in Kubernetes - run: | - iter8 k launch --localChart --chartName charts/iter8 \ - --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ - --set "tasks={ready,http}" \ - --set ready.deploy=httpbin \ - --set ready.service=httpbin \ - --set ready.timeout=60s \ - --set http.endpoints.get.url=http://httpbin.default/get \ - --set http.endpoints.getAnything.url=http://httpbin.default/anything \ - --set http.endpoints.post.url=http://httpbin.default/post \ - --set http.endpoints.post.payloadStr=hello \ - --set http.duration="3s" - sleep 60 - - - name: Try other iter8 k commands - run: | - iter8 k log - iter8 k delete - - - name: Check GET /httpDashboard - run: | - curl "http://localhost:8080/httpDashboard?namespace=default&experiment=default" -f - - kubernetes-load-test-grpc: - name: gRPC load test with various URLs - needs: build-push-test-image - runs-on: ubuntu-latest - steps: - - name: Install Go - uses: actions/setup-go@v4 - with: - go-version: 1.19 - - name: Check out code into the Go module directory - uses: actions/checkout@v3 - - name: Build and install Iter8 - run: make install - - - name: Start kind cluster - uses: helm/kind-action@v1.5.0 - with: - wait: 300s - - - name: Create routeguide application - run: | - kubectl create deployment routeguide --image=golang --port=50051 \ - -- bash -c "git clone -b v1.52.0 --depth 1 https://github.com/grpc/grpc-go; cd grpc-go/examples/route_guide; sed -i "''" "'"s/localhost//"'" server/server.go; go run server/server.go" - kubectl expose deployment routeguide --port=50051 - kubectl wait --for=condition=available --timeout=60s deployment/routeguide - - - name: Test gRPC service with grpcurl - run: | - curl -sO https://gist.githubusercontent.com/kalantar/510737f0fd58c0432a08e5b6e45ec97f/raw/524d6660284bf653ce0f29f3a25ed0e913c3df80/grpcurl-routeguide.yaml - kubectl apply -f grpcurl-routeguide.yaml - sleep 180 - kubectl logs deploy/sleep - - - name: Install controller - run: | - helm install iter8 charts/controller --set image=${{ needs.assets.outputs.IMAGE_TAG }} --set logLevel=trace - kubectl rollout status --watch --timeout=60s statefulset/iter8 - - - name: Expose metrics service - run: | - kubectl port-forward service/iter8 8080:8080 & - - - name: load test grpc service - run: | - iter8 k launch --localChart --chartName charts/iter8 \ - --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ - --set "tasks={ready,grpc}" 
\ - --set ready.deploy=routeguide \ - --set ready.service=routeguide \ - --set ready.timeout=60s \ - --set grpc.host=routeguide.default:50051 \ - --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ - --set grpc.call=routeguide.RouteGuide.GetFeature \ - --set grpc.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json - sleep 60 - - - name: Try other iter8 k commands - run: | - iter8 k log - iter8 k delete - - - name: Check GET /grpcDashboard - run: | - curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f - - - name: load test grpc service with multiple endpoints - run: | - iter8 k launch --localChart --chartName charts/iter8 \ - --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ - --set "tasks={ready,grpc}" \ - --set ready.deploy=routeguide \ - --set ready.service=routeguide \ - --set ready.timeout=60s \ - --set grpc.host=routeguide.default:50051 \ - --set grpc.protoURL=https://raw.githubusercontent.com/grpc/grpc-go/v1.52.0/examples/route_guide/routeguide/route_guide.proto \ - --set grpc.endpoints.getFeature.call=routeguide.RouteGuide.GetFeature \ - --set grpc.endpoints.getFeature.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/unary.json \ - --set grpc.endpoints.listFeatures.call=routeguide.RouteGuide.ListFeatures \ - --set grpc.endpoints.listFeatures.dataURL=https://raw.githubusercontent.com/iter8-tools/docs/v0.13.13/samples/grpc-payload/server.json - sleep 60 - - - name: Try other iter8 k commands - run: | - iter8 k log - iter8 k delete - - - name: Check GET /grpcDashboard - run: | - curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f - - kubernetes-load-test-grpc2: - name: gRPC load test 2 with various URLs - needs: build-push-test-image - runs-on: ubuntu-latest - steps: - - name: Install Go - uses: actions/setup-go@v4 - with: - go-version: 1.19 - - name: Check out code into the Go module directory - uses: actions/checkout@v3 - - name: Build and install Iter8 - run: make install - - - name: Start kind cluster - uses: helm/kind-action@v1.5.0 - with: - wait: 300s - - - name: Create hello application - run: | - kubectl create deploy hello --image=docker.io/grpc/java-example-hostname:latest --port=50051 - kubectl expose deploy hello --port=50051 - - - name: Install controller - run: | - helm install iter8 charts/controller --set image=${{ needs.assets.outputs.IMAGE_TAG }} --set logLevel=trace - kubectl rollout status --watch --timeout=60s statefulset/iter8 - - - name: Expose metrics service - run: | - kubectl port-forward service/iter8 8080:8080 & - - - name: load test grpc service with protoURL - run: | - iter8 k launch --localChart --chartName charts/iter8 \ - --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ - --set "tasks={ready,grpc}" \ - --set ready.deploy=hello \ - --set ready.service=hello \ - --set ready.timeout=60s \ - --set grpc.host="hello.default:50051" \ - --set grpc.call="helloworld.Greeter.SayHello" \ - --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ - --set grpc.data.name="frodo" - sleep 60 - - - name: Try other iter8 k commands - run: | - iter8 k log - iter8 k delete - - - name: Check GET /grpcDashboard - run: | - curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f - - - name: load test grpc service with proto/data/metadata URLs - 
run: | - iter8 k launch --localChart --chartName charts/iter8 \ - --set iter8Image=${{ needs.assets.outputs.IMAGE_TAG }} \ - --set "tasks={ready,grpc}" \ - --set ready.deploy=hello \ - --set ready.service=hello \ - --set ready.timeout=60s \ - --set grpc.host="hello.default:50051" \ - --set grpc.call="helloworld.Greeter.SayHello" \ - --set grpc.protoURL="https://raw.githubusercontent.com/grpc/grpc-go/master/examples/helloworld/helloworld/helloworld.proto" \ - --set grpc.dataURL="https://gist.githubusercontent.com/sriumcp/3f3178f4b698af6696c925832e51b0ba/raw/d02aa698d34aa2067f7a2f6afb4ceb616b0db822/name.json" \ - --set grpc.metadataURL="https://gist.githubusercontent.com/sriumcp/3f3178f4b698af6696c925832e51b0ba/raw/d02aa698d34aa2067f7a2f6afb4ceb616b0db822/name.json" - sleep 60 - - - name: Try other iter8 k commands - run: | - iter8 k log - iter8 k delete - - - name: Check GET /grpcDashboard - run: | - curl "http://localhost:8080/grpcDashboard?namespace=default&experiment=default" -f diff --git a/.github/workflows/versionbump.yaml b/.github/workflows/versionbump.yaml index b6bc7bc10..9901e1d8f 100644 --- a/.github/workflows/versionbump.yaml +++ b/.github/workflows/versionbump.yaml @@ -1,88 +1,94 @@ -# name: Version bump check - -# on: -# pull_request: -# branches: -# - master - -# jobs: -# # Get the paths for the Helm charts to version check -# get_paths: -# runs-on: ubuntu-latest - -# steps: -# - uses: actions/checkout@v3 -# with: -# fetch-depth: 0 - -# - name: Get the paths for Helm charts to version check -# id: set-matrix -# run: | -# # Get paths (in string form) -# stringPaths=$(find -maxdepth 2 -path './charts/*') - -# # Check paths (length greater than 0) -# stringPathsLength=$(echo ${#stringPaths}) -# if (( stringPathsLength == 0 )); -# then -# echo "No paths to check" -# exit 1 -# fi - -# # Serialize paths into JSON array -# paths=$(jq -ncR '[inputs]' <<< "$stringPaths") -# echo $paths - -# # Output serialized paths -# echo "matrix=$paths" >> $GITHUB_OUTPUT - -# outputs: -# matrix: ${{ steps.set-matrix.outputs.matrix }} - -# # Version check Helm charts based on paths provided by previous job -# version_check: -# name: Version check -# needs: get_paths -# runs-on: ubuntu-latest -# strategy: -# matrix: -# version: ${{ fromJson(needs.get_paths.outputs.matrix) }} -# steps: -# - uses: actions/checkout@v3 -# with: -# fetch-depth: 0 - -# - name: Get modified files in the ${{ matrix.version }} folder -# id: modified-files -# uses: tj-actions/changed-files@v35 -# with: -# files: ${{ matrix.version }} - -# - name: Run step if any file(s) in the ${{ matrix.version }} folder was modified -# if: steps.modified-files.outputs.any_modified == 'true' -# run: | -# # Remove ./ prefix from raw matrix version (i.e. 
./charts/iter8 -> charts/iter8) -# version=$(echo ${{ matrix.version }} | sed s/".\/"//) - -# # Get chart file -# chartFile="$version/Chart.yaml" - -# # Get git diff of the Chart.yaml between the master branch and PR branch -# gitDiff=$(git diff origin/master..HEAD -- $chartFile) -# echo $gitDiff - -# # Addition in Chart.yaml -# addChart="+++ b/$add$chartFile" -# echo $addChart - -# # Addition of version in Chart.yaml -# addVersion="+version:" -# echo $addVersion - -# if [[ "$gitDiff" == *"$addChart"* ]] && [[ "$gitDiff" == *$addVersion* ]]; -# then -# echo "version in $chartFile has been modified" -# else -# echo "version in $chartFile needs to be modified" -# exit 1 -# fi \ No newline at end of file +name: Version bump check + +# Only runs when charts have changed + +# Check if the version number of changed charts have been bumped + +on: + pull_request: + branches: + - master + paths: + - charts/** + +jobs: + # Get the paths for the Helm charts to version check + get_paths: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Get the paths for Helm charts to version check + id: set-matrix + run: | + # Get paths (in string form) + stringPaths=$(find -maxdepth 2 -path './charts/*') + + # Check paths (length greater than 0) + stringPathsLength=$(echo ${#stringPaths}) + if (( stringPathsLength == 0 )); + then + echo "No paths to check" + exit 1 + fi + + # Serialize paths into JSON array + paths=$(jq -ncR '[inputs]' <<< "$stringPaths") + echo $paths + + # Output serialized paths + echo "matrix=$paths" >> $GITHUB_OUTPUT + + outputs: + matrix: ${{ steps.set-matrix.outputs.matrix }} + + # Version check Helm charts based on paths provided by previous job + version_check: + name: Version check + needs: get_paths + runs-on: ubuntu-latest + strategy: + matrix: + version: ${{ fromJson(needs.get_paths.outputs.matrix) }} + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Get modified files in the ${{ matrix.version }} folder + id: modified-files + uses: tj-actions/changed-files@v35 + with: + files: ${{ matrix.version }} + + - name: Run step if any file(s) in the ${{ matrix.version }} folder was modified + if: steps.modified-files.outputs.any_modified == 'true' + run: | + # Remove ./ prefix from raw matrix version (i.e. 
./charts/iter8 -> charts/iter8) + version=$(echo ${{ matrix.version }} | sed s/".\/"//) + + # Get chart file + chartFile="$version/Chart.yaml" + + # Get git diff of the Chart.yaml between the master branch and PR branch + gitDiff=$(git diff origin/master..HEAD -- $chartFile) + echo $gitDiff + + # Addition in Chart.yaml + addChart="+++ b/$add$chartFile" + echo $addChart + + # Addition of version in Chart.yaml + addVersion="+version:" + echo $addVersion + + if [[ "$gitDiff" == *"$addChart"* ]] && [[ "$gitDiff" == *$addVersion* ]]; + then + echo "version in $chartFile has been modified" + else + echo "version in $chartFile needs to be modified" + exit 1 + fi \ No newline at end of file From a837fcb89fb1e228c09fbb4dbff43e7e93bd6084 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Thu, 24 Aug 2023 14:06:55 -0400 Subject: [PATCH 113/121] Revert chart and kustomize changes Signed-off-by: Alan Cha --- charts/{controller => autox}/.helmignore | 0 charts/autox/Chart.yaml | 19 +++++ .../templates/_helpers.tpl | 6 +- charts/autox/templates/deployment.yaml | 42 +++++++++++ charts/autox/templates/roles.yaml | 69 +++++++++++++++++++ charts/autox/templates/secret.yaml | 37 ++++++++++ charts/autox/templates/serviceaccount.yaml | 6 ++ charts/autox/values.yaml | 13 ++++ charts/iter8/Chart.yaml | 2 +- charts/iter8/templates/_experiment.tpl | 11 +-- charts/iter8/templates/_k-cronjob.tpl | 42 +++++++++++ charts/iter8/templates/_k-job.tpl | 3 - charts/iter8/templates/_task-assess.tpl | 40 +++++++++++ .../iter8/templates/_task-custommetrics.tpl | 6 ++ charts/iter8/templates/_task-github.tpl | 3 + charts/iter8/templates/_task-slack.tpl | 3 + charts/iter8/templates/k8s.yaml | 7 ++ charts/iter8/values.yaml | 10 +-- charts/traffic/.helmignore | 26 +++++++ charts/{controller => traffic}/Chart.yaml | 6 +- charts/traffic/templates/_helpers.tpl | 12 ++++ .../templates/configmap.yaml | 0 .../templates/persistentvolumeclaim.yaml | 2 +- .../templates/roles.yaml | 4 +- .../templates/service.yaml | 2 +- .../templates/serviceaccount.yaml | 2 +- .../templates/statefulset.yaml | 8 +-- .../testdata/values.yaml | 0 charts/{controller => traffic}/values.yaml | 0 .../clusterScoped/kustomization.yaml | 0 .../namespaceScoped/configmap.yaml | 2 +- .../namespaceScoped/kustomization.yaml | 2 +- .../namespaceScoped/pvc.yaml | 0 .../namespaceScoped/role.yaml | 0 .../namespaceScoped/rolebinding.yaml | 0 .../namespaceScoped/service.yaml | 2 +- .../namespaceScoped/serviceaccount.yaml | 0 .../namespaceScoped/statefulset.yaml | 8 +-- 38 files changed, 359 insertions(+), 36 deletions(-) rename charts/{controller => autox}/.helmignore (100%) create mode 100644 charts/autox/Chart.yaml rename charts/{controller => autox}/templates/_helpers.tpl (70%) create mode 100644 charts/autox/templates/deployment.yaml create mode 100644 charts/autox/templates/roles.yaml create mode 100644 charts/autox/templates/secret.yaml create mode 100644 charts/autox/templates/serviceaccount.yaml create mode 100644 charts/autox/values.yaml create mode 100644 charts/iter8/templates/_k-cronjob.tpl create mode 100644 charts/iter8/templates/_task-assess.tpl create mode 100644 charts/iter8/templates/_task-custommetrics.tpl create mode 100644 charts/traffic/.helmignore rename charts/{controller => traffic}/Chart.yaml (83%) create mode 100644 charts/traffic/templates/_helpers.tpl rename charts/{controller => traffic}/templates/configmap.yaml (100%) rename charts/{controller => traffic}/templates/persistentvolumeclaim.yaml (83%) rename charts/{controller => 
traffic}/templates/roles.yaml (92%) rename charts/{controller => traffic}/templates/service.yaml (79%) rename charts/{controller => traffic}/templates/serviceaccount.yaml (62%) rename charts/{controller => traffic}/templates/statefulset.yaml (88%) rename charts/{controller => traffic}/testdata/values.yaml (100%) rename charts/{controller => traffic}/values.yaml (100%) rename kustomize/{controller => iter8}/clusterScoped/kustomization.yaml (100%) rename kustomize/{controller => iter8}/namespaceScoped/configmap.yaml (96%) rename kustomize/{controller => iter8}/namespaceScoped/kustomization.yaml (82%) rename kustomize/{controller => iter8}/namespaceScoped/pvc.yaml (100%) rename kustomize/{controller => iter8}/namespaceScoped/role.yaml (100%) rename kustomize/{controller => iter8}/namespaceScoped/rolebinding.yaml (100%) rename kustomize/{controller => iter8}/namespaceScoped/service.yaml (82%) rename kustomize/{controller => iter8}/namespaceScoped/serviceaccount.yaml (100%) rename kustomize/{controller => iter8}/namespaceScoped/statefulset.yaml (91%) diff --git a/charts/controller/.helmignore b/charts/autox/.helmignore similarity index 100% rename from charts/controller/.helmignore rename to charts/autox/.helmignore diff --git a/charts/autox/Chart.yaml b/charts/autox/Chart.yaml new file mode 100644 index 000000000..2393e39f0 --- /dev/null +++ b/charts/autox/Chart.yaml @@ -0,0 +1,19 @@ +apiVersion: v2 +name: autox +version: 0.1.9 +description: Iter8 autoX controller +type: application +keywords: +- Iter8 +- autox +- experiment +home: https://iter8.tools +sources: +- https://github.com/iter8-tools/iter8 +maintainers: +- name: Alan Cha + email: alan.cha1@ibm.com +- name: Iter8 + email: iter8-tools@gmail.com +icon: https://github.com/iter8-tools/iter8/raw/master/mkdocs/docs/images/favicon.png +appVersion: v0.15 diff --git a/charts/controller/templates/_helpers.tpl b/charts/autox/templates/_helpers.tpl similarity index 70% rename from charts/controller/templates/_helpers.tpl rename to charts/autox/templates/_helpers.tpl index 3c50cdff6..9b8154785 100644 --- a/charts/controller/templates/_helpers.tpl +++ b/charts/autox/templates/_helpers.tpl @@ -1,10 +1,10 @@ -{{- define "iter8-controller.name" -}} +{{- define "iter8-autox.name" -}} {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} {{- end -}} -{{- define "iter8-controller.labels" -}} +{{- define "iter8-autox.labels" -}} labels: - app.kubernetes.io/name: {{ template "iter8-controller.name" . }} + app.kubernetes.io/name: {{ template "iter8-autox.name" . }} helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/instance: {{ .Release.Name }} diff --git a/charts/autox/templates/deployment.yaml b/charts/autox/templates/deployment.yaml new file mode 100644 index 000000000..fadb73cd8 --- /dev/null +++ b/charts/autox/templates/deployment.yaml @@ -0,0 +1,42 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Release.Name }} + namespace: argocd + {{ template "iter8-autox.labels" . }} +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: {{ template "iter8-autox.name" . }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ template "iter8-autox.name" . }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/secret.yaml") . 
| sha256sum }} + spec: + serviceAccountName: {{ .Release.Name }} + containers: + - name: iter8-autox + image: {{ .Values.image }} + imagePullPolicy: Always + command: ["/bin/iter8"] + args: ["autox", "-l", "{{ .Values.logLevel }}"] + env: + - name: CONFIG + value: /config/config.yaml + volumeMounts: + - name: autox-config + mountPath: "/config" + readOnly: true + resources: + {{ toYaml .Values.resources | indent 10 | trim }} + securityContext: + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + volumes: + - name: autox-config + secret: + secretName: {{ .Release.Name }} diff --git a/charts/autox/templates/roles.yaml b/charts/autox/templates/roles.yaml new file mode 100644 index 000000000..1a78ca3c7 --- /dev/null +++ b/charts/autox/templates/roles.yaml @@ -0,0 +1,69 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ $.Release.Name }}-argocd + namespace: argocd + {{ template "iter8-autox.labels" $ }} +rules: +- apiGroups: ["argoproj.io"] + resources: ["applications"] + verbs: ["create", "get", "patch", "delete"] +- apiGroups: [""] + resources: ["secrets"] + verbs: ["list"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ $.Release.Name }}-argocd + namespace: argocd + {{ template "iter8-autox.labels" $ }} +subjects: +- kind: ServiceAccount + name: {{ $.Release.Name }} + namespace: argocd +roleRef: + kind: Role + name: {{ $.Release.Name }}-argocd + apiGroup: rbac.authorization.k8s.io +{{- range $releaseGroupSpecName, $releaseGroupSpec := .Values.groups }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ $.Release.Name }}-{{ $releaseGroupSpecName }} + namespace: {{ $releaseGroupSpec.trigger.namespace }} + {{ template "iter8-autox.labels" $ }} +rules: +{{- if eq $releaseGroupSpec.trigger.resource "deployments" }} +- apiGroups: ["apps"] + resources: ["deployments"] + verbs: ["watch", "list", "get"] +{{- end }} +{{- if eq $releaseGroupSpec.trigger.resource "services" }} +- apiGroups: [""] + resources: ["services"] + verbs: ["watch", "list", "get"] +{{- end }} +{{- if eq $releaseGroupSpec.trigger.resource "ksvcs" }} +- apiGroups: ["serving.knative.dev"] + resources: ["services"] + verbs: ["watch", "list", "get"] +{{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ $.Release.Name }}-{{ $releaseGroupSpecName }} + namespace: {{ $releaseGroupSpec.trigger.namespace }} + {{ template "iter8-autox.labels" $ }} +subjects: +- kind: ServiceAccount + name: {{ $.Release.Name }} + namespace: argocd +roleRef: + kind: Role + name: {{ $.Release.Name }}-{{ $releaseGroupSpecName }} + apiGroup: rbac.authorization.k8s.io +{{- end }} \ No newline at end of file diff --git a/charts/autox/templates/secret.yaml b/charts/autox/templates/secret.yaml new file mode 100644 index 000000000..4792ba90e --- /dev/null +++ b/charts/autox/templates/secret.yaml @@ -0,0 +1,37 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Release.Name }} + namespace: argocd + {{ template "iter8-autox.labels" . 
}} +stringData: + config.yaml: | + specs: + {{- range $releaseGroupSpecName, $releaseGroupSpec := .Values.groups }} + {{ $releaseGroupSpecName }}: + trigger: + group: {{ $releaseGroupSpec.trigger.group }} + version: {{ $releaseGroupSpec.trigger.version }} + resource: {{ $releaseGroupSpec.trigger.resource }} + name: {{ $releaseGroupSpec.trigger.name }} + namespace: {{ $releaseGroupSpec.trigger.namespace }} + releaseSpecs: + {{- range $releaseSpecName, $releaseSpec := $releaseGroupSpec.specs }} + {{ $releaseSpecName }}: + name: {{ $releaseSpec.name }} + values: + {{ toYaml $releaseSpec.values | indent 14 | trim }} + version: {{ $releaseSpec.version }} + {{- end }} + {{- end }} +{{- range $releaseGroupSpecName, $releaseGroupSpec := .Values.groups }} +{{ $hash := print $releaseGroupSpec | sha256sum | trunc 5 }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: autox-{{ $releaseGroupSpecName }}-{{ $hash }} + namespace: argocd + labels: + iter8.tools/autox-group: {{ $releaseGroupSpecName }} +{{- end }} \ No newline at end of file diff --git a/charts/autox/templates/serviceaccount.yaml b/charts/autox/templates/serviceaccount.yaml new file mode 100644 index 000000000..a9a443e44 --- /dev/null +++ b/charts/autox/templates/serviceaccount.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Release.Name }} + namespace: argocd + {{ template "iter8-autox.labels" . }} diff --git a/charts/autox/values.yaml b/charts/autox/values.yaml new file mode 100644 index 000000000..b1ea5f199 --- /dev/null +++ b/charts/autox/values.yaml @@ -0,0 +1,13 @@ +### AutoX service image +image: iter8/iter8:0.15 + +logLevel: info + +### Resource limits +resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" \ No newline at end of file diff --git a/charts/iter8/Chart.yaml b/charts/iter8/Chart.yaml index 497ff0186..ef75e238c 100644 --- a/charts/iter8/Chart.yaml +++ b/charts/iter8/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 name: iter8 -version: 0.15.2 +version: 0.15.1 description: Iter8 experiment chart type: application home: https://iter8.tools diff --git a/charts/iter8/templates/_experiment.tpl b/charts/iter8/templates/_experiment.tpl index 05b8ca7a0..9ba716932 100644 --- a/charts/iter8/templates/_experiment.tpl +++ b/charts/iter8/templates/_experiment.tpl @@ -2,12 +2,13 @@ {{- if not .Values.tasks }} {{- fail ".Values.tasks is empty" }} {{- end }} -metadata: - name: {{ .Release.Name }} - namespace: {{ .Release.Namespace }} spec: {{- range .Values.tasks }} - {{- if eq "grpc" . }} + {{- if eq "assess" . }} + {{- include "task.assess" $.Values.assess -}} + {{- else if eq "custommetrics" . }} + {{- include "task.custommetrics" $.Values.custommetrics -}} + {{- else if eq "grpc" . }} {{- include "task.grpc" $.Values.grpc -}} {{- else if eq "http" . }} {{- include "task.http" $.Values.http -}} @@ -18,7 +19,7 @@ spec: {{- else if eq "github" . 
}} {{- include "task.github" $.Values.github -}} {{- else }} - {{- fail "task name must be one of grpc, http, ready, github, or slack" -}} + {{- fail "task name must be one of assess, custommetrics, grpc, http, ready, github, or slack" -}} {{- end }} {{- end }} result: diff --git a/charts/iter8/templates/_k-cronjob.tpl b/charts/iter8/templates/_k-cronjob.tpl new file mode 100644 index 000000000..40beedb7b --- /dev/null +++ b/charts/iter8/templates/_k-cronjob.tpl @@ -0,0 +1,42 @@ +{{- define "k.cronjob" -}} +apiVersion: batch/v1 +kind: CronJob +metadata: + name: {{ .Release.Name }}-{{ .Release.Revision }}-cronjob + annotations: + iter8.tools/group: {{ .Release.Name }} + iter8.tools/revision: {{ .Release.Revision | quote }} +spec: + schedule: {{ .Values.cronjobSchedule | quote }} + concurrencyPolicy: Forbid + jobTemplate: + spec: + template: + metadata: + labels: + iter8.tools/group: {{ .Release.Name }} + annotations: + sidecar.istio.io/inject: "false" + spec: + serviceAccountName: {{ default (printf "%s-iter8-sa" .Release.Name) .Values.serviceAccountName }} + containers: + - name: iter8 + image: {{ .Values.iter8Image }} + imagePullPolicy: Always + command: + - "/bin/sh" + - "-c" + - | + iter8 k run --namespace {{ .Release.Namespace }} --group {{ .Release.Name }} -l {{ .Values.logLevel }} --reuseResult + resources: + {{ toYaml .Values.resources | indent 14 | trim }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsNonRoot: true + runAsUser: 1001040000 + restartPolicy: Never + backoffLimit: 0 +{{- end }} diff --git a/charts/iter8/templates/_k-job.tpl b/charts/iter8/templates/_k-job.tpl index 59b906ca8..aac373cdc 100644 --- a/charts/iter8/templates/_k-job.tpl +++ b/charts/iter8/templates/_k-job.tpl @@ -33,9 +33,6 @@ spec: - ALL runAsNonRoot: true runAsUser: 1001040000 - env: - - name: METRICS_SERVER_URL - value: "{{ .Values.metricsServerURL }}" restartPolicy: Never backoffLimit: 0 {{- end }} diff --git a/charts/iter8/templates/_task-assess.tpl b/charts/iter8/templates/_task-assess.tpl new file mode 100644 index 000000000..8b3e92019 --- /dev/null +++ b/charts/iter8/templates/_task-assess.tpl @@ -0,0 +1,40 @@ +{{- define "task.assess" -}} +{{- if . }} +# task: validate service level objectives for app using +# the metrics collected in an earlier task +- task: assess + with: +{{- if .SLOs }} + SLOs: +{{- if .SLOs.upper }} + upper: +{{- range $m, $l := .SLOs.upper }} + - metric: {{ $m }} + limit: {{ $l }} +{{- end }} +{{- end }} +{{- if .SLOs.lower }} + lower: +{{- range $m, $l := .SLOs.lower }} + - metric: {{ $m }} + limit: {{ $l }} +{{- end }} +{{- end }} +{{- end }} +{{- if .rewards }} + rewards: +{{- if .rewards.max }} + max: +{{- range $r, $val := .rewards.max }} + - {{ $val }} +{{- end }} +{{- end }} +{{- if .rewards.min }} + min: +{{- range $r, $val := .rewards.min }} + - {{ $val }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/iter8/templates/_task-custommetrics.tpl b/charts/iter8/templates/_task-custommetrics.tpl new file mode 100644 index 000000000..189015b16 --- /dev/null +++ b/charts/iter8/templates/_task-custommetrics.tpl @@ -0,0 +1,6 @@ +{{- define "task.custommetrics" }} +# task: collect custom metrics from providers (databases) +- task: custommetrics + with: +{{ . 
| toYaml | indent 4 }} +{{- end }} \ No newline at end of file diff --git a/charts/iter8/templates/_task-github.tpl b/charts/iter8/templates/_task-github.tpl index 34cf4e9ce..8ee103d02 100644 --- a/charts/iter8/templates/_task-github.tpl +++ b/charts/iter8/templates/_task-github.tpl @@ -14,6 +14,9 @@ {{- end }} # task: send a GitHub notification - task: notify +{{- if .if }} + if: {{ .if | quote }} +{{- end }} with: url: https://api.github.com/repos/{{ .owner }}/{{ .repo }}/dispatches method: POST diff --git a/charts/iter8/templates/_task-slack.tpl b/charts/iter8/templates/_task-slack.tpl index 0d4867c9d..51233a62d 100644 --- a/charts/iter8/templates/_task-slack.tpl +++ b/charts/iter8/templates/_task-slack.tpl @@ -8,6 +8,9 @@ {{- end }} # task: send a Slack notification - task: notify +{{- if .if }} + if: {{ .if | quote }} +{{- end }} with: url: {{ .url }} method: POST diff --git a/charts/iter8/templates/k8s.yaml b/charts/iter8/templates/k8s.yaml index fe530c488..2b391f1b5 100644 --- a/charts/iter8/templates/k8s.yaml +++ b/charts/iter8/templates/k8s.yaml @@ -8,4 +8,11 @@ {{ include "k.rolebinding" . }} {{- end}} --- +{{- if eq "job" .Values.runner }} {{ include "k.job" . }} +{{- else if eq "cronjob" .Values.runner }} +{{ include "k.cronjob" . }} +{{- else if eq "none" .Values.runner }} +{{- else }} +{{- fail "runner must be one of job, cronjob, or none" }} +{{- end }} diff --git a/charts/iter8/values.yaml b/charts/iter8/values.yaml index 9b8a175b5..0344d00d0 100644 --- a/charts/iter8/values.yaml +++ b/charts/iter8/values.yaml @@ -4,16 +4,16 @@ iter8Image: iter8/iter8:0.15 ### majorMinor is the minor version of Iter8 majorMinor: v0.15 +### runner for Kubernetes experiments may be job, cronjob, or none +runner: none + logLevel: info -### resources are the resource limits for the pods +### Resource limits resources: requests: memory: "64Mi" cpu: "250m" limits: memory: "128Mi" - cpu: "500m" - -### metricsServerURL is the URL to the Metrics server -metricsServerURL: http://iter8.default:8080 \ No newline at end of file + cpu: "500m" \ No newline at end of file diff --git a/charts/traffic/.helmignore b/charts/traffic/.helmignore new file mode 100644 index 000000000..95fe8a639 --- /dev/null +++ b/charts/traffic/.helmignore @@ -0,0 +1,26 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ + +# generated files need to be ignored +experiment.yaml \ No newline at end of file diff --git a/charts/controller/Chart.yaml b/charts/traffic/Chart.yaml similarity index 83% rename from charts/controller/Chart.yaml rename to charts/traffic/Chart.yaml index 1737275e8..c9c811a74 100644 --- a/charts/controller/Chart.yaml +++ b/charts/traffic/Chart.yaml @@ -1,11 +1,11 @@ apiVersion: v2 -name: controller +name: traffic version: 0.1.10 -description: Iter8 controller controller +description: Iter8 traffic controller type: application keywords: - Iter8 -- controller +- traffic - experiment home: https://iter8.tools sources: diff --git a/charts/traffic/templates/_helpers.tpl b/charts/traffic/templates/_helpers.tpl new file mode 100644 index 000000000..382221e2d --- /dev/null +++ b/charts/traffic/templates/_helpers.tpl @@ -0,0 +1,12 @@ +{{- define "iter8-traffic.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "iter8-traffic.labels" -}} + labels: + app.kubernetes.io/name: {{ template "iter8-traffic.name" . }} + helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion }} +{{- end -}} diff --git a/charts/controller/templates/configmap.yaml b/charts/traffic/templates/configmap.yaml similarity index 100% rename from charts/controller/templates/configmap.yaml rename to charts/traffic/templates/configmap.yaml diff --git a/charts/controller/templates/persistentvolumeclaim.yaml b/charts/traffic/templates/persistentvolumeclaim.yaml similarity index 83% rename from charts/controller/templates/persistentvolumeclaim.yaml rename to charts/traffic/templates/persistentvolumeclaim.yaml index 34ef849b6..fbe540a77 100644 --- a/charts/controller/templates/persistentvolumeclaim.yaml +++ b/charts/traffic/templates/persistentvolumeclaim.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: PersistentVolumeClaim metadata: name: {{ .Release.Name }} - {{ template "iter8-controller.labels" . }} + {{ template "iter8-traffic.labels" . 
}} spec: accessModes: - ReadWriteOnce diff --git a/charts/controller/templates/roles.yaml b/charts/traffic/templates/roles.yaml similarity index 92% rename from charts/controller/templates/roles.yaml rename to charts/traffic/templates/roles.yaml index ef8a63540..f236a6133 100644 --- a/charts/controller/templates/roles.yaml +++ b/charts/traffic/templates/roles.yaml @@ -6,7 +6,7 @@ kind: Role {{- end }} metadata: name: {{ $.Release.Name }} - {{ template "iter8-controller.labels" $ }} + {{ template "iter8-traffic.labels" $ }} rules: {{- range $typeName, $type := .Values.resourceTypes }} {{- if not $type.Resource }} @@ -31,7 +31,7 @@ kind: RoleBinding {{- end }} metadata: name: {{ $.Release.Name }} - {{ template "iter8-controller.labels" $ }} + {{ template "iter8-traffic.labels" $ }} subjects: - kind: ServiceAccount name: {{ $.Release.Name }} diff --git a/charts/controller/templates/service.yaml b/charts/traffic/templates/service.yaml similarity index 79% rename from charts/controller/templates/service.yaml rename to charts/traffic/templates/service.yaml index ab8b8d2d1..3838ca9c4 100644 --- a/charts/controller/templates/service.yaml +++ b/charts/traffic/templates/service.yaml @@ -4,7 +4,7 @@ metadata: name: {{ .Release.Name }} spec: selector: - app.kubernetes.io/name: {{ template "iter8-controller.name" . }} + app.kubernetes.io/name: {{ template "iter8-traffic.name" . }} ports: - name: grpc port: {{ .Values.abn.port }} diff --git a/charts/controller/templates/serviceaccount.yaml b/charts/traffic/templates/serviceaccount.yaml similarity index 62% rename from charts/controller/templates/serviceaccount.yaml rename to charts/traffic/templates/serviceaccount.yaml index 28f4db508..81bc37728 100644 --- a/charts/controller/templates/serviceaccount.yaml +++ b/charts/traffic/templates/serviceaccount.yaml @@ -2,4 +2,4 @@ apiVersion: v1 kind: ServiceAccount metadata: name: {{ .Release.Name }} - {{ template "iter8-controller.labels" . }} \ No newline at end of file + {{ template "iter8-traffic.labels" . }} \ No newline at end of file diff --git a/charts/controller/templates/statefulset.yaml b/charts/traffic/templates/statefulset.yaml similarity index 88% rename from charts/controller/templates/statefulset.yaml rename to charts/traffic/templates/statefulset.yaml index e2d1be856..e6a565e20 100644 --- a/charts/controller/templates/statefulset.yaml +++ b/charts/traffic/templates/statefulset.yaml @@ -2,21 +2,21 @@ apiVersion: apps/v1 kind: StatefulSet metadata: name: {{ .Release.Name }} - {{ template "iter8-controller.labels" . }} + {{ template "iter8-traffic.labels" . }} spec: serviceName: {{ .Release.Name }} selector: matchLabels: - app.kubernetes.io/name: {{ template "iter8-controller.name" . }} + app.kubernetes.io/name: {{ template "iter8-traffic.name" . }} template: metadata: labels: - app.kubernetes.io/name: {{ template "iter8-controller.name" . }} + app.kubernetes.io/name: {{ template "iter8-traffic.name" . 
}} spec: terminationGracePeriodSeconds: 10 serviceAccountName: {{ .Release.Name }} containers: - - name: iter8-controller + - name: iter8-traffic image: {{ .Values.image }} imagePullPolicy: Always command: ["/bin/iter8"] diff --git a/charts/controller/testdata/values.yaml b/charts/traffic/testdata/values.yaml similarity index 100% rename from charts/controller/testdata/values.yaml rename to charts/traffic/testdata/values.yaml diff --git a/charts/controller/values.yaml b/charts/traffic/values.yaml similarity index 100% rename from charts/controller/values.yaml rename to charts/traffic/values.yaml diff --git a/kustomize/controller/clusterScoped/kustomization.yaml b/kustomize/iter8/clusterScoped/kustomization.yaml similarity index 100% rename from kustomize/controller/clusterScoped/kustomization.yaml rename to kustomize/iter8/clusterScoped/kustomization.yaml diff --git a/kustomize/controller/namespaceScoped/configmap.yaml b/kustomize/iter8/namespaceScoped/configmap.yaml similarity index 96% rename from kustomize/controller/namespaceScoped/configmap.yaml rename to kustomize/iter8/namespaceScoped/configmap.yaml index 8e1fcce0b..47e6c1db1 100644 --- a/kustomize/controller/namespaceScoped/configmap.yaml +++ b/kustomize/iter8/namespaceScoped/configmap.yaml @@ -5,7 +5,7 @@ metadata: data: config.yaml: | defaultResync: 15m - image: iter8/iter8:0.16 + image: iter8/iter8:0.15 logLevel: info resourceTypes: cm: diff --git a/kustomize/controller/namespaceScoped/kustomization.yaml b/kustomize/iter8/namespaceScoped/kustomization.yaml similarity index 82% rename from kustomize/controller/namespaceScoped/kustomization.yaml rename to kustomize/iter8/namespaceScoped/kustomization.yaml index 09f3fd598..e356ac9ea 100644 --- a/kustomize/controller/namespaceScoped/kustomization.yaml +++ b/kustomize/iter8/namespaceScoped/kustomization.yaml @@ -8,5 +8,5 @@ resources: - statefulset.yaml commonLabels: - app.kubernetes.io/name: controller + app.kubernetes.io/name: traffic app.kubernetes.io/version: v0.15 diff --git a/kustomize/controller/namespaceScoped/pvc.yaml b/kustomize/iter8/namespaceScoped/pvc.yaml similarity index 100% rename from kustomize/controller/namespaceScoped/pvc.yaml rename to kustomize/iter8/namespaceScoped/pvc.yaml diff --git a/kustomize/controller/namespaceScoped/role.yaml b/kustomize/iter8/namespaceScoped/role.yaml similarity index 100% rename from kustomize/controller/namespaceScoped/role.yaml rename to kustomize/iter8/namespaceScoped/role.yaml diff --git a/kustomize/controller/namespaceScoped/rolebinding.yaml b/kustomize/iter8/namespaceScoped/rolebinding.yaml similarity index 100% rename from kustomize/controller/namespaceScoped/rolebinding.yaml rename to kustomize/iter8/namespaceScoped/rolebinding.yaml diff --git a/kustomize/controller/namespaceScoped/service.yaml b/kustomize/iter8/namespaceScoped/service.yaml similarity index 82% rename from kustomize/controller/namespaceScoped/service.yaml rename to kustomize/iter8/namespaceScoped/service.yaml index 19c66f0ff..3c1cc4698 100644 --- a/kustomize/controller/namespaceScoped/service.yaml +++ b/kustomize/iter8/namespaceScoped/service.yaml @@ -5,7 +5,7 @@ metadata: spec: clusterIP: None selector: - app.kubernetes.io/name: controller + app.kubernetes.io/name: traffic ports: - name: grpc port: 50051 diff --git a/kustomize/controller/namespaceScoped/serviceaccount.yaml b/kustomize/iter8/namespaceScoped/serviceaccount.yaml similarity index 100% rename from kustomize/controller/namespaceScoped/serviceaccount.yaml rename to 
kustomize/iter8/namespaceScoped/serviceaccount.yaml diff --git a/kustomize/controller/namespaceScoped/statefulset.yaml b/kustomize/iter8/namespaceScoped/statefulset.yaml similarity index 91% rename from kustomize/controller/namespaceScoped/statefulset.yaml rename to kustomize/iter8/namespaceScoped/statefulset.yaml index 2ad1016f2..293e35872 100644 --- a/kustomize/controller/namespaceScoped/statefulset.yaml +++ b/kustomize/iter8/namespaceScoped/statefulset.yaml @@ -6,17 +6,17 @@ spec: serviceName: iter8 selector: matchLabels: - app.kubernetes.io/name: controller + app.kubernetes.io/name: traffic template: metadata: labels: - app.kubernetes.io/name: controller + app.kubernetes.io/name: traffic spec: terminationGracePeriodSeconds: 10 serviceAccountName: iter8 containers: - - name: iter8-controller - image: iter8/iter8:0.16 + - name: iter8-traffic + image: iter8/iter8:0.15 imagePullPolicy: Always command: ["/bin/iter8"] args: ["controllers", "-l", "info"] From 7b7aee098cf196ea22cd6da9c1a5fe3b02190bd2 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Thu, 24 Aug 2023 14:13:44 -0400 Subject: [PATCH 114/121] Fix paths Signed-off-by: Alan Cha --- .github/workflows/golangci-lint.yml | 2 +- .github/workflows/linkcheck.yaml | 2 +- .github/workflows/testcode.yaml | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index a62cf3c45..1ff27440d 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -9,7 +9,7 @@ on: branches: - master paths: - - **.go + - '**.go' permissions: contents: read diff --git a/.github/workflows/linkcheck.yaml b/.github/workflows/linkcheck.yaml index 9057cf83f..5e7a21f95 100644 --- a/.github/workflows/linkcheck.yaml +++ b/.github/workflows/linkcheck.yaml @@ -9,7 +9,7 @@ on: branches: - master paths: - - **.md + - '**.md' schedule: - cron: "0 0 1 * *" diff --git a/.github/workflows/testcode.yaml b/.github/workflows/testcode.yaml index 03279b720..8a75fe743 100644 --- a/.github/workflows/testcode.yaml +++ b/.github/workflows/testcode.yaml @@ -7,10 +7,10 @@ name: Performance tests to assess the functionality of the latest version of Ite on: pull_request: branches: - - master + - master paths: - - **.go - - testdata/** + - '**.go' + - 'testdata/**' jobs: unit-test: From 98dbfd2178762ad3b9f6474f41f11ec9971e790b Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Thu, 24 Aug 2023 14:29:21 -0400 Subject: [PATCH 115/121] Revert version to v0.15 Signed-off-by: Alan Cha --- base/util.go | 4 ++-- testdata/controllers/blue-green-http-kserve/initialize.sh | 2 +- testdata/controllers/canary-http-kserve/initialize.sh | 2 +- testdata/controllers/mirror-grpc-kserve/initialize.sh | 2 +- testdata/controllers/mirror/default-routing.sh | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/base/util.go b/base/util.go index 2b8aee1dd..a93f8e0af 100644 --- a/base/util.go +++ b/base/util.go @@ -19,11 +19,11 @@ import ( // MajorMinor is the minor version of Iter8 // set this manually whenever the major or minor version changes -var MajorMinor = "v0.16" +var MajorMinor = "v0.15" // Version is the semantic version of Iter8 (with the `v` prefix) // Version is intended to be set using LDFLAGS at build time -var Version = "v0.16.0" +var Version = "v0.15.0" const ( toYAMLString = "toYaml" diff --git a/testdata/controllers/blue-green-http-kserve/initialize.sh b/testdata/controllers/blue-green-http-kserve/initialize.sh index b82424580..553af76b1 100755 --- 
a/testdata/controllers/blue-green-http-kserve/initialize.sh +++ b/testdata/controllers/blue-green-http-kserve/initialize.sh @@ -32,7 +32,7 @@ metadata: labels: app.kubernetes.io/managed-by: iter8 iter8.tools/kind: routemap - iter8.tools/version: v0.16 + iter8.tools/version: v0.15 data: strSpec: | versions: diff --git a/testdata/controllers/canary-http-kserve/initialize.sh b/testdata/controllers/canary-http-kserve/initialize.sh index 1efd57143..388250c93 100755 --- a/testdata/controllers/canary-http-kserve/initialize.sh +++ b/testdata/controllers/canary-http-kserve/initialize.sh @@ -32,7 +32,7 @@ metadata: labels: app.kubernetes.io/managed-by: iter8 iter8.tools/kind: routemap - iter8.tools/version: v0.16 + iter8.tools/version: v0.15 data: strSpec: | versions: diff --git a/testdata/controllers/mirror-grpc-kserve/initialize.sh b/testdata/controllers/mirror-grpc-kserve/initialize.sh index 21cd46d97..1b62b0f0a 100755 --- a/testdata/controllers/mirror-grpc-kserve/initialize.sh +++ b/testdata/controllers/mirror-grpc-kserve/initialize.sh @@ -43,7 +43,7 @@ metadata: labels: app.kubernetes.io/managed-by: iter8 iter8.tools/kind: routemap - iter8.tools/version: v0.16 + iter8.tools/version: v0.15 data: strSpec: | versions: diff --git a/testdata/controllers/mirror/default-routing.sh b/testdata/controllers/mirror/default-routing.sh index 20b72f341..8e1c28de5 100755 --- a/testdata/controllers/mirror/default-routing.sh +++ b/testdata/controllers/mirror/default-routing.sh @@ -49,7 +49,7 @@ metadata: labels: app.kubernetes.io/managed-by: iter8 iter8.tools/kind: routemap - iter8.tools/version: v0.16 + iter8.tools/version: v0.15 data: strSpec: | versions: From d7a3b71997404e163175d571fa3f03262385e567 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Thu, 24 Aug 2023 14:48:37 -0400 Subject: [PATCH 116/121] Fix test Signed-off-by: Alan Cha --- action/run_test.go | 11 +++-------- base/experiment.go | 1 - 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/action/run_test.go b/action/run_test.go index a1e63069b..e6da28b44 100644 --- a/action/run_test.go +++ b/action/run_test.go @@ -57,6 +57,9 @@ func TestKubeRun(t *testing.T) { err = json.Unmarshal(body, &bodyExperimentResult) assert.NoError(t, err) assert.NotNil(t, body) + + // no experiment failure + assert.False(t, bodyExperimentResult.Failure) }, }) @@ -83,12 +86,4 @@ func TestKubeRun(t *testing.T) { // sanity check -- handler was called assert.True(t, verifyHandlerCalled) assert.True(t, metricsServerCalled) - - // check results - exp, err := base.BuildExperiment(rOpts.KubeDriver) - assert.NoError(t, err) - assert.True(t, exp.Completed()) - assert.True(t, exp.NoFailure()) - assert.Equal(t, 1, exp.Result.NumCompletedTasks) - } diff --git a/base/experiment.go b/base/experiment.go index cbfa2dbb3..3429a8a08 100644 --- a/base/experiment.go +++ b/base/experiment.go @@ -257,7 +257,6 @@ type Driver interface { // Read the experiment Read() (*Experiment, error) - // deprecated // Write the experiment Write(e *Experiment) error From ba2939a2f24e74c3e3f34cb4e6f02a9f16388776 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Thu, 24 Aug 2023 14:49:21 -0400 Subject: [PATCH 117/121] Remove extraneous logging Signed-off-by: Alan Cha --- base/experiment.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/base/experiment.go b/base/experiment.go index 3429a8a08..bdbb902e2 100644 --- a/base/experiment.go +++ b/base/experiment.go @@ -405,8 +405,5 @@ func RunExperiment(driver Driver) error { exp.initResults(driver.GetRevision()) - result, _ = json.Marshal(exp.Result) - 
log.Logger.Trace("initialized result", string(result)) - return exp.run(driver) } From e3908166c4557bad495ab58c3a488a6c08718095 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Thu, 24 Aug 2023 14:52:55 -0400 Subject: [PATCH 118/121] Remove extaneous logging Signed-off-by: Alan Cha --- base/experiment.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/base/experiment.go b/base/experiment.go index bdbb902e2..5105903df 100644 --- a/base/experiment.go +++ b/base/experiment.go @@ -400,9 +400,6 @@ func RunExperiment(driver Driver) error { return err } - result, _ := json.Marshal(exp.Result) - log.Logger.Trace("Initializing result", string(result)) - exp.initResults(driver.GetRevision()) return exp.run(driver) From 4ed26ac69f342910323510ea43e5cdf00b57c2b3 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Thu, 24 Aug 2023 15:18:50 -0400 Subject: [PATCH 119/121] Update README.md Signed-off-by: Alan Cha --- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index 2a5201e21..3d289c994 100644 --- a/README.md +++ b/README.md @@ -9,6 +9,8 @@ Iter8 supports the following use-cases. 1. Performance testing of HTTP services. 2. Performance testing of gRPC services. +3. A/B/n testing of applications and ML models +4. Reliable and automated traffic engineering: blue-green and canary ## :rocket: Iter8 experiment @@ -23,6 +25,8 @@ Iter8 packs a number of powerful features that facilitate Kubernetes app testing 1. **Generating load and collecting built-in metrics for HTTP and gRPC services.** Simplifies performance testing by eliminating the need to setup and use metrics databases. 2. **Readiness check.** The performance testing portion of the experiment begins only after the service is ready. 3. **Experiment anywhere.** Iter8 experiments can be launched inside a Kubernetes cluster, in local environments, or inside a GitHub Actions pipeline. +4. **Traffic controller.** Automatically and dynamically reconfigures routing resources based on the state of Kubernetes apps/ML models. +5. **Client-side SDK.** Facilitates routing and metrics collection task associated with distributed (i.e., client-server architecture-based) A/B/n testing in Kubernetes. Please see [https://iter8.tools](https://iter8.tools) for the complete documentation. 
From e64e606bc8cf0bc78acdb2653b2a204bffbe9084 Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Thu, 24 Aug 2023 15:20:49 -0400 Subject: [PATCH 120/121] Update wordlist Signed-off-by: Alan Cha --- .github/wordlist.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/wordlist.txt b/.github/wordlist.txt index 95a4c7849..e12ab62b1 100644 --- a/.github/wordlist.txt +++ b/.github/wordlist.txt @@ -167,4 +167,5 @@ Shubham Sood Toolchains jetic -Öztürk \ No newline at end of file +Öztürk +reconfigures \ No newline at end of file From 0939ce66d20ab20169df594fcaeb05968c79167d Mon Sep 17 00:00:00 2001 From: Alan Cha Date: Thu, 24 Aug 2023 15:30:34 -0400 Subject: [PATCH 121/121] Change name Signed-off-by: Alan Cha --- .github/workflows/testcode.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/testcode.yaml b/.github/workflows/testcode.yaml index 8a75fe743..d13bd2a82 100644 --- a/.github/workflows/testcode.yaml +++ b/.github/workflows/testcode.yaml @@ -1,4 +1,4 @@ -name: Performance tests to assess the functionality of the latest version of Iter8 (master branch) +name: Check changes to golang code does not break CLI # Only runs when golang code (or test cases) have changed @@ -14,7 +14,7 @@ on: jobs: unit-test: - name: Unit test + name: unit-test runs-on: ubuntu-latest steps: - name: Install Go