diff --git a/CHANGELOG.md b/CHANGELOG.md index 50d858a6..051a2a02 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,7 @@ listed in the changelog. - Create and use one PVC per repository ([#160](https://github.com/opendevstack/ods-pipeline/issues/160)) - Leaner NodeJS 16 Typescript image task, removed cypress and its dependencies ([#426](https://github.com/opendevstack/ods-pipeline/issues/426)) - Update skopeo (from 1.4 to 1.5) and buildah (from 1.22 to 1.23) ([#430](https://github.com/opendevstack/ods-pipeline/issues/430)) +- Prune pipelines and pipeline runs ([#153](https://github.com/opendevstack/ods-pipeline/issues/153)) ### Fixed - Cannot enable debug mode in some tasks ([#377](https://github.com/opendevstack/ods-pipeline/issues/377)) diff --git a/cmd/start/main.go b/cmd/start/main.go index 35f742e1..9b970acb 100644 --- a/cmd/start/main.go +++ b/cmd/start/main.go @@ -200,7 +200,7 @@ func main() { } } - if len(ctxt.Environment) > 0 { + if ctxt.Environment != "" { env, err := odsConfig.Environment(ctxt.Environment) if err != nil { log.Fatal(fmt.Sprintf("err during namespace extraction: %s", err)) diff --git a/cmd/webhook-interceptor/main.go b/cmd/webhook-interceptor/main.go index ac033b25..4ca2ef79 100644 --- a/cmd/webhook-interceptor/main.go +++ b/cmd/webhook-interceptor/main.go @@ -7,6 +7,7 @@ import ( "math/rand" "net/http" "os" + "strconv" "strings" "time" @@ -14,6 +15,7 @@ import ( kubernetesClient "github.com/opendevstack/pipeline/internal/kubernetes" tektonClient "github.com/opendevstack/pipeline/internal/tekton" "github.com/opendevstack/pipeline/pkg/bitbucket" + "github.com/opendevstack/pipeline/pkg/logging" ) const ( @@ -29,6 +31,10 @@ const ( storageClassNameDefault = "standard" storageSizeEnvVar = "ODS_STORAGE_SIZE" storageSizeDefault = "2Gi" + pruneMinKeepHoursEnvVar = "ODS_PRUNE_MIN_KEEP_HOURS" + pruneMinKeepHoursDefault = 48 + pruneMaxKeepRunsEnvVar = "ODS_PRUNE_MAX_KEEP_RUNS" + pruneMaxKeepRunsDefault = 20 ) func init() { @@ -55,55 +61,27 
@@ func serve() error { return fmt.Errorf("%s must be set", tokenEnvVar) } - taskKind := os.Getenv(taskKindEnvVar) - if taskKind == "" { - taskKind = taskKindDefault - log.Println( - "INFO:", - taskKindEnvVar, - "not set, using default value:", - taskKindDefault, - ) - } + taskKind := readStringFromEnvVar(taskKindEnvVar, taskKindDefault) - taskSuffix := os.Getenv(taskSuffixEnvVar) - if taskSuffix == "" { - log.Println( - "INFO:", - taskSuffixEnvVar, - "not set, using no suffix", - ) - } + taskSuffix := readStringFromEnvVar(taskSuffixEnvVar, "") - storageProvisioner := os.Getenv(storageProvisionerEnvVar) - if storageProvisioner == "" { - log.Println( - "INFO:", - storageProvisionerEnvVar, - "not set, using no storage provisioner", - ) - } + storageProvisioner := readStringFromEnvVar(storageProvisionerEnvVar, "") - storageClassName := os.Getenv(storageClassNameEnvVar) - if storageClassName == "" { - storageClassName = storageClassNameDefault - log.Println( - "INFO:", - storageClassNameEnvVar, - "not set, using default value:", - storageClassNameDefault, - ) - } + storageClassName := readStringFromEnvVar(storageClassNameEnvVar, storageClassNameDefault) - storageSize := os.Getenv(storageSizeEnvVar) - if storageSize == "" { - storageSize = storageSizeDefault - log.Println( - "INFO:", - storageSizeEnvVar, - "not set, using default value:", - storageSizeDefault, - ) + storageSize := readStringFromEnvVar(storageSizeEnvVar, storageSizeDefault) + + pruneMinKeepHours, err := readIntFromEnvVar( + pruneMinKeepHoursEnvVar, pruneMinKeepHoursDefault, + ) + if err != nil { + return err + } + pruneMaxKeepRuns, err := readIntFromEnvVar( + pruneMaxKeepRunsEnvVar, pruneMaxKeepRunsDefault, + ) + if err != nil { + return err } namespace, err := getFileContent(namespaceFile) @@ -135,6 +113,21 @@ func serve() error { BaseURL: strings.TrimSuffix(repoBase, "/scm"), }) + // TODO: Use this logger in the interceptor as well, not just in the pruner. 
+ var logger logging.LeveledLoggerInterface + if os.Getenv("DEBUG") == "true" { + logger = &logging.LeveledLogger{Level: logging.LevelDebug} + } else { + logger = &logging.LeveledLogger{Level: logging.LevelInfo} + } + + pruner := interceptor.NewPipelineRunPrunerByStage( + tClient, + logger, + pruneMinKeepHours, + pruneMaxKeepRuns, + ) + server, err := interceptor.NewServer(interceptor.ServerConfig{ Namespace: namespace, Project: project, @@ -147,9 +140,10 @@ func serve() error { ClassName: storageClassName, Size: storageSize, }, - KubernetesClient: kClient, - TektonClient: tClient, - BitbucketClient: bitbucketClient, + KubernetesClient: kClient, + TektonClient: tClient, + BitbucketClient: bitbucketClient, + PipelineRunPruner: pruner, }) if err != nil { return err @@ -178,3 +172,32 @@ func getFileContent(filename string) (string, error) { } return string(content), nil } + +func readIntFromEnvVar(envVar string, fallback int) (int, error) { + var val int + valString := os.Getenv(envVar) + if valString == "" { + val = fallback + log.Println( + "INFO:", envVar, "not set, using default value:", fallback, + ) + } else { + i, err := strconv.Atoi(valString) + if err != nil { + return 0, fmt.Errorf("could not read value of %s: %s", envVar, err) + } + val = i + } + return val, nil +} + +func readStringFromEnvVar(envVar, fallback string) string { + val := os.Getenv(envVar) + if val == "" { + val = fallback + log.Printf( + "INFO: %s not set, using default value: '%s'", envVar, fallback, + ) + } + return val +} diff --git a/deploy/cd-namespace/chart/templates/deployment-interceptor.yaml b/deploy/cd-namespace/chart/templates/deployment-interceptor.yaml index a632ce97..aab951da 100644 --- a/deploy/cd-namespace/chart/templates/deployment-interceptor.yaml +++ b/deploy/cd-namespace/chart/templates/deployment-interceptor.yaml @@ -37,12 +37,21 @@ spec: secretKeyRef: key: password name: ods-bitbucket-auth + - name: DEBUG + valueFrom: + configMapKeyRef: + key: debug + name: ods-pipeline 
- name: ODS_STORAGE_PROVISIONER value: '{{.Values.interceptor.storageProvisioner}}' - name: ODS_STORAGE_CLASS_NAME value: '{{.Values.interceptor.storageClassName}}' - name: ODS_STORAGE_SIZE value: '{{.Values.interceptor.storageSize}}' + - name: ODS_PRUNE_MIN_KEEP_HOURS + value: '{{int .Values.pipelineRunMinKeepHours}}' + - name: ODS_PRUNE_MAX_KEEP_RUNS + value: '{{int .Values.pipelineRunMaxKeepRuns}}' readinessProbe: httpGet: path: /health diff --git a/deploy/cd-namespace/chart/values.yaml b/deploy/cd-namespace/chart/values.yaml index 7109ec09..bbc29f9a 100644 --- a/deploy/cd-namespace/chart/values.yaml +++ b/deploy/cd-namespace/chart/values.yaml @@ -95,6 +95,12 @@ notification: ] } +# Pipeline(Run) Pruning +# Minimum hours to keep a pipeline run. Has precendence over pipelineRunMaxKeepRuns. +pipelineRunMinKeepHours: '48' +# Maximum number of pipeline runs to keep per stage (stages: DEV, QA, PROD). +pipelineRunMaxKeepRuns: '20' + # Webhook Interceptor interceptor: # PVC (used for the pipeline workspace) diff --git a/docs/design/software-design-specification.adoc b/docs/design/software-design-specification.adoc index 772fcb98..9f652f8e 100644 --- a/docs/design/software-design-specification.adoc +++ b/docs/design/software-design-specification.adoc @@ -371,6 +371,8 @@ For Git commits which message instructs to skip CI, no pipelines are triggererd. A pipeline is created or updated corresponding to the Git branch received in the webhook request. The pipeline name is made out of the component and the sanitized branch. A maximum of 63 characters is respected. Tasks (including `finally` tasks) of the pipline are read from the ODS config file in the repository. A PVC is created per repository unless it exists already. The name is equal to `ods-workspace-` (shortened to 63 characters if longer). This PVC is then used in the pipeline as a shared workspace. + +Pipelines and pipeline runs are pruned when a webhook trigger is received. 
Pipeline runs that are newer than the configured time window are protected from pruning. Older pipeline runs are cleaned up to not grow beyond the configured maximum amount. If all pipeline runs of one pipeline can be pruned, the whole pipeline is pruned. The pruning strategy is applied per repository and stage (DEV, QA, PROD) to avoid aggressive pruning of QA and PROD pipeline runs. |=== ===== Artifact Download diff --git a/docs/design/software-requirements-specification.adoc b/docs/design/software-requirements-specification.adoc index adca5199..94a42c03 100644 --- a/docs/design/software-requirements-specification.adoc +++ b/docs/design/software-requirements-specification.adoc @@ -63,6 +63,9 @@ The tasks shall create artifacts of their work. Those artifacts shall be stored | SRS-INTERCEPTOR-4 | The interceptor shall create a PVC for use as a pipeline workspace per repository. + +| SRS-INTERCEPTOR-5 +| The interceptor shall prune pipelines and pipeline runs per repository and stage. |=== === Task Requirements diff --git a/internal/interceptor/pruner.go b/internal/interceptor/pruner.go new file mode 100644 index 00000000..3211041b --- /dev/null +++ b/internal/interceptor/pruner.go @@ -0,0 +1,196 @@ +package interceptor + +import ( + "context" + "sort" + "time" + + tektonClient "github.com/opendevstack/pipeline/internal/tekton" + "github.com/opendevstack/pipeline/pkg/config" + "github.com/opendevstack/pipeline/pkg/logging" + tekton "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + // Label set by Tekton identifying the pipeline of a run + tektonPipelineLabel = "tekton.dev/pipeline" +) + +// PipelineRunPruner is the interface for a pruner implementation. +type PipelineRunPruner interface { + // Prune removes pipeline runs (and potentially pipelines) from the + // given list of pipeline runs based on a strategy as decided by the + // implemnter. 
+ Prune(ctxt context.Context, pipelineRuns []tekton.PipelineRun) error +} + +// PipelineRunPrunerByStage prunes pipeline runs by target stage. +// It's behaviour can be controlled through minKeepHours and maxKeepRuns. +// When pruning, it keeps maxKeepRuns number of pipeline runs per stage, +// however it always keeps all pipelines less than minKeepHours old. +// If pruning would prune all pipeline runs of one pipeline (identified by +// label "tekton.dev/pipeline"), then the pipeline is pruned instead (removing +// all dependent pipeline runs through propagation). +type pipelineRunPrunerByStage struct { + client tektonClient.ClientInterface + logger logging.LeveledLoggerInterface + // minKeepHours specifies the minimum hours to keep a pipeline run. + // This setting has precendence over maxKeepRuns. + minKeepHours int + // maxKeepRuns is the maximum number of pipeline runs to keep per stage. + maxKeepRuns int +} + +type prunableResources struct { + pipelineRuns []string + pipelines []string +} + +// NewPipelineRunPrunerByStage returns an instance of pipelineRunPrunerByStage. +func NewPipelineRunPrunerByStage(client tektonClient.ClientInterface, logger logging.LeveledLoggerInterface, minKeepHours, maxKeepRuns int) *pipelineRunPrunerByStage { + return &pipelineRunPrunerByStage{ + client: client, + logger: logger, + minKeepHours: minKeepHours, + maxKeepRuns: maxKeepRuns, + } +} + +// Prune prunes runs within pipelineRuns which can be cleaned up according to +// the strategy in pipelineRunPrunerByStage. 
+func (p *pipelineRunPrunerByStage) Prune(ctxt context.Context, pipelineRuns []tekton.PipelineRun) error { + p.logger.Debugf("Prune settings: minKeepHours=%d maxKeepRuns=%d", p.minKeepHours, p.maxKeepRuns) + prByStage := p.categorizePipelineRunsByStage(pipelineRuns) + for stage, prs := range prByStage { + p.logger.Debugf("Calculating prunable pipelines / pipeline runs for stage %s ...", stage) + prunable := p.findPrunableResources(prs) + + p.logger.Debugf("Pruning %d \"%s\" stage pipelines and their dependent runs ...", len(prunable.pipelines), stage) + for _, name := range prunable.pipelines { + err := p.prunePipeline(ctxt, name) + if err != nil { + p.logger.Warnf("Failed to prune pipeline %s: %s", name, err) + } + } + + p.logger.Debugf("Pruning %d \"%s\" stage pipeline runs ...", len(prunable.pipelineRuns), stage) + for _, name := range prunable.pipelineRuns { + err := p.pruneRun(ctxt, name) + if err != nil { + p.logger.Warnf("Failed to prune pipeline run %s: %s", name, err) + } + } + } + return nil +} + +// categorizePipelineRunsByStage assigns the given pipelineRuns into buckets +// by target stages (DEV, QA, PROD). +func (p *pipelineRunPrunerByStage) categorizePipelineRunsByStage(pipelineRuns []tekton.PipelineRun) map[string][]tekton.PipelineRun { + pipelineRunsByStage := map[string][]tekton.PipelineRun{ + string(config.DevStage): {}, + string(config.QAStage): {}, + string(config.ProdStage): {}, + } + for _, pr := range pipelineRuns { + stage := pr.Labels[stageLabel] + if _, ok := pipelineRunsByStage[stage]; !ok { + p.logger.Warnf("Unknown stage '%s' for pipeline run %s", stage, pr.Name) + } + pipelineRunsByStage[stage] = append(pipelineRunsByStage[stage], pr) + } + return pipelineRunsByStage +} + +// findPrunableResources finds resources that can be pruned within the given +// pipeline runs. Returned resources are either pipelines or pipeline runs. 
+// If all pipeline runs of one pipeline can be pruned, the pipeline is +// returned instead of the individual pipeline runs. +func (s *pipelineRunPrunerByStage) findPrunableResources(pipelineRuns []tekton.PipelineRun) *prunableResources { + // Sort pipeline runs by time (descending) + sort.Slice(pipelineRuns, func(i, j int) bool { + return pipelineRuns[j].CreationTimestamp.Time.Before(pipelineRuns[i].CreationTimestamp.Time) + }) + + // Apply cleanup to each bucket. + prunablePipelines := []string{} + prunablePipelineRuns := []string{} + + cutoff := time.Now().Add(time.Duration(s.minKeepHours*-1) * time.Hour) + protectedRuns := []tekton.PipelineRun{} + prunableRuns := []tekton.PipelineRun{} + // Categorize runs as either "protected" or "prunable". + // A run is protected if it is newer than the cutoff time, or if maxKeepRuns + // is not reached yet. + for _, p := range pipelineRuns { + if p.CreationTimestamp.Time.After(cutoff) || len(protectedRuns) < s.maxKeepRuns { + protectedRuns = append(protectedRuns, p) + } else { + prunableRuns = append(prunableRuns, p) + } + } + // Check for each prunable run, if there is another run for the same pipeline + // which is protected. If no such run exists, we want to prune the pipeline + // as a whole instead of individual runs. + for _, pruneableRun := range prunableRuns { + if pipelineIsProtected(pruneableRun.Labels[tektonPipelineLabel], protectedRuns) { + prunablePipelineRuns = append(prunablePipelineRuns, pruneableRun.Name) + } else { + prunablePipelines = append(prunablePipelines, pruneableRun.Labels[tektonPipelineLabel]) + } + } + + return &prunableResources{ + pipelineRuns: unique(prunablePipelineRuns), + pipelines: unique(prunablePipelines), + } +} + +// pipelineIsProtected checks if the pipelineName exists in the given pipeline +// runs. 
+func pipelineIsProtected(pipelineName string, protected []tekton.PipelineRun) bool { + for _, protect := range protected { + if protect.Labels[tektonPipelineLabel] == pipelineName { + return true + } + } + return false +} + +// pruneRun removes the pipeline run identified by name. The deletion is +// propagated to dependents. +func (p *pipelineRunPrunerByStage) pruneRun(ctxt context.Context, name string) error { + p.logger.Debugf("Pruning pipeline run %s ...", name) + ppPolicy := v1.DeletePropagationForeground + return p.client.DeletePipelineRun( + ctxt, + name, + v1.DeleteOptions{PropagationPolicy: &ppPolicy}, + ) +} + +// prunePipeline removes the pipeline identified by name. The deletion is +// propagated to dependents. +func (p *pipelineRunPrunerByStage) prunePipeline(ctxt context.Context, name string) error { + p.logger.Debugf("Pruning pipeline %s and its dependent runs ...", name) + ppPolicy := v1.DeletePropagationForeground + return p.client.DeletePipeline( + ctxt, + name, + v1.DeleteOptions{PropagationPolicy: &ppPolicy}, + ) +} + +// unique returns a slice of strings where all items appear only once. 
+func unique(stringSlice []string) []string { + keys := make(map[string]bool) + list := []string{} + for _, entry := range stringSlice { + if _, value := keys[entry]; !value { + keys[entry] = true + list = append(list, entry) + } + } + return list +} diff --git a/internal/interceptor/pruner_test.go b/internal/interceptor/pruner_test.go new file mode 100644 index 00000000..8a59487b --- /dev/null +++ b/internal/interceptor/pruner_test.go @@ -0,0 +1,116 @@ +package interceptor + +import ( + "context" + "sort" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + tektonClient "github.com/opendevstack/pipeline/internal/tekton" + "github.com/opendevstack/pipeline/pkg/config" + "github.com/opendevstack/pipeline/pkg/logging" + tekton "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestPrune(t *testing.T) { + tclient := &tektonClient.TestClient{} + minKeepHours := 2 + maxKeepRuns := 1 + logger := &logging.LeveledLogger{Level: logging.LevelDebug} + p := NewPipelineRunPrunerByStage(tclient, logger, minKeepHours, maxKeepRuns) + prs := []tekton.PipelineRun{ + { // not pruned + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-a", + CreationTimestamp: metav1.Time{Time: time.Now().Add(time.Minute * -1)}, + Labels: map[string]string{ + stageLabel: config.DevStage, + tektonPipelineLabel: "p-one", + }, + }, + }, + { // would be pruned by maxKeepRuns, but is protected by minKeepHours + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-b", + CreationTimestamp: metav1.Time{Time: time.Now().Add(time.Minute * -3)}, + Labels: map[string]string{ + stageLabel: config.DevStage, + tektonPipelineLabel: "p-one", + }, + }, + }, + { // pruned + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-c", + CreationTimestamp: metav1.Time{Time: time.Now().Add(time.Hour * -4)}, + Labels: map[string]string{ + stageLabel: config.DevStage, + tektonPipelineLabel: "p-one", + }, + }, + }, + { // pruned through pipeline p-two + ObjectMeta: metav1.ObjectMeta{ + 
Name: "pr-d", + CreationTimestamp: metav1.Time{Time: time.Now().Add(time.Hour * -5)}, + Labels: map[string]string{ + stageLabel: config.DevStage, + tektonPipelineLabel: "p-two", + }, + }, + }, + { // pruned through pipeline p-two + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-e", + CreationTimestamp: metav1.Time{Time: time.Now().Add(time.Hour * -6)}, + Labels: map[string]string{ + stageLabel: config.DevStage, + tektonPipelineLabel: "p-two", + }, + }, + }, + { // not pruned because different stage (QA) + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-e", + CreationTimestamp: metav1.Time{Time: time.Now()}, + Labels: map[string]string{ + stageLabel: config.QAStage, + tektonPipelineLabel: "p-three", + }, + }, + }, + { // not pruned because different stage (PROD) + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-f", + CreationTimestamp: metav1.Time{Time: time.Now().Add(time.Hour * -7)}, + Labels: map[string]string{ + stageLabel: config.ProdStage, + tektonPipelineLabel: "p-four", + }, + }, + }, + { // pruned + ObjectMeta: metav1.ObjectMeta{ + Name: "pr-g", + CreationTimestamp: metav1.Time{Time: time.Now().Add(time.Hour * -8)}, + Labels: map[string]string{ + stageLabel: config.ProdStage, + tektonPipelineLabel: "p-four", + }, + }, + }, + } + err := p.Prune(context.TODO(), prs) + if err != nil { + t.Fatal(err) + } + sort.Strings(tclient.DeletedPipelineRuns) + if diff := cmp.Diff([]string{"pr-c", "pr-g"}, tclient.DeletedPipelineRuns); diff != "" { + t.Fatalf("pr prune mismatch (-want +got):\n%s", diff) + } + if diff := cmp.Diff([]string{"p-two"}, tclient.DeletedPipelines); diff != "" { + t.Fatalf("p prune mismatch (-want +got):\n%s", diff) + } +} diff --git a/internal/interceptor/server.go b/internal/interceptor/server.go index 0edd051b..80d937ad 100644 --- a/internal/interceptor/server.go +++ b/internal/interceptor/server.go @@ -13,6 +13,7 @@ import ( "regexp" "strconv" "strings" + "sync" "time" kubernetesClient "github.com/opendevstack/pipeline/internal/kubernetes" @@ -25,9 +26,11 @@ 
import ( kerrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" ) const ( + // allowedChangeRefType is the Bitbucket change ref handled by this interceptor. allowedChangeRefType = "BRANCH" // Label prefix to use for labels applied by this webhook interceptor. labelPrefix = "pipeline.opendevstack.org/" @@ -35,25 +38,34 @@ const ( repositoryLabel = labelPrefix + "repository" // Label specifying the Git ref (e.g. branch) related to the pipeline. gitRefLabel = labelPrefix + "git-ref" + // Label specifying the target stage of the pipeline. + stageLabel = labelPrefix + "stage" + // tektonTriggerLabel is applied by Tekton Triggers. + tektonTriggerLabel = "triggers.tekton.dev/trigger" + // tektonTriggerLabelValue is applied by Tekton Triggers. + tektonTriggerLabelValue = "ods-pipeline" // Annotation to set the storage provisioner for a PVC. storageProvisionerAnnotation = "volume.beta.kubernetes.io/storage-provisioner" - pvcProtectionFinalizer = "kubernetes.io/pvc-protection" + // PVC finalizer. + pvcProtectionFinalizer = "kubernetes.io/pvc-protection" // letterBytes contains letters to use for random strings. letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" ) // Server represents this service, and is a global. 
type Server struct { - KubernetesClient kubernetesClient.ClientInterface - TektonClient tektonClient.ClientInterface - Namespace string - Project string - RepoBase string - Token string - TaskKind tekton.TaskKind - TaskSuffix string - StorageConfig StorageConfig - BitbucketClient bitbucketInterface + KubernetesClient kubernetesClient.ClientInterface + TektonClient tektonClient.ClientInterface + Namespace string + Project string + RepoBase string + Token string + TaskKind tekton.TaskKind + TaskSuffix string + StorageConfig StorageConfig + BitbucketClient bitbucketInterface + PipelineRunPruner PipelineRunPruner + pruneMutex sync.Mutex } type StorageConfig struct { @@ -85,6 +97,8 @@ type ServerConfig struct { TektonClient tektonClient.ClientInterface // BitbucketClient is a Bitbucket client BitbucketClient bitbucketInterface + // PipelineRunPruner is responsible to prune pipeline runs. + PipelineRunPruner PipelineRunPruner } type PipelineData struct { @@ -92,6 +106,7 @@ type PipelineData struct { Project string `json:"project"` Component string `json:"component"` Repository string `json:"repository"` + Stage string `json:"stage"` Environment string `json:"environment"` Version string `json:"version"` GitRef string `json:"gitRef"` @@ -137,16 +152,17 @@ func NewServer(serverConfig ServerConfig) (*Server, error) { return nil, errors.New("bitbucket client is required") } return &Server{ - KubernetesClient: serverConfig.KubernetesClient, - TektonClient: serverConfig.TektonClient, - BitbucketClient: serverConfig.BitbucketClient, - Namespace: serverConfig.Namespace, - Project: serverConfig.Project, - RepoBase: serverConfig.RepoBase, - Token: serverConfig.Token, - TaskKind: tekton.TaskKind(serverConfig.TaskKind), - TaskSuffix: serverConfig.TaskSuffix, - StorageConfig: serverConfig.StorageConfig, + KubernetesClient: serverConfig.KubernetesClient, + TektonClient: serverConfig.TektonClient, + BitbucketClient: serverConfig.BitbucketClient, + Namespace: serverConfig.Namespace, + 
Project: serverConfig.Project, + RepoBase: serverConfig.RepoBase, + Token: serverConfig.Token, + TaskKind: tekton.TaskKind(serverConfig.TaskKind), + TaskSuffix: serverConfig.TaskSuffix, + StorageConfig: serverConfig.StorageConfig, + PipelineRunPruner: serverConfig.PipelineRunPruner, }, nil } @@ -330,23 +346,35 @@ func (s *Server) HandleRoot(w http.ResponseWriter, r *http.Request) { } pData.Environment = selectEnvironmentFromMapping(odsConfig.BranchToEnvironmentMapping, pData.GitRef) + pData.Stage = string(config.DevStage) + if pData.Environment != "" { + env, err := odsConfig.Environment(pData.Environment) + if err != nil { + msg := fmt.Sprintf("environment misconfiguration: %s", err) + log.Println(requestID, fmt.Sprintf("%s: %s", msg, err)) + http.Error(w, msg, http.StatusInternalServerError) + return + } + pData.Stage = string(env.Stage) + } pData.Version = odsConfig.Version - tknPipeline := assemblePipeline(odsConfig, pData, s.TaskKind, s.TaskSuffix) + newPipeline := assemblePipeline(odsConfig, pData, s.TaskKind, s.TaskSuffix) - _, err = s.TektonClient.GetPipeline(r.Context(), pData.Name, metav1.GetOptions{}) + existingPipeline, err := s.TektonClient.GetPipeline(r.Context(), pData.Name, metav1.GetOptions{}) if err != nil { - _, err := s.TektonClient.CreatePipeline(r.Context(), tknPipeline, metav1.CreateOptions{}) + _, err := s.TektonClient.CreatePipeline(r.Context(), newPipeline, metav1.CreateOptions{}) if err != nil { - msg := fmt.Sprintf("cannot create pipeline %s", tknPipeline.Name) + msg := fmt.Sprintf("cannot create pipeline %s", newPipeline.Name) log.Println(requestID, fmt.Sprintf("%s: %s", msg, err)) http.Error(w, msg, http.StatusInternalServerError) return } } else { - _, err := s.TektonClient.UpdatePipeline(r.Context(), tknPipeline, metav1.UpdateOptions{}) + newPipeline.ResourceVersion = existingPipeline.ResourceVersion + _, err := s.TektonClient.UpdatePipeline(r.Context(), newPipeline, metav1.UpdateOptions{}) if err != nil { - msg := 
fmt.Sprintf("cannot update pipeline %s", tknPipeline.Name) + msg := fmt.Sprintf("cannot update pipeline %s", newPipeline.Name) log.Println(requestID, fmt.Sprintf("%s: %s", msg, err)) http.Error(w, msg, http.StatusInternalServerError) return @@ -362,6 +390,37 @@ func (s *Server) HandleRoot(w http.ResponseWriter, r *http.Request) { return } + if s.PipelineRunPruner != nil { + go func() { + // Make sure we do not clean up in parallel, which may lead to + // errors or weird results. + s.pruneMutex.Lock() + defer s.pruneMutex.Unlock() + log.Println(requestID, fmt.Sprintf("Starting pruning of pipeline runs related to repository %s ...", pData.Repository)) + ctxt, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + labelMap := map[string]string{ + repositoryLabel: pData.Repository, + tektonTriggerLabel: tektonTriggerLabelValue, + } + pipelineRuns, err := s.TektonClient.ListPipelineRuns( + ctxt, metav1.ListOptions{LabelSelector: labels.Set(labelMap).String()}, + ) + if err != nil { + log.Printf("Could not retrieve existing pipeline runs: %s\n", err) + return + } + log.Println(requestID, fmt.Sprintf("Found %d pipeline runs related to repository %s.", len(pipelineRuns.Items), pData.Repository)) + err = s.PipelineRunPruner.Prune(ctxt, pipelineRuns.Items) + if err != nil { + log.Println(fmt.Sprintf( + "Pruning pipeline runs of repository %s failed: %s", + pData.Repository, err, + )) + } + }() + } + log.Println(requestID, fmt.Sprintf("%+v", pData)) extendedBody, err := extendBodyWithExtensions(body, pData) @@ -388,9 +447,10 @@ func (s *Server) createPVCIfRequired(ctxt context.Context, pData PipelineData) e vm := corev1.PersistentVolumeFilesystem pvc := &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ - Name: pData.PVC, - Labels: map[string]string{repositoryLabel: pData.Repository}, - Finalizers: []string{pvcProtectionFinalizer}, + Name: pData.PVC, + Labels: map[string]string{repositoryLabel: pData.Repository}, + Finalizers: 
[]string{pvcProtectionFinalizer}, + Annotations: map[string]string{}, }, Spec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, @@ -414,6 +474,7 @@ func (s *Server) createPVCIfRequired(ctxt context.Context, pData PipelineData) e return nil } +// selectEnvironmentFromMapping selects the environment name matching given branch. func selectEnvironmentFromMapping(mapping []config.BranchToEnvironmentMapping, branch string) string { for _, bem := range mapping { if mappingBranchMatch(bem.Branch, branch) { @@ -621,6 +682,7 @@ func assemblePipeline(odsConfig *config.ODS, data PipelineData, taskKind tekton. Labels: map[string]string{ repositoryLabel: data.Repository, gitRefLabel: data.GitRef, + stageLabel: data.Stage, }, }, TypeMeta: metav1.TypeMeta{ diff --git a/internal/interceptor/server_test.go b/internal/interceptor/server_test.go index e28fc541..e835e8b0 100644 --- a/internal/interceptor/server_test.go +++ b/internal/interceptor/server_test.go @@ -1,6 +1,7 @@ package interceptor import ( + "context" "encoding/json" "fmt" "io/ioutil" @@ -38,6 +39,7 @@ func TestRenderPipeline(t *testing.T) { RepoBase: "https://bitbucket.acme.org", GitURI: "https://bitbucket.acme.org/scm/foo/bar.git", Namespace: "foo-cd", + Stage: "dev", TriggerEvent: "repo:refs_changed", Comment: "", PullRequestKey: 0, @@ -65,6 +67,7 @@ func TestExtensions(t *testing.T) { Project: "foo", Repository: "foo-bar", Component: "bar", + Stage: "dev", Environment: "", Version: "", GitRef: "main", @@ -258,7 +261,16 @@ func fatalIfErr(t *testing.T, err error) { } } -func testServer(kc kubernetes.ClientInterface, tc tektonClient.ClientInterface, bc bitbucketInterface) (*httptest.Server, error) { +type fakePruner struct { + called bool +} + +func (p *fakePruner) Prune(ctxt context.Context, pipelineRuns []tekton.PipelineRun) error { + p.called = true + return nil +} + +func testServer(kc kubernetes.ClientInterface, tc tektonClient.ClientInterface, bc 
bitbucketInterface, pruner PipelineRunPruner) (*httptest.Server, error) { server, err := NewServer(ServerConfig{ Namespace: "bar-cd", Project: "bar", @@ -266,13 +278,14 @@ func testServer(kc kubernetes.ClientInterface, tc tektonClient.ClientInterface, TaskKind: "ClusterTask", RepoBase: "https://domain.com", StorageConfig: StorageConfig{ - Provisioner: "", - ClassName: "standard", + Provisioner: "kubernetes.io/aws-ebs", + ClassName: "gp2", Size: "2Gi", }, - KubernetesClient: kc, - TektonClient: tc, - BitbucketClient: bc, + KubernetesClient: kc, + TektonClient: tc, + BitbucketClient: bc, + PipelineRunPruner: pruner, }) if err != nil { return nil, err @@ -289,36 +302,45 @@ func TestWebhookHandling(t *testing.T) { bitbucketClient *bitbucket.TestClient wantStatus int wantBody string - check func(t *testing.T, kc *kubernetes.TestClient, tc *tektonClient.TestClient, bc *bitbucket.TestClient) + check func(t *testing.T, kc *kubernetes.TestClient, tc *tektonClient.TestClient, bc *bitbucket.TestClient, p *fakePruner) }{ "invalid JSON is not processed": { requestBodyFixture: "interceptor/payload-invalid.json", wantStatus: http.StatusBadRequest, wantBody: "cannot parse JSON: invalid character '\\n' in string literal", - check: func(t *testing.T, kc *kubernetes.TestClient, tc *tektonClient.TestClient, bc *bitbucket.TestClient) { + check: func(t *testing.T, kc *kubernetes.TestClient, tc *tektonClient.TestClient, bc *bitbucket.TestClient, p *fakePruner) { if len(tc.CreatedPipelines) > 0 || len(tc.UpdatedPipelines) > 0 { t.Fatal("no pipeline should have been created/updated") } + if p.called { + t.Fatal("pruning should not have occured") + } }, }, "unsupported events are not processed": { requestBodyFixture: "interceptor/payload-unknown-event.json", wantStatus: http.StatusBadRequest, wantBody: "Unsupported event key: repo:ref_changed", - check: func(t *testing.T, kc *kubernetes.TestClient, tc *tektonClient.TestClient, bc *bitbucket.TestClient) { + check: func(t *testing.T, kc 
*kubernetes.TestClient, tc *tektonClient.TestClient, bc *bitbucket.TestClient, p *fakePruner) { if len(tc.CreatedPipelines) > 0 || len(tc.UpdatedPipelines) > 0 { t.Fatal("no pipeline should have been created/updated") } + if p.called { + t.Fatal("pruning should not have occured") + } }, }, "tags are not processed": { requestBodyFixture: "interceptor/payload-tag.json", wantStatus: http.StatusTeapot, wantBody: "Skipping change ref type TAG, only BRANCH is supported", - check: func(t *testing.T, kc *kubernetes.TestClient, tc *tektonClient.TestClient, bc *bitbucket.TestClient) { + check: func(t *testing.T, kc *kubernetes.TestClient, tc *tektonClient.TestClient, bc *bitbucket.TestClient, p *fakePruner) { if len(tc.CreatedPipelines) > 0 || len(tc.UpdatedPipelines) > 0 { t.Fatal("no pipeline should have been created/updated") } + if p.called { + t.Fatal("pruning should not have occured") + } }, }, "commits with skip message are not processed": { @@ -334,10 +356,13 @@ func TestWebhookHandling(t *testing.T) { }, wantStatus: http.StatusTeapot, wantBody: "Commit should be skipped", - check: func(t *testing.T, kc *kubernetes.TestClient, tc *tektonClient.TestClient, bc *bitbucket.TestClient) { + check: func(t *testing.T, kc *kubernetes.TestClient, tc *tektonClient.TestClient, bc *bitbucket.TestClient, p *fakePruner) { if len(tc.CreatedPipelines) > 0 || len(tc.UpdatedPipelines) > 0 { t.Fatal("no pipeline should have been created/updated") } + if p.called { + t.Fatal("pruning should not have occured") + } }, }, "pushes into new branch creates a pipeline": { @@ -349,13 +374,16 @@ func TestWebhookHandling(t *testing.T) { }, wantStatus: http.StatusOK, wantBody: string(readTestdataFile(t, "golden/interceptor/extended-payload.json")), - check: func(t *testing.T, kc *kubernetes.TestClient, tc *tektonClient.TestClient, bc *bitbucket.TestClient) { + check: func(t *testing.T, kc *kubernetes.TestClient, tc *tektonClient.TestClient, bc *bitbucket.TestClient, p *fakePruner) { if 
len(tc.CreatedPipelines) != 1 || len(tc.UpdatedPipelines) != 0 { t.Fatal("exactly one pipeline should have been created") } if len(kc.CreatedPVCs) != 1 { t.Fatal("exactly one PVC should have been created") } + if !p.called { + t.Fatal("pruning should have occurred") + } }, }, "pushes into an existing branch updates a pipeline": { @@ -387,13 +415,16 @@ func TestWebhookHandling(t *testing.T) { }, wantStatus: http.StatusOK, wantBody: string(readTestdataFile(t, "golden/interceptor/extended-payload.json")), - check: func(t *testing.T, kc *kubernetes.TestClient, tc *tektonClient.TestClient, bc *bitbucket.TestClient) { + check: func(t *testing.T, kc *kubernetes.TestClient, tc *tektonClient.TestClient, bc *bitbucket.TestClient, p *fakePruner) { if len(tc.CreatedPipelines) != 0 || len(tc.UpdatedPipelines) != 1 { t.Fatal("exactly one pipeline should have been updated") } if len(kc.CreatedPVCs) > 0 { t.Fatal("no PVC should have been created") } + if !p.called { + t.Fatal("pruning should have occurred") + } }, }, "PR open events update a pipeline": { @@ -424,10 +455,13 @@ func TestWebhookHandling(t *testing.T) { }, wantStatus: http.StatusOK, wantBody: string(readTestdataFile(t, "golden/interceptor/extended-payload-pr-opened.json")), - check: func(t *testing.T, kc *kubernetes.TestClient, tc *tektonClient.TestClient, bc *bitbucket.TestClient) { + check: func(t *testing.T, kc *kubernetes.TestClient, tc *tektonClient.TestClient, bc *bitbucket.TestClient, p *fakePruner) { if len(tc.CreatedPipelines) != 0 || len(tc.UpdatedPipelines) != 1 { t.Fatal("exactly one pipeline should have been updated") } + if !p.called { + t.Fatal("pruning should have occurred") + } }, }, "failure to create pipeline is handled properly": { @@ -476,7 +510,8 @@ func TestWebhookHandling(t *testing.T) { if tc.kubernetesClient == nil { tc.kubernetesClient = &kubernetes.TestClient{} } - ts, err := testServer(tc.kubernetesClient, tc.tektonClient, tc.bitbucketClient) + pruner := &fakePruner{} + ts, err :=
testServer(tc.kubernetesClient, tc.tektonClient, tc.bitbucketClient, pruner) if err != nil { t.Fatal(err) } @@ -503,7 +538,7 @@ func TestWebhookHandling(t *testing.T) { t.Fatalf("body mismatch (-want +got):\n%s", diff) } if tc.check != nil { - tc.check(t, tc.kubernetesClient, tc.tektonClient, tc.bitbucketClient) + tc.check(t, tc.kubernetesClient, tc.tektonClient, tc.bitbucketClient, pruner) } }) } diff --git a/internal/tekton/client.go b/internal/tekton/client.go index 9aee71c7..6bcaa4dc 100644 --- a/internal/tekton/client.go +++ b/internal/tekton/client.go @@ -27,6 +27,7 @@ type ClientConfig struct { type ClientInterface interface { ClientPipelineInterface + ClientPipelineRunInterface } // NewInClusterClient initializes a Tekton client from within a cluster. @@ -79,3 +80,7 @@ func (c *Client) tektonV1beta1Client() v1beta1.TektonV1beta1Interface { func (c *Client) pipelinesClient() v1beta1.PipelineInterface { return c.tektonV1beta1Client().Pipelines(c.namespace()) } + +func (c *Client) pipelineRunsClient() v1beta1.PipelineRunInterface { + return c.tektonV1beta1Client().PipelineRuns(c.namespace()) +} diff --git a/internal/tekton/pipeline.go b/internal/tekton/pipeline.go index f4cb50bc..b27035e2 100644 --- a/internal/tekton/pipeline.go +++ b/internal/tekton/pipeline.go @@ -11,6 +11,7 @@ type ClientPipelineInterface interface { GetPipeline(ctxt context.Context, name string, options metav1.GetOptions) (*tekton.Pipeline, error) CreatePipeline(ctxt context.Context, pipeline *tekton.Pipeline, options metav1.CreateOptions) (*tekton.Pipeline, error) UpdatePipeline(ctxt context.Context, pipeline *tekton.Pipeline, options metav1.UpdateOptions) (*tekton.Pipeline, error) + DeletePipeline(ctxt context.Context, name string, options metav1.DeleteOptions) error } func (c *Client) GetPipeline(ctxt context.Context, name string, options metav1.GetOptions) (*tekton.Pipeline, error) { @@ -27,3 +28,8 @@ func (c *Client) UpdatePipeline(ctxt context.Context, pipeline *tekton.Pipeline, 
c.logger().Debugf("Update pipeline %s", pipeline.Name) return c.pipelinesClient().Update(ctxt, pipeline, options) } + +func (c *Client) DeletePipeline(ctxt context.Context, name string, options metav1.DeleteOptions) error { + c.logger().Debugf("Delete pipeline %s", name) + return c.pipelinesClient().Delete(ctxt, name, options) +} diff --git a/internal/tekton/pipelinerun.go b/internal/tekton/pipelinerun.go new file mode 100644 index 00000000..fc4faad1 --- /dev/null +++ b/internal/tekton/pipelinerun.go @@ -0,0 +1,41 @@ +package tekton + +import ( + "context" + + tekton "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type ClientPipelineRunInterface interface { + ListPipelineRuns(ctxt context.Context, options metav1.ListOptions) (*tekton.PipelineRunList, error) + GetPipelineRun(ctxt context.Context, name string, options metav1.GetOptions) (*tekton.PipelineRun, error) + CreatePipelineRun(ctxt context.Context, pipeline *tekton.PipelineRun, options metav1.CreateOptions) (*tekton.PipelineRun, error) + UpdatePipelineRun(ctxt context.Context, pipeline *tekton.PipelineRun, options metav1.UpdateOptions) (*tekton.PipelineRun, error) + DeletePipelineRun(ctxt context.Context, name string, options metav1.DeleteOptions) error +} + +func (c *Client) ListPipelineRuns(ctxt context.Context, options metav1.ListOptions) (*tekton.PipelineRunList, error) { + c.logger().Debugf("Get pipeline runs") + return c.pipelineRunsClient().List(ctxt, options) +} + +func (c *Client) GetPipelineRun(ctxt context.Context, name string, options metav1.GetOptions) (*tekton.PipelineRun, error) { + c.logger().Debugf("Get pipeline run %s", name) + return c.pipelineRunsClient().Get(ctxt, name, options) +} + +func (c *Client) CreatePipelineRun(ctxt context.Context, pipeline *tekton.PipelineRun, options metav1.CreateOptions) (*tekton.PipelineRun, error) { + c.logger().Debugf("Create pipeline run %s", pipeline.Name) + return 
c.pipelineRunsClient().Create(ctxt, pipeline, options) +} + +func (c *Client) UpdatePipelineRun(ctxt context.Context, pipeline *tekton.PipelineRun, options metav1.UpdateOptions) (*tekton.PipelineRun, error) { + c.logger().Debugf("Update pipeline run %s", pipeline.Name) + return c.pipelineRunsClient().Update(ctxt, pipeline, options) +} + +func (c *Client) DeletePipelineRun(ctxt context.Context, name string, options metav1.DeleteOptions) error { + c.logger().Debugf("Delete pipeline run %s", name) + return c.pipelineRunsClient().Delete(ctxt, name, options) +} diff --git a/internal/tekton/test_client.go b/internal/tekton/test_client.go index 7976902d..c798f602 100644 --- a/internal/tekton/test_client.go +++ b/internal/tekton/test_client.go @@ -21,6 +21,25 @@ type TestClient struct { FailUpdatePipeline bool // UpdatedPipelines is a slice of updated pipeline names. UpdatedPipelines []string + // FailDeletePipeline lets pipeline deletion fail. + FailDeletePipeline bool + // DeletedPipelines is a slice of deleted pipeline names. + DeletedPipelines []string + + // PipelineRuns is the pool of pipeline runs which can be retrieved. + PipelineRuns []*tekton.PipelineRun + // FailCreatePipelineRun lets pipeline run creation fail. + FailCreatePipelineRun bool + // CreatedPipelineRuns is a slice of created pipeline run names. + CreatedPipelineRuns []string + // FailUpdatePipelineRun lets pipeline run update fail. + FailUpdatePipelineRun bool + // UpdatedPipelineRuns is a slice of updated pipeline run names. + UpdatedPipelineRuns []string + // FailDeletePipelineRun lets pipeline run deletion fail. + FailDeletePipelineRun bool + // DeletedPipelineRuns is a slice of deleted pipeline run names. 
+ DeletedPipelineRuns []string } func (c *TestClient) GetPipeline(ctxt context.Context, name string, options metav1.GetOptions) (*tekton.Pipeline, error) { @@ -47,3 +66,52 @@ func (c *TestClient) UpdatePipeline(ctxt context.Context, pipeline *tekton.Pipel } return pipeline, nil } + +func (c *TestClient) DeletePipeline(ctxt context.Context, name string, options metav1.DeleteOptions) error { + c.DeletedPipelines = append(c.DeletedPipelines, name) + if c.FailDeletePipeline { + return errors.New("delete error") + } + return nil +} + +func (c *TestClient) ListPipelineRuns(ctxt context.Context, options metav1.ListOptions) (*tekton.PipelineRunList, error) { + items := []tekton.PipelineRun{} + for _, pr := range c.PipelineRuns { + items = append(items, *pr) + } + return &tekton.PipelineRunList{Items: items}, nil +} + +func (c *TestClient) GetPipelineRun(ctxt context.Context, name string, options metav1.GetOptions) (*tekton.PipelineRun, error) { + for _, p := range c.PipelineRuns { + if p.Name == name { + return p, nil + } + } + return nil, fmt.Errorf("pipeline run %s not found", name) +} + +func (c *TestClient) CreatePipelineRun(ctxt context.Context, pipeline *tekton.PipelineRun, options metav1.CreateOptions) (*tekton.PipelineRun, error) { + c.CreatedPipelineRuns = append(c.CreatedPipelineRuns, pipeline.Name) + if c.FailCreatePipelineRun { + return nil, errors.New("creation error") + } + return pipeline, nil +} + +func (c *TestClient) UpdatePipelineRun(ctxt context.Context, pipeline *tekton.PipelineRun, options metav1.UpdateOptions) (*tekton.PipelineRun, error) { + c.UpdatedPipelineRuns = append(c.UpdatedPipelineRuns, pipeline.Name) + if c.FailUpdatePipelineRun { + return nil, errors.New("update error") + } + return pipeline, nil +} + +func (c *TestClient) DeletePipelineRun(ctxt context.Context, name string, options metav1.DeleteOptions) error { + c.DeletedPipelineRuns = append(c.DeletedPipelineRuns, name) + if c.FailDeletePipelineRun { + return errors.New("delete error") 
+ } + return nil +} diff --git a/test/testdata/golden/interceptor/extended-payload-pr-opened.json b/test/testdata/golden/interceptor/extended-payload-pr-opened.json index 25f61115..6a108dae 100644 --- a/test/testdata/golden/interceptor/extended-payload-pr-opened.json +++ b/test/testdata/golden/interceptor/extended-payload-pr-opened.json @@ -15,6 +15,7 @@ "project": "foo", "component": "bar", "repository": "foo-bar", + "stage": "dev", "environment": "", "version": "", "gitRef": "feature/foo", diff --git a/test/testdata/golden/interceptor/extended-payload.json b/test/testdata/golden/interceptor/extended-payload.json index 62bed94c..3b9fe0d2 100644 --- a/test/testdata/golden/interceptor/extended-payload.json +++ b/test/testdata/golden/interceptor/extended-payload.json @@ -35,6 +35,7 @@ "project": "foo", "component": "bar", "repository": "foo-bar", + "stage": "dev", "environment": "", "version": "", "gitRef": "master", diff --git a/test/testdata/golden/interceptor/payload.json b/test/testdata/golden/interceptor/payload.json index c0e1803a..e56662d6 100644 --- a/test/testdata/golden/interceptor/payload.json +++ b/test/testdata/golden/interceptor/payload.json @@ -77,6 +77,7 @@ "project": "foo", "component": "bar", "repository": "foo-bar", + "stage": "dev", "environment": "", "version": "", "gitRef": "main", diff --git a/test/testdata/golden/interceptor/pipeline.yaml b/test/testdata/golden/interceptor/pipeline.yaml index 5fa31323..c0c1a9b4 100644 --- a/test/testdata/golden/interceptor/pipeline.yaml +++ b/test/testdata/golden/interceptor/pipeline.yaml @@ -5,6 +5,7 @@ metadata: labels: pipeline.opendevstack.org/git-ref: main pipeline.opendevstack.org/repository: foo-bar + pipeline.opendevstack.org/stage: dev name: bar-main spec: description: ODS