Skip to content

Commit

Permalink
feat: trigger on deploy status and periodically
Browse files Browse the repository at this point in the history
  • Loading branch information
msvticket committed Jul 26, 2023
1 parent a072a17 commit aad8df3
Show file tree
Hide file tree
Showing 18 changed files with 357 additions and 25 deletions.
2 changes: 1 addition & 1 deletion pkg/apis/lighthouse/v1alpha1/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -203,7 +203,7 @@ func (s *LighthouseJobSpec) GetEnvVars() map[string]string {
env[PullRefsEnv] = s.Refs.String()
}

if s.Type == job.PostsubmitJob || s.Type == job.BatchJob {
if s.Type != job.PresubmitJob {
return env
}

Expand Down
16 changes: 13 additions & 3 deletions pkg/config/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -251,6 +251,16 @@ func (c *Config) GetPostsubmits(repository scm.Repository) []job.Postsubmit {
return answer
}

// GetDeployments returns all deployment jobs configured for the given repository.
func (c *Config) GetDeployments(repository scm.Repository) []job.Deployment {
	var deployments []job.Deployment
	for _, name := range util.FullNames(repository) {
		deployments = append(deployments, c.Deployments[name]...)
	}
	return deployments
}

// GetPresubmits lets return all the pre submits for the given repo
func (c *Config) GetPresubmits(repository scm.Repository) []job.Presubmit {
fullNames := util.FullNames(repository)
Expand All @@ -262,9 +272,9 @@ func (c *Config) GetPresubmits(repository scm.Repository) []job.Presubmit {
}

// BranchRequirements partitions status contexts for a given org, repo branch into three buckets:
// - contexts that are always required to be present
// - contexts that are required, _if_ present
// - contexts that are always optional
// - contexts that are always required to be present
// - contexts that are required, _if_ present
// - contexts that are always optional
func BranchRequirements(org, repo, branch string, presubmits map[string][]job.Presubmit) ([]string, []string, []string) {
jobs, ok := presubmits[org+"/"+repo]
if !ok {
Expand Down
3 changes: 2 additions & 1 deletion pkg/config/job/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,8 @@ type Config struct {
Presubmits map[string][]Presubmit `json:"presubmits,omitempty"`
Postsubmits map[string][]Postsubmit `json:"postsubmits,omitempty"`
// Periodics are not associated with any repo.
Periodics []Periodic `json:"periodics,omitempty"`
Periodics []Periodic `json:"periodics,omitempty"`
Deployments map[string][]Deployment `json:"deployments,omitempty"`
}

func resolvePresets(name string, labels map[string]string, spec *v1.PodSpec, presets []Preset) error {
Expand Down
13 changes: 13 additions & 0 deletions pkg/config/job/deployment.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
package job

// Deployment describes a pipeline that is triggered by deployment status events.
type Deployment struct {
	Base
	Reporter
	// State is the deployment state that triggers this pipeline.
	// Can be one of: error, failure, inactive, in_progress, queued, pending, success.
	// If not set, deployment status events for all states trigger this pipeline.
	State string `json:"state,omitempty"`
	// Environment restricts triggering to deployments for this environment.
	// If not set, deployments for all environments trigger this pipeline.
	Environment string `json:"environment,omitempty"`
}
5 changes: 3 additions & 2 deletions pkg/config/job/periodic.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,10 +19,11 @@ package job
// Periodic runs on a timer.
type Periodic struct {
	Base
	Reporter
	// Branch is the branch to check out when the job runs.
	Branch string `json:"branch"`
	// Cron is the cron expression describing when the job triggers.
	Cron string `json:"cron"`
	// Tags for config entries
	Tags []string `json:"tags,omitempty"`
}

// SetDefaults initializes default values
Expand Down
2 changes: 2 additions & 0 deletions pkg/config/job/pipelinekind.go
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,8 @@ const (
PostsubmitJob PipelineKind = "postsubmit"
// Periodic job means it runs on a time-basis, unrelated to git changes.
PeriodicJob PipelineKind = "periodic"
// Deployment job means it runs on deployment status event
DeploymentJob PipelineKind = "deployment"
// BatchJob tests multiple unmerged PRs at the same time.
BatchJob PipelineKind = "batch"
)
14 changes: 13 additions & 1 deletion pkg/jobutil/jobutil.go
Original file line number Diff line number Diff line change
Expand Up @@ -143,10 +143,22 @@ func PostsubmitSpec(logger *logrus.Entry, p job.Postsubmit, refs v1alpha1.Refs)
return pjs
}

// DeploymentSpec initializes a PipelineOptionsSpec for a given deployment job.
func DeploymentSpec(logger *logrus.Entry, p job.Deployment, refs v1alpha1.Refs) v1alpha1.LighthouseJobSpec {
	spec := specFromJobBase(logger, p.Base)
	spec.Type = job.DeploymentJob
	spec.Context = p.Context
	spec.Refs = completePrimaryRefs(refs, p.Base)
	return spec
}

// PeriodicSpec initializes a PipelineOptionsSpec for a given periodic job.
func PeriodicSpec(logger *logrus.Entry, p job.Periodic) v1alpha1.LighthouseJobSpec {
func PeriodicSpec(logger *logrus.Entry, p job.Periodic, refs v1alpha1.Refs) v1alpha1.LighthouseJobSpec {
pjs := specFromJobBase(logger, p.Base)
pjs.Type = job.PeriodicJob
pjs.Context = p.Context
pjs.Refs = completePrimaryRefs(refs, p.Base)

return pjs
}
Expand Down
21 changes: 11 additions & 10 deletions pkg/plugins/plugin.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,16 +8,17 @@ import (

// Plugin defines a plugin and its handlers
type Plugin struct {
	Description             string
	ExcludedProviders       sets.String
	ConfigHelpProvider      ConfigHelpProvider
	IssueHandler            IssueHandler
	PullRequestHandler      PullRequestHandler
	PushEventHandler        PushEventHandler
	ReviewEventHandler      ReviewEventHandler
	StatusEventHandler      StatusEventHandler
	DeploymentStatusHandler DeploymentStatusHandler
	GenericCommentHandler   GenericCommentHandler
	Commands                []Command
}

// InvokeCommandHandler calls InvokeHandler on all commands
Expand Down
2 changes: 2 additions & 0 deletions pkg/plugins/plugins.go
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,8 @@ type PullRequestHandler func(Agent, scm.PullRequestHook) error
// StatusEventHandler defines the function contract for a scm.Status handler.
type StatusEventHandler func(Agent, scm.Status) error

type DeploymentStatusHandler func(Agent, scm.DeploymentStatusHook) error

// PushEventHandler defines the function contract for a scm.PushHook handler.
type PushEventHandler func(Agent, scm.PushHook) error

Expand Down
39 changes: 39 additions & 0 deletions pkg/plugins/trigger/deployment.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
package trigger

import (
"github.com/jenkins-x/go-scm/scm"
"github.com/jenkins-x/lighthouse/pkg/apis/lighthouse/v1alpha1"
"github.com/jenkins-x/lighthouse/pkg/jobutil"
"github.com/jenkins-x/lighthouse/pkg/scmprovider"
)

// handleDeployment launches a LighthouseJob for every configured deployment job
// that matches the incoming deployment status hook. A job's State/Environment
// fields act as filters; an empty filter matches every event.
// It returns the first launch error encountered, if any.
func handleDeployment(c Client, ds scm.DeploymentStatusHook) error {
	for _, j := range c.Config.GetDeployments(ds.Repo) {
		if j.State != "" && j.State != ds.DeploymentStatus.State {
			continue
		}
		if j.Environment != "" && j.Environment != ds.Deployment.Environment {
			continue
		}
		// Copy the job labels so the shared config is never mutated,
		// leaving room for the event GUID label added below.
		labels := make(map[string]string, len(j.Labels)+1)
		for k, v := range j.Labels {
			labels[k] = v
		}
		labels[scmprovider.EventGUID] = ds.DeploymentStatus.ID
		refs := v1alpha1.Refs{
			Org:      ds.Repo.Namespace,
			Repo:     ds.Repo.Name,
			BaseRef:  ds.Deployment.Ref,
			BaseSHA:  ds.Deployment.Sha,
			BaseLink: ds.Deployment.RepositoryLink,
			CloneURI: ds.Repo.Clone,
		}
		pj := jobutil.NewLighthouseJob(jobutil.DeploymentSpec(c.Logger, j, refs), labels, j.Annotations)
		c.Logger.WithFields(jobutil.LighthouseJobFields(&pj)).Info("Creating a new LighthouseJob.")
		if _, err := c.LauncherClient.Launch(&pj); err != nil {
			return err
		}
	}
	return nil
}
84 changes: 84 additions & 0 deletions pkg/plugins/trigger/periodic.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,84 @@
package trigger

import (
"fmt"
"strings"

"github.com/jenkins-x/lighthouse/pkg/apis/lighthouse/v1alpha1"
"github.com/jenkins-x/lighthouse/pkg/config"
"github.com/jenkins-x/lighthouse/pkg/config/job"
"github.com/jenkins-x/lighthouse/pkg/filebrowser"
"github.com/jenkins-x/lighthouse/pkg/jobutil"
"github.com/jenkins-x/lighthouse/pkg/scmprovider"
"github.com/jenkins-x/lighthouse/pkg/triggerconfig/inrepo"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"gopkg.in/robfig/cron.v2"
)

// PeriodicExec wraps a periodic job together with the owner/repo it belongs to
// and the launcher used to start LighthouseJobs when its cron schedule fires.
// It satisfies the cron.Job interface via its Run method.
type PeriodicExec struct {
	job.Periodic
	Owner, Repo string
	LauncherClient launcher
}

// Run launches a LighthouseJob for this periodic job. It is invoked by the
// cron scheduler; launch failures are logged rather than returned because
// cron.Job has no error channel.
func (p *PeriodicExec) Run() {
	// Copy the job labels so the shared configuration is never mutated.
	labels := make(map[string]string, len(p.Labels))
	for k, v := range p.Labels {
		labels[k] = v
	}
	refs := v1alpha1.Refs{
		Org:     p.Owner,
		Repo:    p.Repo,
		BaseRef: p.Branch,
	}
	l := logrus.WithField(scmprovider.RepoLogField, p.Repo).WithField(scmprovider.OrgLogField, p.Owner)

	pj := jobutil.NewLighthouseJob(jobutil.PeriodicSpec(l, p.Periodic, refs), labels, p.Annotations)
	l.WithFields(jobutil.LighthouseJobFields(&pj)).Info("Creating a new LighthouseJob.")
	if _, err := p.LauncherClient.Launch(&pj); err != nil {
		l.WithError(err).Error("failed to create lighthouse job for cron")
	}
}

// StartPeriodics schedules every periodic job found in the in-repo trigger
// configuration of each repository with in-repo config enabled, then starts
// the cron scheduler if at least one job was successfully scheduled.
// Per-repository failures are logged and skipped so one bad repo cannot
// prevent the others from being scheduled.
func StartPeriodics(configAgent *config.Agent, launcher launcher, fileBrowsers *filebrowser.FileBrowsers) {
	periodics := cron.New()
	periodicExists := false
	resolverCache := inrepo.NewResolverCache()
	fc := filebrowser.NewFetchCache()
	c := configAgent.Config()
	for fullName := range c.InRepoConfig.Enabled {
		repo := strings.SplitN(fullName, "/", 2)
		if len(repo) != 2 {
			logrus.Errorf("Wrong format of %s, not owner/repo", fullName)
			continue
		}
		// TODO Ensure that the repo clones are removed afterwards and deregistered
		cfg, err := inrepo.LoadTriggerConfig(fileBrowsers, fc, resolverCache, repo[0], repo[1], "")
		if err != nil {
			logrus.Error(errors.Wrapf(err, "failed to calculate in repo config"))
			continue
		}

		for i := range cfg.Spec.Periodics {
			periodic := cfg.Spec.Periodics[i]
			// TODO Support rereading periodics configuration
			_, err := periodics.AddJob(periodic.Cron, &PeriodicExec{
				Periodic:       periodic,
				Owner:          repo[0],
				Repo:           repo[1],
				LauncherClient: launcher,
			})
			if err != nil {
				// Use Wrap with a pre-formatted message: passing a Sprintf result to
				// Wrapf would reinterpret it as a format string, so a '%' in the job
				// name or repo would corrupt the log output.
				logrus.Error(errors.Wrap(err, fmt.Sprintf("failed to schedule job %s in %s", periodic.Name, fullName)))
			} else {
				periodicExists = true
			}
		}
	}

	if periodicExists {
		periodics.Start()
	}
}
11 changes: 8 additions & 3 deletions pkg/plugins/trigger/trigger.go
Original file line number Diff line number Diff line change
Expand Up @@ -45,9 +45,10 @@ var plugin = plugins.Plugin{
Description: `The trigger plugin starts tests in reaction to commands and pull request events. It is responsible for ensuring that test jobs are only run on trusted PRs. A PR is considered trusted if the author is a member of the 'trusted organization' for the repository or if such a member has left an '/ok-to-test' command on the PR.
<br>Trigger starts jobs automatically when a new trusted PR is created or when an untrusted PR becomes trusted, but it can also be used to start jobs manually via the '/test' command.
<br>The '/retest' command can be used to rerun jobs that have reported failure.`,
ConfigHelpProvider: configHelp,
PullRequestHandler: handlePullRequest,
PushEventHandler: handlePush,
ConfigHelpProvider: configHelp,
PullRequestHandler: handlePullRequest,
PushEventHandler: handlePush,
DeploymentStatusHandler: handleDeploymentStatus,
Commands: []plugins.Command{{
Name: "ok-to-test",
Description: "Marks a PR as 'trusted' and starts tests.",
Expand Down Expand Up @@ -75,6 +76,10 @@ var plugin = plugins.Plugin{
}},
}

// handleDeploymentStatus is the plugin entry point for deployment status
// events; it builds a trigger Client from the agent and delegates to
// handleDeployment.
func handleDeploymentStatus(agent plugins.Agent, ds scm.DeploymentStatusHook) error {
	return handleDeployment(getClient(agent), ds)
}

func init() {
customTriggerCommand := os.Getenv(customerTriggerCommandEnvVar)
if customTriggerCommand != "" {
Expand Down
71 changes: 71 additions & 0 deletions pkg/triggerconfig/inrepo/load_triggers.go
Original file line number Diff line number Diff line change
Expand Up @@ -95,6 +95,8 @@ func mergeConfigs(m map[string]*triggerconfig.Config) (*triggerconfig.Config, er
// lets check for duplicates
presubmitNames := map[string]string{}
postsubmitNames := map[string]string{}
periodicNames := map[string]string{}
deploymentNames := map[string]string{}
for file, cfg := range m {
for _, ps := range cfg.Spec.Presubmits {
name := ps.Name
Expand All @@ -114,6 +116,24 @@ func mergeConfigs(m map[string]*triggerconfig.Config) (*triggerconfig.Config, er
return nil, errors.Errorf("duplicate postsubmit %s in file %s and %s", name, otherFile, file)
}
}
for _, ps := range cfg.Spec.Periodics {
name := ps.Name
otherFile := periodicNames[name]
if otherFile == "" {
periodicNames[name] = file
} else {
return nil, errors.Errorf("duplicate periodic %s in file %s and %s", name, otherFile, file)
}
}
for _, ps := range cfg.Spec.Deployments {
name := ps.Name
otherFile := deploymentNames[name]
if otherFile == "" {
deploymentNames[name] = file
} else {
return nil, errors.Errorf("duplicate deployment %s in file %s and %s", name, otherFile, file)
}
}
answer = merge.CombineConfigs(answer, cfg)
}
if answer == nil {
Expand Down Expand Up @@ -193,6 +213,57 @@ func loadConfigFile(filePath string, fileBrowsers *filebrowser.FileBrowsers, fc
})
}
}
for i := range repoConfig.Spec.Deployments {
r := &repoConfig.Spec.Deployments[i]
sourcePath := r.SourcePath
if sourcePath != "" {
if r.Agent == "" {
r.Agent = job.TektonPipelineAgent
}
// lets load the local file data now as we have locked the git file system
data, err := loadLocalFile(dir, sourcePath, sha)
if err != nil {
return nil, err
}
r.SetPipelineLoader(func(base *job.Base) error {
err = loadJobBaseFromSourcePath(data, fileBrowsers, fc, cache, base, ownerName, repoName, sourcePath, sha)
if err != nil {
return errors.Wrapf(err, "failed to load source for deployment %s", r.Name)
}
r.Base = *base
if r.Agent == "" && r.PipelineRunSpec != nil {
r.Agent = job.TektonPipelineAgent
}
return nil
})
}
}
for i := range repoConfig.Spec.Periodics {
r := &repoConfig.Spec.Periodics[i]
sourcePath := r.SourcePath
if sourcePath != "" {
if r.Agent == "" {
r.Agent = job.TektonPipelineAgent
}
// lets load the local file data now as we have locked the git file system
data, err := loadLocalFile(dir, sourcePath, sha)
if err != nil {
return nil, err
}
r.SetPipelineLoader(func(base *job.Base) error {
err = loadJobBaseFromSourcePath(data, fileBrowsers, fc, cache, base, ownerName, repoName, sourcePath, sha)
if err != nil {
return errors.Wrapf(err, "failed to load source for periodic %s", r.Name)
}
r.Base = *base
if r.Agent == "" && r.PipelineRunSpec != nil {
r.Agent = job.TektonPipelineAgent
}
return nil
})
}
}

return repoConfig, nil
}

Expand Down
Loading

0 comments on commit aad8df3

Please sign in to comment.