diff --git a/deps.go b/deps.go
new file mode 100644
index 00000000..cd926542
--- /dev/null
+++ b/deps.go
@@ -0,0 +1,246 @@
+package dalec
+
+import (
+	goerrors "errors"
+	"slices"
+
+	"github.com/pkg/errors"
+)
+
+// PackageConstraints is used to specify complex constraints for a package dependency.
+type PackageConstraints struct {
+	// Version is a list of version constraints for the package.
+	// The format of these strings is dependent on the package manager of the target system.
+	// Examples:
+	//   [">=1.0.0", "<2.0.0"]
+	Version []string `yaml:"version,omitempty" json:"version,omitempty"`
+	// Arch is a list of architecture constraints for the package.
+	// Use this to specify that a package constraint only applies to certain architectures.
+	Arch []string `yaml:"arch,omitempty" json:"arch,omitempty"`
+}
+
+// PackageDependencies is a list of dependencies for a package.
+// This will be included in the package metadata so that the package manager can install the dependencies.
+// It also includes build-time dependencies, which we'll install before running any build steps.
+type PackageDependencies struct {
+	// Build is the list of packages required to build the package.
+	Build map[string]PackageConstraints `yaml:"build,omitempty" json:"build,omitempty"`
+	// Runtime is the list of packages required to install/run the package.
+	Runtime map[string]PackageConstraints `yaml:"runtime,omitempty" json:"runtime,omitempty"`
+	// Recommends is the list of packages recommended to install with the generated package.
+	// Note: Not all package managers support this (e.g. rpm)
+	Recommends map[string]PackageConstraints `yaml:"recommends,omitempty" json:"recommends,omitempty"`
+
+	// Test lists any extra packages required for running tests.
+	// These packages are only installed for tests which have steps that require
+	// running a command in the built container.
+	// See [TestSpec] for more information.
+	Test []string `yaml:"test,omitempty" json:"test,omitempty"`
+
+	// ExtraRepos is used to inject extra package repositories that may be used to
+	// satisfy package dependencies in various stages.
+	ExtraRepos []PackageRepositoryConfig `yaml:"extra_repos,omitempty" json:"extra_repos,omitempty"`
+}
+
+// PackageRepositoryConfig configures an extra package repository to make
+// available when installing package dependencies.
+type PackageRepositoryConfig struct {
+	// Keys are the list of keys that need to be imported to use the configured
+	// repositories.
+	Keys map[string]Source `yaml:"keys,omitempty" json:"keys,omitempty"`
+
+	// Config is the list of repo configs to add to the environment. The format of
+	// these configs is distro-specific (e.g. apt/yum configs).
+	Config map[string]Source `yaml:"config" json:"config"`
+
+	// Data lists all the extra data that needs to be made available for the
+	// provided repository config to work.
+	// As an example, if the provided config is referencing a file-backed repository
+	// then data would include the file data, assuming it's not already available
+	// in the environment.
+	Data []SourceMount `yaml:"data,omitempty" json:"data,omitempty"`
+	// Envs specifies the list of environments to make the repositories available
+	// during.
+	// Acceptable values are:
+	//   - "build"   - Repositories are added prior to installing build dependencies
+	//   - "test"    - Repositories are added prior to installing test dependencies
+	//   - "install" - Repositories are added prior to installing the output
+	//     package in a container build target.
+	Envs []string `yaml:"envs" json:"envs" jsonschema:"enum=build,enum=test,enum=install"`
+}
+
+func (d *PackageDependencies) processBuildArgs(args map[string]string, allowArg func(string) bool) error {
+	if d == nil {
+		return nil
+	}
+
+	var errs []error
+	for i, repo := range d.ExtraRepos {
+		if err := repo.processBuildArgs(args, allowArg); err != nil {
+			errs = append(errs, errors.Wrapf(err, "extra repos index %d", i))
+		}
+		d.ExtraRepos[i] = repo
+	}
+	return goerrors.Join(errs...)
+}
+
+func (r *PackageRepositoryConfig) processBuildArgs(args map[string]string, allowArg func(string) bool) error {
+	if r == nil {
+		return nil
+	}
+
+	var errs []error
+
+	for k := range r.Config {
+		src := r.Config[k]
+		if err := src.processBuildArgs(args, allowArg); err != nil {
+			errs = append(errs, errors.Wrapf(err, "config %s", k))
+			continue
+		}
+		r.Config[k] = src
+	}
+
+	for k := range r.Keys {
+		src := r.Keys[k]
+		if err := src.processBuildArgs(args, allowArg); err != nil {
+			errs = append(errs, errors.Wrapf(err, "key %s", k))
+			continue
+		}
+		r.Keys[k] = src
+	}
+
+	for i := range r.Data {
+		d := r.Data[i]
+		if err := d.processBuildArgs(args, allowArg); err != nil {
+			errs = append(errs, errors.Wrapf(err, "data index %d", i))
+			continue
+		}
+		r.Data[i] = d
+	}
+
+	return goerrors.Join(errs...)
+}
+
+func (d *PackageDependencies) fillDefaults() {
+	if d == nil {
+		return
+	}
+
+	for i, r := range d.ExtraRepos {
+		r.fillDefaults()
+		d.ExtraRepos[i] = r
+	}
+}
+
+func (r *PackageRepositoryConfig) fillDefaults() {
+	if len(r.Envs) == 0 {
+		// default to all stages for the extra repo if unspecified
+		r.Envs = []string{"build", "install", "test"}
+	}
+
+	for i, src := range r.Config {
+		fillDefaults(&src)
+		r.Config[i] = src
+	}
+
+	for i, src := range r.Keys {
+		fillDefaults(&src)
+
+		// Default to 0644 permissions for gpg keys. This is because apt will only import
+		// keys with a particular permission set.
+		if src.HTTP != nil {
+			src.HTTP.Permissions = 0644
+		}
+		r.Keys[i] = src
+	}
+
+	for i, mount := range r.Data {
+		mount.fillDefaults()
+		r.Data[i] = mount
+	}
+}
+
+func (d *PackageDependencies) validate() error {
+	if d == nil {
+		return nil
+	}
+
+	var errs []error
+	for i, r := range d.ExtraRepos {
+		if err := r.validate(); err != nil {
+			errs = append(errs, errors.Wrapf(err, "extra repo %d", i))
+		}
+	}
+
+	return goerrors.Join(errs...)
+}
+
+func (r *PackageRepositoryConfig) validate() error {
+	var errs []error
+	for name, src := range r.Keys {
+		if err := src.validate(); err != nil {
+			errs = append(errs, errors.Wrapf(err, "key %s", name))
+		}
+	}
+	for name, src := range r.Config {
+		if err := src.validate(); err != nil {
+			errs = append(errs, errors.Wrapf(err, "config %s", name))
+		}
+	}
+	for _, mnt := range r.Data {
+		if err := mnt.validate("/"); err != nil {
+			errs = append(errs, errors.Wrapf(err, "data mount path %s", mnt.Dest))
+		}
+	}
+
+	return goerrors.Join(errs...)
+}
+
+func (p *PackageDependencies) GetExtraRepos(env string) []PackageRepositoryConfig {
+	return GetExtraRepos(p.ExtraRepos, env)
+}
+
+func GetExtraRepos(repos []PackageRepositoryConfig, env string) []PackageRepositoryConfig {
+	var out []PackageRepositoryConfig
+	for _, repo := range repos {
+		if slices.Contains(repo.Envs, env) {
+			out = append(out, repo)
+		}
+	}
+	return out
+}
+
+func (s *Spec) GetBuildRepos(targetKey string) []PackageRepositoryConfig {
+	deps := s.GetPackageDeps(targetKey)
+	if deps == nil {
+		deps = s.Dependencies
+		if deps == nil {
+			return nil
+		}
+	}
+
+	return deps.GetExtraRepos("build")
+}
+
+func (s *Spec) GetInstallRepos(targetKey string) []PackageRepositoryConfig {
+	deps := s.GetPackageDeps(targetKey)
+	if deps == nil {
+		deps = s.Dependencies
+		if deps == nil {
+			return nil
+		}
+	}
+
+	return deps.GetExtraRepos("install")
+}
+
+func (s *Spec) GetTestRepos(targetKey string) []PackageRepositoryConfig {
+	deps := s.GetPackageDeps(targetKey)
+	if deps == nil {
+		deps = s.Dependencies
+		if deps == nil {
+			return nil
+		}
+	}
+
+	return deps.GetExtraRepos("test")
+}
diff --git a/frontend/mux.go b/frontend/mux.go
index 384fffa3..e454185c 100644
--- a/frontend/mux.go
+++ b/frontend/mux.go
@@ -205,12 +205,9 @@ func (m *BuildMux) loadSpec(ctx context.Context, client gwclient.Client) (*dalec
 
 	// Note: this is not suitable for passing to builds since it does not have platform information
 	spec, err := LoadSpec(ctx, dc, nil, func(cfg *LoadConfig) {
-		cfg.SubstituteOpts = append(cfg.SubstituteOpts, func(cfg *dalec.SubstituteConfig) {
-			// Allow any args here since we aren't trying to validate the spec at this point.
-			cfg.AllowArg = func(string) bool {
-				return true
-			}
-		})
+		// We want to allow any arg to be passed to the spec since we don't know what
+		// args are valid at this point, nor do we care here.
+ cfg.SubstituteOpts = append(cfg.SubstituteOpts, dalec.WithAllowAnyArg) }) if err != nil { return nil, err diff --git a/helpers.go b/helpers.go index 09d6be5d..1891beb1 100644 --- a/helpers.go +++ b/helpers.go @@ -324,42 +324,6 @@ func (s *Spec) GetBuildDeps(targetKey string) map[string]PackageConstraints { return deps.Build } -func (s *Spec) GetBuildRepos(targetKey string) []PackageRepositoryConfig { - deps := s.GetPackageDeps(targetKey) - if deps == nil { - deps = s.Dependencies - if deps == nil { - return nil - } - } - - return deps.GetExtraRepos("build") -} - -func (s *Spec) GetInstallRepos(targetKey string) []PackageRepositoryConfig { - deps := s.GetPackageDeps(targetKey) - if deps == nil { - deps = s.Dependencies - if deps == nil { - return nil - } - } - - return deps.GetExtraRepos("install") -} - -func (s *Spec) GetTestRepos(targetKey string) []PackageRepositoryConfig { - deps := s.GetPackageDeps(targetKey) - if deps == nil { - deps = s.Dependencies - if deps == nil { - return nil - } - } - - return deps.GetExtraRepos("test") -} - func (s *Spec) GetTestDeps(targetKey string) []string { var deps *PackageDependencies if t, ok := s.Targets[targetKey]; ok { @@ -623,17 +587,3 @@ func BaseImageConfig(platform *ocispecs.Platform) *DockerImageSpec { return img } - -func (p *PackageDependencies) GetExtraRepos(env string) []PackageRepositoryConfig { - return GetExtraRepos(p.ExtraRepos, env) -} - -func GetExtraRepos(repos []PackageRepositoryConfig, env string) []PackageRepositoryConfig { - var out []PackageRepositoryConfig - for _, repo := range repos { - if slices.Contains(repo.Envs, env) { - out = append(repos, repo) - } - } - return out -} diff --git a/load.go b/load.go index 4eaa717f..f6dcdd8c 100644 --- a/load.go +++ b/load.go @@ -4,12 +4,10 @@ import ( goerrors "errors" "fmt" "os" - "path" "strings" "github.com/goccy/go-yaml" "github.com/moby/buildkit/frontend/dockerfile/shell" - "github.com/moby/buildkit/frontend/dockerui" "github.com/pkg/errors" "golang.org/x/exp/maps" ) @@ -76,204 +74,45 @@ func expandArgs(lex *shell.Lex, s string, args map[string]string, allowArg func( } } - return result.Result, goerrors.Join(errs...) 
+ return result.Result, errors.Wrap(goerrors.Join(errs...), "error performing variable expansion") } -func (s *Source) substituteBuildArgs(args map[string]string, allowArg func(key string) bool) error { - lex := shell.NewLex('\\') - // force the shell lexer to skip unresolved env vars so they aren't - // replaced with "" - lex.SkipUnsetEnv = true - - var errs []error - appendErr := func(err error) { - errs = append(errs, err) - } - - switch { - case s.DockerImage != nil: - updated, err := expandArgs(lex, s.DockerImage.Ref, args, allowArg) - if err != nil { - appendErr(fmt.Errorf("error performing shell expansion on docker image ref: %w", err)) - } - s.DockerImage.Ref = updated - - if s.DockerImage.Cmd != nil { - for _, mnt := range s.DockerImage.Cmd.Mounts { - err := mnt.Spec.substituteBuildArgs(args, allowArg) - if err != nil { - appendErr(fmt.Errorf("error performing shell expansion on docker image mount: %w", err)) - } - } - } - case s.Git != nil: - updated, err := expandArgs(lex, s.Git.URL, args, allowArg) - s.Git.URL = updated - if err != nil { - appendErr(err) - } - - updated, err = expandArgs(lex, s.Git.Commit, args, allowArg) - s.Git.Commit = updated - if err != nil { - appendErr(err) - } - - case s.HTTP != nil: - updated, err := expandArgs(lex, s.HTTP.URL, args, allowArg) - if err != nil { - appendErr(err) - } - s.HTTP.URL = updated - case s.Context != nil: - updated, err := expandArgs(lex, s.Context.Name, args, allowArg) - s.Context.Name = updated - if err != nil { - appendErr(err) - } - case s.Build != nil: - err := s.Build.Source.substituteBuildArgs(args, allowArg) - if err != nil { - appendErr(err) - } - - updated, err := expandArgs(lex, s.Build.DockerfilePath, args, allowArg) - if err != nil { - appendErr(err) - } - s.Build.DockerfilePath = updated - - updated, err = expandArgs(lex, s.Build.Target, args, allowArg) - if err != nil { - appendErr(err) - } - s.Build.Target = updated - } - - return goerrors.Join(errs...) -} +var errUnknownArg = errors.New("unknown arg") -func fillDefaults(s *Source) { - switch { - case s.DockerImage != nil: - if s.DockerImage.Cmd != nil { - for _, mnt := range s.DockerImage.Cmd.Mounts { - fillDefaults(&mnt.Spec) - } - } - case s.Git != nil: - case s.HTTP != nil: - case s.Context != nil: - if s.Context.Name == "" { - s.Context.Name = dockerui.DefaultLocalNameContext - } - case s.Build != nil: - fillDefaults(&s.Build.Source) - case s.Inline != nil: - } +type SubstituteConfig struct { + AllowArg func(string) bool } -func (s *Source) validate(failContext ...string) (retErr error) { - count := 0 - - defer func() { - if retErr != nil && failContext != nil { - retErr = errors.Wrap(retErr, strings.Join(failContext, " ")) - } - }() - - for _, g := range s.Generate { - if err := g.Validate(); err != nil { - retErr = goerrors.Join(retErr, err) - } - } - - if s.DockerImage != nil { - if s.DockerImage.Ref == "" { - retErr = goerrors.Join(retErr, fmt.Errorf("docker image source variant must have a ref")) - } - - if s.DockerImage.Cmd != nil { - // If someone *really* wants to extract the entire rootfs, they need to say so explicitly. - // We won't fill this in for them, particularly because this is almost certainly not the user's intent. 
- if s.Path == "" { - retErr = goerrors.Join(retErr, errors.Errorf("source path cannot be empty")) - } - - for _, mnt := range s.DockerImage.Cmd.Mounts { - if err := mnt.validate(s.Path); err != nil { - retErr = goerrors.Join(retErr, err) - } - if err := mnt.Spec.validate("docker image source with ref", "'"+s.DockerImage.Ref+"'"); err != nil { - retErr = goerrors.Join(retErr, err) - } - } - } - - count++ - } - - if s.Git != nil { - count++ - } - if s.HTTP != nil { - if err := s.HTTP.validate(); err != nil { - retErr = goerrors.Join(retErr, err) - } - count++ - } - if s.Context != nil { - count++ - } - if s.Build != nil { - c := s.Build.DockerfilePath - if err := s.Build.validate("build source with dockerfile", "`"+c+"`"); err != nil { - retErr = goerrors.Join(retErr, err) - } - - count++ - } - - if s.Inline != nil { - if err := s.Inline.validate(s.Path); err != nil { - retErr = goerrors.Join(retErr, err) - } - count++ - } - - switch count { - case 0: - retErr = goerrors.Join(retErr, fmt.Errorf("no non-nil source variant")) - case 1: - return retErr - default: - retErr = goerrors.Join(retErr, fmt.Errorf("more than one source variant defined")) - } +type SubstituteOpt func(*SubstituteConfig) - return retErr +// AllowAnyArg can be used to set [SubstituteConfig.AllowArg] to allow any arg +// to be substituted regardless of whether it is declared in the spec. +func AllowAnyArg(s string) bool { + return true } -var errUnknownArg = errors.New("unknown arg") - -type SubstituteConfig struct { - AllowArg func(string) bool +// WithAllowAnyArg is a [SubstituteOpt] that sets [SubstituteConfig.AllowArg] to +// [AllowAnyArg]. +func WithAllowAnyArg(cfg *SubstituteConfig) { + cfg.AllowArg = AllowAnyArg } -type SubstituteOpt func(*SubstituteConfig) +// DisallowAllUndeclared can be used to set [SubstituteConfig.AllowArg] to disallow args +// unless they are declared in the spec. +// This is used by default when substituting args. 
+func DisallowAllUndeclared(s string) bool { + return false +} func (s *Spec) SubstituteArgs(env map[string]string, opts ...SubstituteOpt) error { var cfg SubstituteConfig + cfg.AllowArg = DisallowAllUndeclared + for _, o := range opts { o(&cfg) } - if cfg.AllowArg == nil { - cfg.AllowArg = func(string) bool { - return false - } - } - lex := shell.NewLex('\\') // force the shell lexer to skip unresolved env vars so they aren't // replaced with "" @@ -303,35 +142,26 @@ func (s *Spec) SubstituteArgs(env map[string]string, opts ...SubstituteOpt) erro } for name, src := range s.Sources { - if err := src.substituteBuildArgs(args, cfg.AllowArg); err != nil { - appendErr(fmt.Errorf("error performing shell expansion on source %q: %w", name, err)) - } - if src.DockerImage != nil { - if err := src.DockerImage.Cmd.processBuildArgs(lex, args, name, cfg.AllowArg); err != nil { - appendErr(fmt.Errorf("error performing shell expansion on source %q: %w", name, err)) - } + if err := src.processBuildArgs(args, cfg.AllowArg); err != nil { + appendErr(errors.Wrapf(err, "source %q", name)) } s.Sources[name] = src } updated, err := expandArgs(lex, s.Version, args, cfg.AllowArg) if err != nil { - appendErr(fmt.Errorf("error performing shell expansion on version: %w", err)) + appendErr(errors.Wrap(err, "version")) } s.Version = updated updated, err = expandArgs(lex, s.Revision, args, cfg.AllowArg) if err != nil { - appendErr(fmt.Errorf("error performing shell expansion on revision: %w", err)) + appendErr(errors.Wrap(err, "revision")) } s.Revision = updated - for k, v := range s.Build.Env { - updated, err := expandArgs(lex, v, args, cfg.AllowArg) - if err != nil { - appendErr(fmt.Errorf("error performing shell expansion on env var %q: %w", k, err)) - } - s.Build.Env[k] = updated + if err := s.Build.processBuildArgs(lex, args, cfg.AllowArg); err != nil { + appendErr(errors.Wrap(err, "build")) } if s.Build.NetworkMode != "" { @@ -344,30 +174,35 @@ func (s *Spec) SubstituteArgs(env map[string]string, opts ...SubstituteOpt) erro for i, step := range s.Build.Steps { bs := &step - if err := bs.processBuildArgs(lex, args, i, cfg.AllowArg); err != nil { - appendErr(fmt.Errorf("error performing shell expansion on build step %d: %w", i, err)) + if err := bs.processBuildArgs(lex, args, cfg.AllowArg); err != nil { + appendErr(errors.Wrapf(err, "step index %d", i)) } s.Build.Steps[i] = *bs } for _, t := range s.Tests { - if err := t.processBuildArgs(lex, args, t.Name, cfg.AllowArg); err != nil { - appendErr(fmt.Errorf("error performing shell expansion on test %q: %w", t.Name, err)) + if err := t.processBuildArgs(lex, args, cfg.AllowArg); err != nil { + appendErr(err) } } for name, t := range s.Targets { - if err := t.processBuildArgs(name, lex, args, cfg.AllowArg); err != nil { - appendErr(fmt.Errorf("error processing build args for target %q: %w", name, err)) + if err := t.processBuildArgs(lex, args, cfg.AllowArg); err != nil { + appendErr(errors.Wrapf(err, "target %s", name)) } + s.Targets[name] = t } if s.PackageConfig != nil { if err := s.PackageConfig.processBuildArgs(lex, args, cfg.AllowArg); err != nil { - appendErr(fmt.Errorf("could not process build args for base spec package config: %w", err)) + appendErr(errors.Wrap(err, "package config")) } } + if err := s.Dependencies.processBuildArgs(args, cfg.AllowArg); err != nil { + appendErr(errors.Wrap(err, "dependencies")) + } + return goerrors.Join(errs...) 
} @@ -407,19 +242,19 @@ func stripXFields(dt []byte) ([]byte, error) { return yaml.Marshal(obj) } -func (s *BuildStep) processBuildArgs(lex *shell.Lex, args map[string]string, i int, allowArg func(string) bool) error { +func (s *BuildStep) processBuildArgs(lex *shell.Lex, args map[string]string, allowArg func(string) bool) error { var errs []error for k, v := range s.Env { updated, err := expandArgs(lex, v, args, allowArg) if err != nil { - errs = append(errs, fmt.Errorf("error performing shell expansion on env var %q for step %d: %w", k, i, err)) + errs = append(errs, errors.Wrapf(err, "env %s=%s", k, v)) } s.Env[k] = updated } return goerrors.Join(errs...) } -func (c *Command) processBuildArgs(lex *shell.Lex, args map[string]string, name string, allowArg func(string) bool) error { +func (c *Command) processBuildArgs(lex *shell.Lex, args map[string]string, allowArg func(string) bool) error { if c == nil { return nil } @@ -429,23 +264,32 @@ func (c *Command) processBuildArgs(lex *shell.Lex, args map[string]string, name errs = append(errs, err) } - for _, s := range c.Mounts { - if err := s.Spec.substituteBuildArgs(args, allowArg); err != nil { - appendErr(fmt.Errorf("error performing shell expansion on source ref %q: %w", name, err)) + for i, s := range c.Mounts { + if err := s.processBuildArgs(args, allowArg); err != nil { + appendErr(err) + continue } + c.Mounts[i] = s } + for k, v := range c.Env { updated, err := expandArgs(lex, v, args, allowArg) if err != nil { - appendErr(fmt.Errorf("error performing shell expansion on env var %q for source %q: %w", k, name, err)) + appendErr(errors.Wrapf(err, "env %s=%v", k, v)) + continue } c.Env[k] = updated } for i, step := range c.Steps { + if err := step.processBuildArgs(lex, args, allowArg); err != nil { + appendErr(errors.Wrapf(err, "step index %d", i)) + + } for k, v := range step.Env { updated, err := expandArgs(lex, v, args, allowArg) if err != nil { - appendErr(fmt.Errorf("error performing shell expansion on env var %q for source %q: %w", k, name, err)) + appendErr(errors.Wrapf(err, "step env %s=%s", k, v)) + continue } step.Env[k] = updated @@ -472,68 +316,38 @@ func (s *Spec) FillDefaults() { } } - if s.Dependencies != nil { - for i := range len(s.Dependencies.ExtraRepos) { - fillExtraRepoDefaults(&s.Dependencies.ExtraRepos[i]) - } - } -} - -func fillExtraRepoDefaults(extraRepo *PackageRepositoryConfig) { - if len(extraRepo.Envs) == 0 { - // default to all stages for the extra repo if unspecified - extraRepo.Envs = []string{"build", "install", "test"} - } - - for configName := range extraRepo.Config { - configSource := extraRepo.Config[configName] - fillDefaults(&configSource) - extraRepo.Config[configName] = configSource - } - - for keyName := range extraRepo.Keys { - keySource := extraRepo.Keys[keyName] - fillDefaults(&keySource) - - // Default to 0644 permissions for gpg keys. This is because apt will will only import - // keys with a particular permission set. 
- if keySource.HTTP != nil { - keySource.HTTP.Permissions = 0644 - } + s.Dependencies.fillDefaults() - extraRepo.Keys[keyName] = keySource - } - - for _, mount := range extraRepo.Data { - fillDefaults(&mount.Spec) + for k := range s.Targets { + t := s.Targets[k] + t.fillDefaults() + s.Targets[k] = t } } func (s Spec) Validate() error { - var outErr error + var errs []error for name, src := range s.Sources { if strings.ContainsRune(name, os.PathSeparator) { - outErr = goerrors.Join(outErr, &InvalidSourceError{Name: name, Err: sourceNamePathSeparatorError}) + errs = append(errs, &InvalidSourceError{Name: name, Err: sourceNamePathSeparatorError}) } if err := src.validate(); err != nil { - outErr = goerrors.Join(&InvalidSourceError{Name: name, Err: fmt.Errorf("error validating source ref %q: %w", name, err)}) + errs = append(errs, &InvalidSourceError{Name: name, Err: fmt.Errorf("error validating source ref %q: %w", name, err)}) } if src.DockerImage != nil && src.DockerImage.Cmd != nil { for p, cfg := range src.DockerImage.Cmd.CacheDirs { if _, err := sharingMode(cfg.Mode); err != nil { - outErr = goerrors.Join(&InvalidSourceError{Name: name, Err: errors.Wrapf(err, "invalid sharing mode for source %q with cache mount at path %q", name, p)}) + errs = append(errs, &InvalidSourceError{Name: name, Err: errors.Wrapf(err, "invalid sharing mode for source %q with cache mount at path %q", name, p)}) } } } } for _, t := range s.Tests { - for p, cfg := range t.CacheDirs { - if _, err := sharingMode(cfg.Mode); err != nil { - outErr = goerrors.Join(errors.Wrapf(err, "invalid sharing mode for test %q with cache mount at path %q", t.Name, p)) - } + if err := t.validate(); err != nil { + errs = append(errs, errors.Wrap(err, t.Name)) } } @@ -541,12 +355,12 @@ func (s Spec) Validate() error { for _, patch := range patches { patchSrc, ok := s.Sources[patch.Source] if !ok { - outErr = goerrors.Join(outErr, &InvalidPatchError{Source: src, PatchSpec: &patch, Err: errMissingSource}) + errs = append(errs, &InvalidPatchError{Source: src, PatchSpec: &patch, Err: errMissingSource}) continue } if err := validatePatch(patch, patchSrc); err != nil { - outErr = goerrors.Join(outErr, &InvalidPatchError{Source: src, PatchSpec: &patch, Err: err}) + errs = append(errs, &InvalidPatchError{Source: src, PatchSpec: &patch, Err: err}) } } } @@ -554,12 +368,21 @@ func (s Spec) Validate() error { switch s.Build.NetworkMode { case "", netModeNone, netModeSandbox: default: - outErr = goerrors.Join(outErr, fmt.Errorf("invalid network mode: %q: valid values %s", s.Build.NetworkMode, []string{netModeNone, netModeSandbox})) + errs = append(errs, fmt.Errorf("invalid network mode: %q: valid values %s", s.Build.NetworkMode, []string{netModeNone, netModeSandbox})) } - return outErr -} + if err := s.Dependencies.validate(); err != nil { + errs = append(errs, errors.Wrap(err, "dependencies")) + } + for k, t := range s.Targets { + if err := t.validate(); err != nil { + errs = append(errs, errors.Wrapf(err, "target %s", k)) + } + } + + return goerrors.Join(errs...) 
+} func validatePatch(patch PatchSpec, patchSrc Source) error { if SourceIsDir(patchSrc) { // Patch sources that use directory-backed sources require a subpath in the @@ -578,108 +401,6 @@ func validatePatch(patch PatchSpec, patchSrc Source) error { return nil } -func (c *CheckOutput) processBuildArgs(lex *shell.Lex, args map[string]string, allowArg func(string) bool) error { - for i, contains := range c.Contains { - updated, err := expandArgs(lex, contains, args, allowArg) - if err != nil { - return errors.Wrap(err, "error performing shell expansion on contains") - } - c.Contains[i] = updated - } - - updated, err := expandArgs(lex, c.EndsWith, args, allowArg) - if err != nil { - return errors.Wrap(err, "error performing shell expansion on endsWith") - } - c.EndsWith = updated - - updated, err = expandArgs(lex, c.Matches, args, allowArg) - if err != nil { - return errors.Wrap(err, "error performing shell expansion on matches") - } - c.Matches = updated - - updated, err = expandArgs(lex, c.Equals, args, allowArg) - if err != nil { - return errors.Wrap(err, "error performing shell expansion on equals") - } - c.Equals = updated - - updated, err = expandArgs(lex, c.StartsWith, args, allowArg) - if err != nil { - return errors.Wrap(err, "error performing shell expansion on startsWith") - } - c.StartsWith = updated - return nil -} - -func (c *TestSpec) processBuildArgs(lex *shell.Lex, args map[string]string, name string, allowArg func(string) bool) error { - var errs []error - appendErr := func(err error) { - errs = append(errs, err) - } - - for _, s := range c.Mounts { - err := s.Spec.substituteBuildArgs(args, allowArg) - if err != nil { - appendErr(fmt.Errorf("error performing shell expansion on source ref %q: %w", name, err)) - } - } - - for k, v := range c.Env { - updated, err := expandArgs(lex, v, args, allowArg) - if err != nil { - appendErr(fmt.Errorf("error performing shell expansion on env var %q for source %q: %w", k, name, err)) - } - c.Env[k] = updated - } - - for i, step := range c.Steps { - for k, v := range step.Env { - updated, err := expandArgs(lex, v, args, allowArg) - if err != nil { - appendErr(fmt.Errorf("error performing shell expansion on env var %q for source %q: %w", k, name, err)) - } - step.Env[k] = updated - c.Steps[i] = step - } - } - - for i, step := range c.Steps { - stdout := step.Stdout - if err := stdout.processBuildArgs(lex, args, allowArg); err != nil { - appendErr(err) - } - step.Stdout = stdout - - stderr := step.Stderr - if err := stderr.processBuildArgs(lex, args, allowArg); err != nil { - appendErr(err) - } - - step.Stderr = stderr - c.Steps[i] = step - } - - for name, f := range c.Files { - if err := f.processBuildArgs(lex, args, allowArg); err != nil { - appendErr(fmt.Errorf("error performing shell expansion to check output of file %s: %w", name, err)) - } - c.Files[name] = f - } - - return goerrors.Join(errs...) 
-} - -func (c *FileCheckOutput) processBuildArgs(lex *shell.Lex, args map[string]string, allowArg func(string) bool) error { - check := c.CheckOutput - if err := check.processBuildArgs(lex, args, allowArg); err != nil { - return err - } - c.CheckOutput = check - return nil -} - func (g *SourceGenerator) Validate() error { if g.Gomod == nil { // Gomod is the only valid generator type @@ -690,38 +411,39 @@ func (g *SourceGenerator) Validate() error { } func (s *PackageSigner) processBuildArgs(lex *shell.Lex, args map[string]string, allowArg func(string) bool) error { + var errs []error for k, v := range s.Args { updated, err := expandArgs(lex, v, args, allowArg) if err != nil { - return fmt.Errorf("error performing shell expansion on env var %q: %w", k, err) + errs = append(errs, errors.Wrapf(err, "arg %s=%s", k, v)) + continue } s.Args[k] = updated } - return nil + return goerrors.Join(errs...) } -func (t *Target) processBuildArgs(name string, lex *shell.Lex, args map[string]string, allowArg func(string) bool) error { - for _, tt := range t.Tests { - if err := tt.processBuildArgs(lex, args, path.Join(name, tt.Name), allowArg); err != nil { - return err - } - } - - if t.PackageConfig != nil { - if err := t.PackageConfig.processBuildArgs(lex, args, allowArg); err != nil { - return fmt.Errorf("error processing package config build args: %w", err) +func (cfg *PackageConfig) processBuildArgs(lex *shell.Lex, args map[string]string, allowArg func(string) bool) error { + if cfg.Signer != nil { + if err := cfg.Signer.processBuildArgs(lex, args, allowArg); err != nil { + return errors.Wrap(err, "signer") } } return nil } -func (cfg *PackageConfig) processBuildArgs(lex *shell.Lex, args map[string]string, allowArg func(string) bool) error { - if cfg.Signer != nil { - if err := cfg.Signer.processBuildArgs(lex, args, allowArg); err != nil { - return fmt.Errorf("could not process build args for signer config: %w", err) +func (b *ArtifactBuild) processBuildArgs(lex *shell.Lex, args map[string]string, allowArg func(string) bool) error { + var errs []error + + for k, v := range b.Env { + updated, err := expandArgs(lex, v, args, allowArg) + if err != nil { + errs = append(errs, errors.Wrapf(err, "env %s=%s", k, v)) + continue } + b.Env[k] = updated } - return nil + return goerrors.Join(errs...) 
} diff --git a/load_test.go b/load_test.go index 69a0814f..a0569b43 100644 --- a/load_test.go +++ b/load_test.go @@ -620,7 +620,7 @@ func TestCustomRepoFillDefaults(t *testing.T) { // Also, the env field should be set to all build stages, "build", "install", and "test", as it is // unspecified dt := []byte(` -dependencies: +dependencies: &deps extra_repos: - config: custom.repo: @@ -629,6 +629,9 @@ dependencies: public.gpg: context: {} path: "public.gpg" +targets: + foo: + dependencies: *deps `) spec, err := LoadSpec(dt) @@ -642,26 +645,134 @@ dependencies: } extraRepo := spec.Dependencies.ExtraRepos[0] - assert.Equal(t, extraRepo.Config["custom.repo"].Context.Name, - dockerui.DefaultLocalNameContext) + assert.Check(t, cmp.Equal(extraRepo.Config["custom.repo"].Context.Name, + dockerui.DefaultLocalNameContext)) - assert.Equal(t, extraRepo.Keys["public.gpg"].Context.Name, - dockerui.DefaultLocalNameContext) + assert.Check(t, cmp.Equal(extraRepo.Keys["public.gpg"].Context.Name, + dockerui.DefaultLocalNameContext)) + + assert.Check(t, cmp.DeepEqual(extraRepo.Envs, []string{"build", "install", "test"})) + + extraRepo = spec.Targets["foo"].Dependencies.ExtraRepos[0] + assert.Check(t, cmp.Equal(extraRepo.Config["custom.repo"].Context.Name, + dockerui.DefaultLocalNameContext)) + + assert.Check(t, cmp.Equal(extraRepo.Keys["public.gpg"].Context.Name, + dockerui.DefaultLocalNameContext)) + + assert.Check(t, cmp.DeepEqual(extraRepo.Envs, []string{"build", "install", "test"})) - assert.DeepEqual(t, extraRepo.Envs, []string{"build", "install", "test"}) } func TestBuildArgSubst(t *testing.T) { t.Run("value provided", func(t *testing.T) { dt := []byte(` args: - test: + SOME_ARG: + +version: 1.2.${SOME_ARG} +revision: ${SOME_ARG}ing + +x-vars: + img-src: &img-src + path: / + image: + ref: whatever + cmd: + env: + TEST: ${SOME_ARG} + git-src: &git-src + git: + url: https://${SOME_ARG} + commit: baddecaf${SOME_ARG} + http-src: &http-src + http: + url: https://${SOME_ARG} + context-src: &context-src + context: + name: ${SOME_ARG} + build-src: &build-src + build: + dockerfile_path: /foo/bar/${SOME_ARG} + source: *http-src + +sources: + img: *img-src + git: *git-src + http: *http-src + context: *context-src + build: *build-src build: + env: + TEST_TOP: ${SOME_ARG} steps: - command: echo $TEST env: - TEST: ${test} + TEST: ${SOME_ARG} + +tests: &tests + - name: a test + mounts: + - dest: /a + spec: *img-src + - dest: /a + spec: *git-src + - dest: /a + spec: *http-src + - dest: /a + spec: *context-src + - dest: /a + spec: *build-src + files: + foo: &check-output + equals: ${SOME_ARG} + contains: + - ${SOME_ARG} + starts_with: ${SOME_ARG} + ends_with: ${SOME_ARG} + steps: + - command: ${SOME_ARG} + stdout: *check-output + stderr: *check-output + stdin: ${SOME_ARG} + +dependencies: &deps + extra_repos: + - keys: + img: *img-src + git: *git-src + http: *http-src + context: *context-src + build: *build-src + config: + img: *img-src + git: *git-src + http: *http-src + context: *context-src + build: *build-src + data: + - dest: /a + spec: *img-src + - dest: /a + spec: *git-src + - dest: /a + spec: *http-src + - dest: /a + spec: *context-src + - dest: /a + spec: *build-src + +package_config: &pkg-config + signer: + args: + FOO: ${SOME_ARG} + +targets: + foo: + tests: *tests + dependencies: *deps + package_config: *pkg-config `) spec, err := LoadSpec(dt) @@ -670,10 +781,125 @@ build: } err = spec.SubstituteArgs(map[string]string{ - "test": "test", + "SOME_ARG": "test", }) assert.NilError(t, err) - assert.Equal(t, 
spec.Build.Steps[0].Env["TEST"], "test") + + assert.Check(t, cmp.Equal(spec.Version, "1.2.test")) + assert.Check(t, cmp.Equal(spec.Revision, "testing")) + assert.Check(t, cmp.Equal(spec.Sources["img"].DockerImage.Cmd.Env["TEST"], "test")) + assert.Check(t, cmp.Equal(spec.Sources["git"].Git.URL, "https://test")) + assert.Check(t, cmp.Equal(spec.Sources["git"].Git.Commit, "baddecaftest")) + assert.Check(t, cmp.Equal(spec.Sources["http"].HTTP.URL, "https://test")) + assert.Check(t, cmp.Equal(spec.Sources["context"].Context.Name, "test")) + assert.Check(t, cmp.Equal(spec.Sources["build"].Build.DockerfilePath, "/foo/bar/test")) + assert.Check(t, cmp.Equal(spec.Sources["build"].Build.Source.HTTP.URL, "https://test")) + + assert.Check(t, cmp.Equal(spec.Build.Env["TEST_TOP"], "test")) + assert.Check(t, cmp.Equal(spec.Build.Steps[0].Env["TEST"], "test")) + + assert.Check(t, cmp.Equal(spec.Dependencies.ExtraRepos[0].Keys["img"].DockerImage.Cmd.Env["TEST"], "test")) + assert.Check(t, cmp.Equal(spec.Dependencies.ExtraRepos[0].Keys["git"].Git.URL, "https://test")) + assert.Check(t, cmp.Equal(spec.Dependencies.ExtraRepos[0].Keys["git"].Git.Commit, "baddecaftest")) + assert.Check(t, cmp.Equal(spec.Dependencies.ExtraRepos[0].Keys["http"].HTTP.URL, "https://test")) + assert.Check(t, cmp.Equal(spec.Dependencies.ExtraRepos[0].Keys["context"].Context.Name, "test")) + assert.Check(t, cmp.Equal(spec.Dependencies.ExtraRepos[0].Keys["build"].Build.DockerfilePath, "/foo/bar/test")) + assert.Check(t, cmp.Equal(spec.Dependencies.ExtraRepos[0].Keys["build"].Build.Source.HTTP.URL, "https://test")) + + assert.Check(t, cmp.Equal(spec.Dependencies.ExtraRepos[0].Config["img"].DockerImage.Cmd.Env["TEST"], "test")) + assert.Check(t, cmp.Equal(spec.Dependencies.ExtraRepos[0].Config["git"].Git.URL, "https://test")) + assert.Check(t, cmp.Equal(spec.Dependencies.ExtraRepos[0].Config["git"].Git.Commit, "baddecaftest")) + assert.Check(t, cmp.Equal(spec.Dependencies.ExtraRepos[0].Config["http"].HTTP.URL, "https://test")) + assert.Check(t, cmp.Equal(spec.Dependencies.ExtraRepos[0].Config["context"].Context.Name, "test")) + assert.Check(t, cmp.Equal(spec.Dependencies.ExtraRepos[0].Config["build"].Build.DockerfilePath, "/foo/bar/test")) + assert.Check(t, cmp.Equal(spec.Dependencies.ExtraRepos[0].Config["build"].Build.Source.HTTP.URL, "https://test")) + + assert.Check(t, cmp.Equal(spec.Dependencies.ExtraRepos[0].Data[0].Spec.DockerImage.Cmd.Env["TEST"], "test")) + assert.Check(t, cmp.Equal(spec.Dependencies.ExtraRepos[0].Data[1].Spec.Git.URL, "https://test")) + assert.Check(t, cmp.Equal(spec.Dependencies.ExtraRepos[0].Data[1].Spec.Git.Commit, "baddecaftest")) + assert.Check(t, cmp.Equal(spec.Dependencies.ExtraRepos[0].Data[2].Spec.HTTP.URL, "https://test")) + assert.Check(t, cmp.Equal(spec.Dependencies.ExtraRepos[0].Data[3].Spec.Context.Name, "test")) + assert.Check(t, cmp.Equal(spec.Dependencies.ExtraRepos[0].Data[4].Spec.Build.DockerfilePath, "/foo/bar/test")) + assert.Check(t, cmp.Equal(spec.Dependencies.ExtraRepos[0].Data[4].Spec.Build.Source.HTTP.URL, "https://test")) + + assert.Check(t, cmp.Equal(spec.Tests[0].Mounts[0].Spec.DockerImage.Cmd.Env["TEST"], "test")) + assert.Check(t, cmp.Equal(spec.Tests[0].Mounts[1].Spec.Git.URL, "https://test")) + assert.Check(t, cmp.Equal(spec.Tests[0].Mounts[1].Spec.Git.Commit, "baddecaftest")) + assert.Check(t, cmp.Equal(spec.Tests[0].Mounts[2].Spec.HTTP.URL, "https://test")) + assert.Check(t, cmp.Equal(spec.Tests[0].Mounts[3].Spec.Context.Name, "test")) + assert.Check(t, 
cmp.Equal(spec.Tests[0].Mounts[4].Spec.Build.DockerfilePath, "/foo/bar/test")) + assert.Check(t, cmp.Equal(spec.Tests[0].Mounts[4].Spec.Build.Source.HTTP.URL, "https://test")) + + assert.Check(t, cmp.Equal(spec.Tests[0].Files["foo"].Equals, "test")) + assert.Check(t, cmp.Equal(spec.Tests[0].Files["foo"].Contains[0], "test")) + assert.Check(t, cmp.Equal(spec.Tests[0].Files["foo"].StartsWith, "test")) + assert.Check(t, cmp.Equal(spec.Tests[0].Files["foo"].EndsWith, "test")) + + assert.Check(t, cmp.Equal(spec.Tests[0].Steps[0].Stdin, "test")) + assert.Check(t, cmp.Equal(spec.Tests[0].Steps[0].Stdout.Equals, "test")) + assert.Check(t, cmp.Equal(spec.Tests[0].Steps[0].Stdout.Contains[0], "test")) + assert.Check(t, cmp.Equal(spec.Tests[0].Steps[0].Stdout.StartsWith, "test")) + assert.Check(t, cmp.Equal(spec.Tests[0].Steps[0].Stdout.EndsWith, "test")) + assert.Check(t, cmp.Equal(spec.Tests[0].Steps[0].Stderr.Equals, "test")) + assert.Check(t, cmp.Equal(spec.Tests[0].Steps[0].Stderr.Contains[0], "test")) + assert.Check(t, cmp.Equal(spec.Tests[0].Steps[0].Stderr.StartsWith, "test")) + assert.Check(t, cmp.Equal(spec.Tests[0].Steps[0].Stderr.EndsWith, "test")) + assert.Check(t, cmp.Equal(spec.Tests[0].Steps[0].Command, "test")) + + assert.Check(t, cmp.Equal(spec.PackageConfig.Signer.Args["FOO"], "test")) + + // Now test the same things but for items defined under the targets section. + target := spec.Targets["foo"] + + assert.Check(t, cmp.Equal(target.Tests[0].Mounts[0].Spec.DockerImage.Cmd.Env["TEST"], "test")) + assert.Check(t, cmp.Equal(target.Tests[0].Mounts[1].Spec.Git.URL, "https://test")) + assert.Check(t, cmp.Equal(target.Tests[0].Mounts[1].Spec.Git.Commit, "baddecaftest")) + assert.Check(t, cmp.Equal(target.Tests[0].Mounts[2].Spec.HTTP.URL, "https://test")) + assert.Check(t, cmp.Equal(target.Tests[0].Mounts[3].Spec.Context.Name, "test")) + assert.Check(t, cmp.Equal(target.Tests[0].Mounts[4].Spec.Build.DockerfilePath, "/foo/bar/test")) + assert.Check(t, cmp.Equal(target.Tests[0].Mounts[4].Spec.Build.Source.HTTP.URL, "https://test")) + + assert.Check(t, cmp.Equal(target.Tests[0].Files["foo"].Equals, "test")) + assert.Check(t, cmp.Equal(target.Tests[0].Files["foo"].Contains[0], "test")) + assert.Check(t, cmp.Equal(target.Tests[0].Files["foo"].StartsWith, "test")) + assert.Check(t, cmp.Equal(target.Tests[0].Files["foo"].EndsWith, "test")) + + assert.Check(t, cmp.Equal(target.Tests[0].Steps[0].Stdin, "test")) + assert.Check(t, cmp.Equal(target.Tests[0].Steps[0].Stdout.Equals, "test")) + assert.Check(t, cmp.Equal(target.Tests[0].Steps[0].Stdout.Contains[0], "test")) + assert.Check(t, cmp.Equal(target.Tests[0].Steps[0].Stdout.StartsWith, "test")) + assert.Check(t, cmp.Equal(target.Tests[0].Steps[0].Stdout.EndsWith, "test")) + assert.Check(t, cmp.Equal(target.Tests[0].Steps[0].Stderr.Equals, "test")) + assert.Check(t, cmp.Equal(target.Tests[0].Steps[0].Stderr.Contains[0], "test")) + assert.Check(t, cmp.Equal(target.Tests[0].Steps[0].Stderr.StartsWith, "test")) + assert.Check(t, cmp.Equal(target.Tests[0].Steps[0].Stderr.EndsWith, "test")) + assert.Check(t, cmp.Equal(target.Tests[0].Steps[0].Command, "test")) + + assert.Check(t, cmp.Equal(target.Dependencies.ExtraRepos[0].Keys["img"].DockerImage.Cmd.Env["TEST"], "test")) + assert.Check(t, cmp.Equal(target.Dependencies.ExtraRepos[0].Keys["git"].Git.URL, "https://test")) + assert.Check(t, cmp.Equal(target.Dependencies.ExtraRepos[0].Keys["git"].Git.Commit, "baddecaftest")) + assert.Check(t, 
cmp.Equal(target.Dependencies.ExtraRepos[0].Keys["http"].HTTP.URL, "https://test")) + assert.Check(t, cmp.Equal(target.Dependencies.ExtraRepos[0].Keys["context"].Context.Name, "test")) + assert.Check(t, cmp.Equal(target.Dependencies.ExtraRepos[0].Keys["build"].Build.DockerfilePath, "/foo/bar/test")) + assert.Check(t, cmp.Equal(target.Dependencies.ExtraRepos[0].Keys["build"].Build.Source.HTTP.URL, "https://test")) + + assert.Check(t, cmp.Equal(target.Dependencies.ExtraRepos[0].Config["img"].DockerImage.Cmd.Env["TEST"], "test")) + assert.Check(t, cmp.Equal(target.Dependencies.ExtraRepos[0].Config["git"].Git.URL, "https://test")) + assert.Check(t, cmp.Equal(target.Dependencies.ExtraRepos[0].Config["git"].Git.Commit, "baddecaftest")) + assert.Check(t, cmp.Equal(target.Dependencies.ExtraRepos[0].Config["http"].HTTP.URL, "https://test")) + assert.Check(t, cmp.Equal(target.Dependencies.ExtraRepos[0].Config["context"].Context.Name, "test")) + assert.Check(t, cmp.Equal(target.Dependencies.ExtraRepos[0].Config["build"].Build.DockerfilePath, "/foo/bar/test")) + assert.Check(t, cmp.Equal(target.Dependencies.ExtraRepos[0].Config["build"].Build.Source.HTTP.URL, "https://test")) + + assert.Check(t, cmp.Equal(target.Dependencies.ExtraRepos[0].Data[0].Spec.DockerImage.Cmd.Env["TEST"], "test")) + assert.Check(t, cmp.Equal(target.Dependencies.ExtraRepos[0].Data[1].Spec.Git.URL, "https://test")) + assert.Check(t, cmp.Equal(target.Dependencies.ExtraRepos[0].Data[1].Spec.Git.Commit, "baddecaftest")) + assert.Check(t, cmp.Equal(target.Dependencies.ExtraRepos[0].Data[2].Spec.HTTP.URL, "https://test")) + assert.Check(t, cmp.Equal(target.Dependencies.ExtraRepos[0].Data[3].Spec.Context.Name, "test")) + assert.Check(t, cmp.Equal(target.Dependencies.ExtraRepos[0].Data[4].Spec.Build.DockerfilePath, "/foo/bar/test")) + assert.Check(t, cmp.Equal(target.Dependencies.ExtraRepos[0].Data[4].Spec.Build.Source.HTTP.URL, "https://test")) + + assert.Check(t, cmp.Equal(target.PackageConfig.Signer.Args["FOO"], "test")) }) t.Run("default value", func(t *testing.T) { @@ -695,7 +921,8 @@ build: err = spec.SubstituteArgs(map[string]string{}) assert.NilError(t, err) - assert.Equal(t, spec.Build.Steps[0].Env["TEST"], "test") + + assert.Check(t, cmp.Equal(spec.Build.Steps[0].Env["TEST"], "test")) }) t.Run("build arg undeclared", func(t *testing.T) { @@ -715,7 +942,7 @@ build: } err = spec.SubstituteArgs(map[string]string{}) - assert.ErrorContains(t, err, `error performing shell expansion on build step 0: error performing shell expansion on env var "TEST" for step 0: build arg "test" not declared`) + assert.ErrorContains(t, err, `step index 0: env TEST=${test}: error performing variable expansion: build arg "test" not declared`) }) t.Run("multiple undefined build args", func(t *testing.T) { @@ -745,9 +972,9 @@ build: err = spec.SubstituteArgs(map[string]string{}) // all occurrences of undefined build args should be reported - assert.ErrorContains(t, err, `error performing shell expansion on source "test1": build arg "COMMIT1" not declared`) - assert.ErrorContains(t, err, `error performing shell expansion on source "test2": build arg "URL1" not declared`) - assert.ErrorContains(t, err, `error performing shell expansion on build step 0: error performing shell expansion on env var "TEST" for step 0: build arg "COMMIT1" not declared`) + assert.ErrorContains(t, err, `build arg "COMMIT1" not declared`) + assert.ErrorContains(t, err, `build arg "URL1" not declared`) + assert.ErrorContains(t, err, `build arg "COMMIT1" not declared`) }) 
t.Run("builtin build arg", func(t *testing.T) { diff --git a/source.go b/source.go index bc55a8ef..8aeb46c1 100644 --- a/source.go +++ b/source.go @@ -4,12 +4,15 @@ import ( "bufio" "bytes" "encoding/json" + goerrors "errors" "fmt" "io" "path/filepath" "strings" "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/frontend/dockerfile/shell" + "github.com/moby/buildkit/frontend/dockerui" "github.com/moby/buildkit/identity" "github.com/moby/buildkit/util/gitutil" "github.com/pkg/errors" @@ -141,7 +144,6 @@ func (src *SourceGit) AsState(opts ...llb.ConstraintsOpt) (llb.State, error) { st := llb.Git(ref.Remote, src.Commit, gOpts...) return st, nil - // TODO: Pass git secrets } func (src *SourceDockerImage) AsState(name string, path string, sOpt SourceOpts, opts ...llb.ConstraintsOpt) (llb.State, error) { @@ -276,9 +278,22 @@ func (m *SourceMount) validate(root string) error { // We cannot support this as the base mount for subPath will shadow the mount being done here. return errors.Wrapf(errInvalidMountConfig, "mount destination (%s) must not be a descendent of the target source path (%s)", m.Dest, root) } + return m.Spec.validate() +} + +func (m *SourceMount) processBuildArgs(args map[string]string, allowArg func(string) bool) error { + if err := m.Spec.processBuildArgs(args, allowArg); err != nil { + return errors.Wrapf(err, "mount dest: %s", m.Dest) + } return nil } +func (m *SourceMount) fillDefaults() { + src := m.Spec + fillDefaults(&src) + m.Spec = src +} + // must not be called with a nil cmd pointer // subPath must be a valid non-empty path func generateSourceFromImage(st llb.State, cmd *Command, sOpts SourceOpts, subPath string, opts ...llb.ConstraintsOpt) (llb.State, error) { @@ -671,10 +686,180 @@ func Sources(spec *Spec, sOpt SourceOpts, opts ...llb.ConstraintsOpt) (map[strin st, err := src.AsState(k, sOpt, opts...) 
 		if err != nil {
-			return nil, errors.Wrapf(err, "could not get source state for source: %s", k)
+			return nil, errors.Wrapf(err, "could not get source state for source: %s", k)
 		}
 
 		states[k] = st
 	}
 
 	return states, nil
 }
+
+func fillDefaults(s *Source) {
+	switch {
+	case s.DockerImage != nil:
+		if s.DockerImage.Cmd != nil {
+			for _, mnt := range s.DockerImage.Cmd.Mounts {
+				fillDefaults(&mnt.Spec)
+			}
+		}
+	case s.Git != nil:
+	case s.HTTP != nil:
+	case s.Context != nil:
+		if s.Context.Name == "" {
+			s.Context.Name = dockerui.DefaultLocalNameContext
+		}
+	case s.Build != nil:
+		fillDefaults(&s.Build.Source)
+	case s.Inline != nil:
+	}
+}
+
+func (s *Source) processBuildArgs(args map[string]string, allowArg func(key string) bool) error {
+	lex := shell.NewLex('\\')
+	// force the shell lexer to skip unresolved env vars so they aren't
+	// replaced with ""
+	lex.SkipUnsetEnv = true
+	var errs []error
+	appendErr := func(err error) {
+		errs = append(errs, err)
+	}
+
+	switch {
+	case s.DockerImage != nil:
+		updated, err := expandArgs(lex, s.DockerImage.Ref, args, allowArg)
+		if err != nil {
+			appendErr(fmt.Errorf("image ref: %w", err))
+		}
+		s.DockerImage.Ref = updated
+
+		if s.DockerImage.Cmd != nil {
+			if err := s.DockerImage.Cmd.processBuildArgs(lex, args, allowArg); err != nil {
+				appendErr(errors.Wrap(err, "docker image cmd source"))
+			}
+		}
+	case s.Git != nil:
+		updated, err := expandArgs(lex, s.Git.URL, args, allowArg)
+		s.Git.URL = updated
+		if err != nil {
+			appendErr(err)
+		}
+
+		updated, err = expandArgs(lex, s.Git.Commit, args, allowArg)
+		s.Git.Commit = updated
+		if err != nil {
+			appendErr(err)
+		}
+
+	case s.HTTP != nil:
+		updated, err := expandArgs(lex, s.HTTP.URL, args, allowArg)
+		if err != nil {
+			appendErr(err)
+		}
+		s.HTTP.URL = updated
+	case s.Context != nil:
+		updated, err := expandArgs(lex, s.Context.Name, args, allowArg)
+		s.Context.Name = updated
+		if err != nil {
+			appendErr(err)
+		}
+	case s.Build != nil:
+		err := s.Build.Source.processBuildArgs(args, allowArg)
+		if err != nil {
+			appendErr(err)
+		}
+
+		updated, err := expandArgs(lex, s.Build.DockerfilePath, args, allowArg)
+		if err != nil {
+			appendErr(err)
+		}
+		s.Build.DockerfilePath = updated
+
+		updated, err = expandArgs(lex, s.Build.Target, args, allowArg)
+		if err != nil {
+			appendErr(err)
+		}
+		s.Build.Target = updated
+	}
+
+	return goerrors.Join(errs...)
+}
+
+func (s *Source) validate(failContext ...string) (retErr error) {
+	count := 0
+
+	defer func() {
+		if retErr != nil && failContext != nil {
+			retErr = errors.Wrap(retErr, strings.Join(failContext, " "))
+		}
+	}()
+
+	for _, g := range s.Generate {
+		if err := g.Validate(); err != nil {
+			retErr = goerrors.Join(retErr, err)
+		}
+	}
+
+	if s.DockerImage != nil {
+		if s.DockerImage.Ref == "" {
+			retErr = goerrors.Join(retErr, fmt.Errorf("docker image source variant must have a ref"))
+		}
+
+		if s.DockerImage.Cmd != nil {
+			// If someone *really* wants to extract the entire rootfs, they need to say so explicitly.
+			// We won't fill this in for them, particularly because this is almost certainly not the user's intent.
+ if s.Path == "" { + retErr = goerrors.Join(retErr, errors.Errorf("source path cannot be empty")) + } + + for _, mnt := range s.DockerImage.Cmd.Mounts { + if err := mnt.validate(s.Path); err != nil { + retErr = goerrors.Join(retErr, err) + } + if err := mnt.Spec.validate("docker image source with ref", "'"+s.DockerImage.Ref+"'"); err != nil { + retErr = goerrors.Join(retErr, err) + } + } + } + + count++ + } + + if s.Git != nil { + count++ + } + if s.HTTP != nil { + if err := s.HTTP.validate(); err != nil { + retErr = goerrors.Join(retErr, err) + } + count++ + } + if s.Context != nil { + count++ + } + if s.Build != nil { + c := s.Build.DockerfilePath + if err := s.Build.validate("build source with dockerfile", "`"+c+"`"); err != nil { + retErr = goerrors.Join(retErr, err) + } + + count++ + } + + if s.Inline != nil { + if err := s.Inline.validate(s.Path); err != nil { + retErr = goerrors.Join(retErr, err) + } + count++ + } + + switch count { + case 0: + retErr = goerrors.Join(retErr, fmt.Errorf("no non-nil source variant")) + case 1: + return retErr + default: + retErr = goerrors.Join(retErr, fmt.Errorf("more than one source variant defined")) + } + + return retErr +} diff --git a/spec.go b/spec.go index c26a0c34..da4251fc 100644 --- a/spec.go +++ b/spec.go @@ -2,10 +2,7 @@ package dalec import ( - "fmt" "io/fs" - "regexp" - "strings" "time" "github.com/moby/buildkit/client/llb" @@ -323,67 +320,6 @@ type SourceGenerator struct { Gomod *GeneratorGomod `yaml:"gomod" json:"gomod"` } -// PackageConstraints is used to specify complex constraints for a package dependency. -type PackageConstraints struct { - // Version is a list of version constraints for the package. - // The format of these strings is dependent on the package manager of the target system. - // Examples: - // [">=1.0.0", "<2.0.0"] - Version []string `yaml:"version,omitempty" json:"version,omitempty"` - // Arch is a list of architecture constraints for the package. - // Use this to specify that a package constraint only applies to certain architectures. - Arch []string `yaml:"arch,omitempty" json:"arch,omitempty"` -} - -// PackageDependencies is a list of dependencies for a package. -// This will be included in the package metadata so that the package manager can install the dependencies. -// It also includes build-time dedendencies, which we'll install before running any build steps. -type PackageDependencies struct { - // Build is the list of packagese required to build the package. - Build map[string]PackageConstraints `yaml:"build,omitempty" json:"build,omitempty"` - // Runtime is the list of packages required to install/run the package. - Runtime map[string]PackageConstraints `yaml:"runtime,omitempty" json:"runtime,omitempty"` - // Recommends is the list of packages recommended to install with the generated package. - // Note: Not all package managers support this (e.g. rpm) - Recommends map[string]PackageConstraints `yaml:"recommends,omitempty" json:"recommends,omitempty"` - - // Test lists any extra packages required for running tests - // These packages are only installed for tests which have steps that require - // running a command in the built container. - // See [TestSpec] for more information. - Test []string `yaml:"test,omitempty" json:"test,omitempty"` - - // ExtraRepos is used to inject extra package repositories that may be used to - // satisfy package dependencies in various stages. 
- ExtraRepos []PackageRepositoryConfig `yaml:"extra_repos,omitempty" json:"extra_repos,omitempty"` -} - -// PackageRepositoryConfig -type PackageRepositoryConfig struct { - // Keys are the list of keys that need to be imported to use the configured - // repositories - Keys map[string]Source `yaml:"keys,omitempty" json:"keys,omitempty"` - - // Config list of repo configs to to add to the environment. The format of - // these configs are distro specific (e.g. apt/yum configs). - Config map[string]Source `yaml:"config" json:"config"` - - // Data lists all the extra data that needs to be made available for the - // provided repository config to work. - // As an example, if the provided config is referencing a file backed repository - // then data would include the file data, assuming its not already available - // in the environment. - Data []SourceMount `yaml:"data,omitempty" json:"data,omitempty"` - // Envs specifies the list of environments to make the repositories available - // during. - // Acceptable values are: - // - "build" - Repositories are added prior to installing build dependencies - // - "test" - Repositories are added prior to installing test dependencies - // - "install" - Repositories are added prior to installing the output - // package in a container build target. - Envs []string `yaml:"envs" json:"envs" jsonschema:"enum=build,enum=test,enum=install"` -} - // ArtifactBuild configures a group of steps that are run sequentially along with their outputs to build the artifact(s). type ArtifactBuild struct { // Steps is the list of commands to run to build the artifact(s). @@ -452,29 +388,6 @@ type Frontend struct { CmdLine string `yaml:"cmdline,omitempty" json:"cmdline,omitempty"` } -// Target defines a distro-specific build target. -// This is used in [Spec] to specify the build target for a distro. -type Target struct { - // Dependencies are the different dependencies that need to be specified in the package. - Dependencies *PackageDependencies `yaml:"dependencies,omitempty" json:"dependencies,omitempty"` - - // Image is the image configuration when the target output is a container image. - Image *ImageConfig `yaml:"image,omitempty" json:"image,omitempty"` - - // Frontend is the frontend configuration to use for the target. - // This is used to forward the build to a different, dalec-compatible frontend. - // This can be useful when testing out new distros or using a different version of the frontend for a given distro. - Frontend *Frontend `yaml:"frontend,omitempty" json:"frontend,omitempty"` - - // Tests are the list of tests to run which are specific to the target. - // Tests are appended to the list of tests in the main [Spec] - Tests []*TestSpec `yaml:"tests,omitempty" json:"tests,omitempty"` - - // PackageConfig is the configuration to use for artifact targets, such as - // rpms, debs, or zip files containing Windows binaries - PackageConfig *PackageConfig `yaml:"package_config,omitempty" json:"package_config,omitempty"` -} - // PackageSigner is the configuration for defining how to sign a package type PackageSigner struct { *Frontend `yaml:",inline" json:",inline"` @@ -488,154 +401,6 @@ type PackageConfig struct { Signer *PackageSigner `yaml:"signer,omitempty" json:"signer,omitempty"` } -// TestSpec is used to execute tests against a container with the package installed in it. 
-type TestSpec struct { - // Name is the name of the test - // This will be used to output the test results - Name string `yaml:"name" json:"name" jsonschema:"required"` - - // Dir is the working directory to run the command in. - Dir string `yaml:"dir,omitempty" json:"dir,omitempty"` - - // Mounts is the list of sources to mount into the build steps. - Mounts []SourceMount `yaml:"mounts,omitempty" json:"mounts,omitempty"` - - // List of CacheDirs which will be used across all Steps - CacheDirs map[string]CacheDirConfig `yaml:"cache_dirs,omitempty" json:"cache_dirs,omitempty"` - - // Env is the list of environment variables to set for all commands in this step group. - Env map[string]string `yaml:"env,omitempty" json:"env,omitempty"` - - // Steps is the list of commands to run to test the package. - Steps []TestStep `yaml:"steps" json:"steps" jsonschema:"required"` - - // Files is the list of files to check after running the steps. - Files map[string]FileCheckOutput `yaml:"files,omitempty" json:"files,omitempty"` -} - -// TestStep is a wrapper for [BuildStep] to include checks on stdio streams -type TestStep struct { - // Command is the command to run to build the artifact(s). - // This will always be wrapped as /bin/sh -c "", or whatever the equivalent is for the target distro. - Command string `yaml:"command" json:"command" jsonschema:"required"` - // Env is the list of environment variables to set for the command. - Env map[string]string `yaml:"env,omitempty" json:"env,omitempty"` - // Stdout is the expected output on stdout - Stdout CheckOutput `yaml:"stdout,omitempty" json:"stdout,omitempty"` - // Stderr is the expected output on stderr - Stderr CheckOutput `yaml:"stderr,omitempty" json:"stderr,omitempty"` - // Stdin is the input to pass to stdin for the command - Stdin string `yaml:"stdin,omitempty" json:"stdin,omitempty"` -} - -// CheckOutput is used to specify the expected output of a check, such as stdout/stderr or a file. -// All non-empty fields will be checked. -type CheckOutput struct { - // Equals is the exact string to compare the output to. - Equals string `yaml:"equals,omitempty" json:"equals,omitempty"` - // Contains is the list of strings to check if they are contained in the output. - Contains []string `yaml:"contains,omitempty" json:"contains,omitempty"` - // Matches is the regular expression to match the output against. - Matches string `yaml:"matches,omitempty" json:"matches,omitempty"` - // StartsWith is the string to check if the output starts with. - StartsWith string `yaml:"starts_with,omitempty" json:"starts_with,omitempty"` - // EndsWith is the string to check if the output ends with. - EndsWith string `yaml:"ends_with,omitempty" json:"ends_with,omitempty"` - // Empty is used to check if the output is empty. - Empty bool `yaml:"empty,omitempty" json:"empty,omitempty"` -} - -// IsEmpty is used to determine if there are any checks to perform. -func (c CheckOutput) IsEmpty() bool { - return c.Equals == "" && len(c.Contains) == 0 && c.Matches == "" && c.StartsWith == "" && c.EndsWith == "" && !c.Empty -} - -// Check is used to check the output stream. -func (c CheckOutput) Check(dt string, p string) (retErr error) { - if c.Empty { - if dt != "" { - return &CheckOutputError{Kind: "empty", Expected: "", Actual: dt, Path: p} - } - - // Anything else would be nonsensical and it would make sense to return early... 
- // But we'll check it anyway and it should fail since this would be an invalid CheckOutput - } - - if c.Equals != "" && c.Equals != dt { - return &CheckOutputError{Expected: c.Equals, Actual: dt, Path: p} - } - - for _, contains := range c.Contains { - if contains != "" && !strings.Contains(dt, contains) { - return &CheckOutputError{Kind: "contains", Expected: contains, Actual: dt, Path: p} - } - } - if c.Matches != "" { - regexp, err := regexp.Compile(c.Matches) - if err != nil { - return err - } - - if !regexp.Match([]byte(dt)) { - return &CheckOutputError{Kind: "matches", Expected: c.Matches, Actual: dt, Path: p} - } - } - - if c.StartsWith != "" && !strings.HasPrefix(dt, c.StartsWith) { - return &CheckOutputError{Kind: "starts_with", Expected: c.StartsWith, Actual: dt, Path: p} - } - - if c.EndsWith != "" && !strings.HasSuffix(dt, c.EndsWith) { - return &CheckOutputError{Kind: "ends_with", Expected: c.EndsWith, Actual: dt, Path: p} - } - - return nil -} - -// FileCheckOutput is used to specify the expected output of a file. -type FileCheckOutput struct { - CheckOutput `yaml:",inline"` - // Permissions is the expected permissions of the file. - Permissions fs.FileMode `yaml:"permissions,omitempty" json:"permissions,omitempty"` - // IsDir is used to set the expected file mode to a directory. - IsDir bool `yaml:"is_dir,omitempty" json:"is_dir,omitempty"` - // NotExist is used to check that the file does not exist. - NotExist bool `yaml:"not_exist,omitempty" json:"not_exist,omitempty"` - - // TODO: Support checking symlinks - // This is not currently possible with buildkit as it does not expose information about the symlink -} - -// Check is used to check the output file. -func (c FileCheckOutput) Check(dt string, mode fs.FileMode, isDir bool, p string) error { - if c.IsDir && !isDir { - return &CheckOutputError{Kind: "mode", Expected: "ModeDir", Actual: "ModeFile", Path: p} - } - - if !c.IsDir && isDir { - return &CheckOutputError{Kind: "mode", Expected: "ModeFile", Actual: "ModeDir", Path: p} - } - - perm := mode.Perm() - if c.Permissions != 0 && c.Permissions != perm { - return &CheckOutputError{Kind: "permissions", Expected: c.Permissions.String(), Actual: perm.String(), Path: p} - } - - return c.CheckOutput.Check(dt, p) -} - -// CheckOutputError is used to build an error message for a failed output check for a test case. -type CheckOutputError struct { - Kind string - Expected string - Actual string - Path string -} - -func (c *CheckOutputError) Error() string { - return fmt.Sprintf("expected %q %s %q, got %q", c.Path, c.Kind, c.Expected, c.Actual) -} - func (s *SystemdConfiguration) IsEmpty() bool { if s == nil { return true diff --git a/target.go b/target.go new file mode 100644 index 00000000..5f332e7d --- /dev/null +++ b/target.go @@ -0,0 +1,71 @@ +package dalec + +import ( + goerrors "errors" + + "github.com/moby/buildkit/frontend/dockerfile/shell" + "github.com/pkg/errors" +) + +// Target defines a distro-specific build target. +// This is used in [Spec] to specify the build target for a distro. +type Target struct { + // Dependencies are the different dependencies that need to be specified in the package. + Dependencies *PackageDependencies `yaml:"dependencies,omitempty" json:"dependencies,omitempty"` + + // Image is the image configuration when the target output is a container image. + Image *ImageConfig `yaml:"image,omitempty" json:"image,omitempty"` + + // Frontend is the frontend configuration to use for the target. 
+ // This is used to forward the build to a different, dalec-compatible frontend. + // This can be useful when testing out new distros or using a different version of the frontend for a given distro. + Frontend *Frontend `yaml:"frontend,omitempty" json:"frontend,omitempty"` + + // Tests are the list of tests to run which are specific to the target. + // Tests are appended to the list of tests in the main [Spec] + Tests []*TestSpec `yaml:"tests,omitempty" json:"tests,omitempty"` + + // PackageConfig is the configuration to use for artifact targets, such as + // rpms, debs, or zip files containing Windows binaries + PackageConfig *PackageConfig `yaml:"package_config,omitempty" json:"package_config,omitempty"` +} + +func (t *Target) validate() error { + var errs []error + if err := t.Dependencies.validate(); err != nil { + errs = append(errs, errors.Wrap(err, "dependencies")) + } + + for _, test := range t.Tests { + if err := test.validate(); err != nil { + errs = append(errs, errors.Wrapf(err, "test %s", test.Name)) + } + } + + return goerrors.Join(errs...) +} + +func (t *Target) processBuildArgs(lex *shell.Lex, args map[string]string, allowArg func(string) bool) error { + var errs []error + for _, tt := range t.Tests { + if err := tt.processBuildArgs(lex, args, allowArg); err != nil { + errs = append(errs, err) + } + } + + if t.PackageConfig != nil { + if err := t.PackageConfig.processBuildArgs(lex, args, allowArg); err != nil { + errs = append(errs, errors.Wrap(err, "package config")) + } + } + + if err := t.Dependencies.processBuildArgs(args, allowArg); err != nil { + errs = append(errs, errors.Wrap(err, "dependencies")) + } + + return goerrors.Join(errs...) +} + +func (t *Target) fillDefaults() { + t.Dependencies.fillDefaults() +} diff --git a/tests.go b/tests.go new file mode 100644 index 00000000..29c2ee0e --- /dev/null +++ b/tests.go @@ -0,0 +1,309 @@ +package dalec + +import ( + goerrors "errors" + "fmt" + "io/fs" + "regexp" + "strings" + + "github.com/moby/buildkit/frontend/dockerfile/shell" + "github.com/pkg/errors" +) + +// TestSpec is used to execute tests against a container with the package installed in it. +type TestSpec struct { + // Name is the name of the test + // This will be used to output the test results + Name string `yaml:"name" json:"name" jsonschema:"required"` + + // Dir is the working directory to run the command in. + Dir string `yaml:"dir,omitempty" json:"dir,omitempty"` + + // Mounts is the list of sources to mount into the build steps. + Mounts []SourceMount `yaml:"mounts,omitempty" json:"mounts,omitempty"` + + // List of CacheDirs which will be used across all Steps + CacheDirs map[string]CacheDirConfig `yaml:"cache_dirs,omitempty" json:"cache_dirs,omitempty"` + + // Env is the list of environment variables to set for all commands in this step group. + Env map[string]string `yaml:"env,omitempty" json:"env,omitempty"` + + // Steps is the list of commands to run to test the package. + Steps []TestStep `yaml:"steps" json:"steps" jsonschema:"required"` + + // Files is the list of files to check after running the steps. + Files map[string]FileCheckOutput `yaml:"files,omitempty" json:"files,omitempty"` +} + +// TestStep is a wrapper for [BuildStep] to include checks on stdio streams +type TestStep struct { + // Command is the command to run to build the artifact(s). + // This will always be wrapped as /bin/sh -c "", or whatever the equivalent is for the target distro. 
+ Command string `yaml:"command" json:"command" jsonschema:"required"` + // Env is the list of environment variables to set for the command. + Env map[string]string `yaml:"env,omitempty" json:"env,omitempty"` + // Stdout is the expected output on stdout + Stdout CheckOutput `yaml:"stdout,omitempty" json:"stdout,omitempty"` + // Stderr is the expected output on stderr + Stderr CheckOutput `yaml:"stderr,omitempty" json:"stderr,omitempty"` + // Stdin is the input to pass to stdin for the command + Stdin string `yaml:"stdin,omitempty" json:"stdin,omitempty"` +} + +// CheckOutput is used to specify the expected output of a check, such as stdout/stderr or a file. +// All non-empty fields will be checked. +type CheckOutput struct { + // Equals is the exact string to compare the output to. + Equals string `yaml:"equals,omitempty" json:"equals,omitempty"` + // Contains is the list of strings to check if they are contained in the output. + Contains []string `yaml:"contains,omitempty" json:"contains,omitempty"` + // Matches is the regular expression to match the output against. + Matches string `yaml:"matches,omitempty" json:"matches,omitempty"` + // StartsWith is the string to check if the output starts with. + StartsWith string `yaml:"starts_with,omitempty" json:"starts_with,omitempty"` + // EndsWith is the string to check if the output ends with. + EndsWith string `yaml:"ends_with,omitempty" json:"ends_with,omitempty"` + // Empty is used to check if the output is empty. + Empty bool `yaml:"empty,omitempty" json:"empty,omitempty"` +} + +// FileCheckOutput is used to specify the expected output of a file. +type FileCheckOutput struct { + CheckOutput `yaml:",inline"` + // Permissions is the expected permissions of the file. + Permissions fs.FileMode `yaml:"permissions,omitempty" json:"permissions,omitempty"` + // IsDir is used to set the expected file mode to a directory. + IsDir bool `yaml:"is_dir,omitempty" json:"is_dir,omitempty"` + // NotExist is used to check that the file does not exist. + NotExist bool `yaml:"not_exist,omitempty" json:"not_exist,omitempty"` + + // TODO: Support checking symlinks + // This is not currently possible with buildkit as it does not expose information about the symlink +} + +// CheckOutputError is used to build an error message for a failed output check for a test case. +type CheckOutputError struct { + Kind string + Expected string + Actual string + Path string +} + +func (c *CheckOutputError) Error() string { + return fmt.Sprintf("expected %q %s %q, got %q", c.Path, c.Kind, c.Expected, c.Actual) +} + +// IsEmpty is used to determine if there are any checks to perform. +func (c CheckOutput) IsEmpty() bool { + return c.Equals == "" && len(c.Contains) == 0 && c.Matches == "" && c.StartsWith == "" && c.EndsWith == "" && !c.Empty +} + +func (t *TestSpec) validate() error { + var errs []error + + for _, m := range t.Mounts { + if err := m.validate("/"); err != nil { + errs = append(errs, errors.Wrapf(err, "mount %s", m.Dest)) + } + } + + for p, cfg := range t.CacheDirs { + if _, err := sharingMode(cfg.Mode); err != nil { + errs = append(errs, errors.Wrapf(err, "invalid sharing mode for test %q with cache mount at path %q", t.Name, p)) + } + } + + return goerrors.Join(errs...) 
+} + +func (c *CheckOutput) processBuildArgs(lex *shell.Lex, args map[string]string, allowArg func(string) bool) error { + for i, contains := range c.Contains { + updated, err := expandArgs(lex, contains, args, allowArg) + if err != nil { + return fmt.Errorf("%w: contains at list index %d", err, i) + } + c.Contains[i] = updated + } + + updated, err := expandArgs(lex, c.EndsWith, args, allowArg) + if err != nil { + return fmt.Errorf("%w: endsWith", err) + } + c.EndsWith = updated + + updated, err = expandArgs(lex, c.Matches, args, allowArg) + if err != nil { + return fmt.Errorf("%w: matches", err) + } + c.Matches = updated + + updated, err = expandArgs(lex, c.Equals, args, allowArg) + if err != nil { + return fmt.Errorf("%w: equals", err) + } + c.Equals = updated + + updated, err = expandArgs(lex, c.StartsWith, args, allowArg) + if err != nil { + return fmt.Errorf("%w: startsWith", err) + } + c.StartsWith = updated + return nil +} + +func (s *TestStep) processBuildArgs(lex *shell.Lex, args map[string]string, allowArg func(string) bool) error { + var errs []error + appendErr := func(err error) { + errs = append(errs, err) + } + + for k, v := range s.Env { + updated, err := expandArgs(lex, v, args, allowArg) + if err != nil { + appendErr(errors.Wrapf(err, "env %s=%s", k, v)) + continue + } + s.Env[k] = updated + } + + updated, err := expandArgs(lex, s.Stdin, args, allowArg) + if err != nil { + appendErr(errors.Wrap(err, "stdin")) + } + if updated != s.Stdin { + s.Stdin = updated + } + + stdout := s.Stdout + if err := stdout.processBuildArgs(lex, args, allowArg); err != nil { + appendErr(errors.Wrap(err, "stdout")) + } + s.Stdout = stdout + + stderr := s.Stderr + if err := stderr.processBuildArgs(lex, args, allowArg); err != nil { + appendErr(errors.Wrap(err, "stderr")) + } + s.Stderr = stderr + + updated, err = expandArgs(lex, s.Command, args, allowArg) + if err != nil { + appendErr(errors.Wrap(err, "command")) + } + if updated != s.Command { + s.Command = updated + } + + return goerrors.Join(errs...) +} + +func (c *TestSpec) processBuildArgs(lex *shell.Lex, args map[string]string, allowArg func(string) bool) error { + var errs []error + appendErr := func(err error) { + errs = append(errs, err) + } + + for i, s := range c.Mounts { + if err := s.processBuildArgs(args, allowArg); err != nil { + appendErr(err) + continue + } + c.Mounts[i] = s + } + + for k, v := range c.Env { + updated, err := expandArgs(lex, v, args, allowArg) + if err != nil { + appendErr(errors.Wrapf(err, "%s=%s", k, v)) + continue + } + c.Env[k] = updated + } + + for i, step := range c.Steps { + if err := step.processBuildArgs(lex, args, allowArg); err != nil { + appendErr(errors.Wrapf(err, "step index %d", i)) + continue + } + c.Steps[i] = step + } + + for name, f := range c.Files { + if err := f.processBuildArgs(lex, args, allowArg); err != nil { + appendErr(fmt.Errorf("error performing shell expansion to check output of file %s: %w", name, err)) + } + c.Files[name] = f + } + + return errors.Wrap(goerrors.Join(errs...), c.Name) +} + +func (c *FileCheckOutput) processBuildArgs(lex *shell.Lex, args map[string]string, allowArg func(string) bool) error { + check := c.CheckOutput + if err := check.processBuildArgs(lex, args, allowArg); err != nil { + return err + } + c.CheckOutput = check + return nil +} + +// Check is used to check the output stream. 
+func (c CheckOutput) Check(dt string, p string) error {
+	if c.Empty {
+		if dt != "" {
+			return &CheckOutputError{Kind: "empty", Expected: "", Actual: dt, Path: p}
+		}
+
+		// Setting Empty alongside any other check is contradictory. Rather than
+		// returning early, fall through so the remaining checks fail and surface
+		// the invalid CheckOutput.
+	}
+
+	if c.Equals != "" && c.Equals != dt {
+		return &CheckOutputError{Kind: "equals", Expected: c.Equals, Actual: dt, Path: p}
+	}
+
+	for _, contains := range c.Contains {
+		if contains != "" && !strings.Contains(dt, contains) {
+			return &CheckOutputError{Kind: "contains", Expected: contains, Actual: dt, Path: p}
+		}
+	}
+	if c.Matches != "" {
+		re, err := regexp.Compile(c.Matches)
+		if err != nil {
+			return err
+		}
+
+		if !re.MatchString(dt) {
+			return &CheckOutputError{Kind: "matches", Expected: c.Matches, Actual: dt, Path: p}
+		}
+	}
+
+	if c.StartsWith != "" && !strings.HasPrefix(dt, c.StartsWith) {
+		return &CheckOutputError{Kind: "starts_with", Expected: c.StartsWith, Actual: dt, Path: p}
+	}
+
+	if c.EndsWith != "" && !strings.HasSuffix(dt, c.EndsWith) {
+		return &CheckOutputError{Kind: "ends_with", Expected: c.EndsWith, Actual: dt, Path: p}
+	}
+
+	return nil
+}
+
+// Check is used to check the output file.
+func (c FileCheckOutput) Check(dt string, mode fs.FileMode, isDir bool, p string) error {
+	if c.IsDir && !isDir {
+		return &CheckOutputError{Kind: "mode", Expected: "ModeDir", Actual: "ModeFile", Path: p}
+	}
+
+	if !c.IsDir && isDir {
+		return &CheckOutputError{Kind: "mode", Expected: "ModeFile", Actual: "ModeDir", Path: p}
+	}
+
+	perm := mode.Perm()
+	if c.Permissions != 0 && c.Permissions != perm {
+		return &CheckOutputError{Kind: "permissions", Expected: c.Permissions.String(), Actual: perm.String(), Path: p}
+	}
+
+	return c.CheckOutput.Check(dt, p)
+}
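
The sketch below (not part of the diff) illustrates how the checks defined in tests.go compose: every non-empty CheckOutput field is evaluated and the first failing check is returned, while FileCheckOutput verifies mode and permissions before delegating to the content checks. The import path, file paths, and expected values are assumptions used only for illustration.

// Minimal sketch, assuming the module import path github.com/Azure/dalec.
package main

import (
	"fmt"
	"io/fs"

	"github.com/Azure/dalec"
)

func main() {
	// All non-empty fields are checked; a nil error means every check passed.
	stdout := dalec.CheckOutput{
		StartsWith: "hello",
		Contains:   []string{"world"},
	}
	fmt.Println(stdout.Check("hello, world\n", "stdout")) // <nil>

	// FileCheckOutput checks the mode and permissions first, then the content.
	file := dalec.FileCheckOutput{
		CheckOutput: dalec.CheckOutput{Contains: []string{"[Unit]"}},
		Permissions: 0o644,
	}
	// Hypothetical path and mode: the permission mismatch is reported before
	// the content checks run.
	err := file.Check("[Unit]\n", fs.FileMode(0o600), false, "/etc/systemd/system/foo.service")
	fmt.Println(err)
}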